From 419f4e6a76add0310644f030dbf7ab81fa4cacb1 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 11:13:26 +1000 Subject: [PATCH 01/20] initial commit --- recognition/vq-vae_s47036219/README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 recognition/vq-vae_s47036219/README.md diff --git a/recognition/vq-vae_s47036219/README.md b/recognition/vq-vae_s47036219/README.md new file mode 100644 index 000000000..bc7774a7b --- /dev/null +++ b/recognition/vq-vae_s47036219/README.md @@ -0,0 +1 @@ +hello world! \ No newline at end of file From 9fba326d72cefbcee06949581ef8f58aa84f71d9 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 13:12:30 +1000 Subject: [PATCH 02/20] encoder and decoder initial implementation --- recognition/vq-vae_s47036219/README.md | 3 +- recognition/vq-vae_s47036219/module.py | 39 ++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 recognition/vq-vae_s47036219/module.py diff --git a/recognition/vq-vae_s47036219/README.md b/recognition/vq-vae_s47036219/README.md index bc7774a7b..3efd78623 100644 --- a/recognition/vq-vae_s47036219/README.md +++ b/recognition/vq-vae_s47036219/README.md @@ -1 +1,2 @@ -hello world! \ No newline at end of file +hello world! + diff --git a/recognition/vq-vae_s47036219/module.py b/recognition/vq-vae_s47036219/module.py new file mode 100644 index 000000000..95733f944 --- /dev/null +++ b/recognition/vq-vae_s47036219/module.py @@ -0,0 +1,39 @@ +import torch +import torch.nn as nn +import torch.optim as optim +from torch.utils.data import DataLoader +from torchvision import datasets, transforms + +class Encoder(nn.Module): + def __init__(self): + super(Encoder, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(1, 64, 4, stride=2, padding=1), + nn.ReLU(), + nn.Conv2d(64, 128, 4, stride=2, padding=1), + nn.ReLU(), + nn.Conv2d(128, 256, 4, stride=2, padding=1), + nn.ReLU(), + nn.Conv2d(256, 512, 4, stride=2, padding=1), + nn.ReLU() + ) + + def forward(self, x): + return self.conv(x) + +class Decoder(nn.Module): + def __init__(self): + super(Decoder, self).__init__() + self.deconv = nn.Sequential( + nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1), + nn.ReLU(), + nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1), + nn.ReLU(), + nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1), + nn.ReLU(), + nn.ConvTranspose2d(64, 1, 4, stride=2, padding=1), + nn.Sigmoid() + ) + + def forward(self, x): + return self.deconv(x) From 7c2efd22a3cd9b987128564ee0f2ec579afa64e5 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 13:16:00 +1000 Subject: [PATCH 03/20] workspace initialisation --- recognition/vq-vae_s47036219/dataset.py | 0 recognition/vq-vae_s47036219/{module.py => modules.py} | 0 recognition/vq-vae_s47036219/predict.py | 0 recognition/vq-vae_s47036219/train.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 recognition/vq-vae_s47036219/dataset.py rename recognition/vq-vae_s47036219/{module.py => modules.py} (100%) create mode 100644 recognition/vq-vae_s47036219/predict.py create mode 100644 recognition/vq-vae_s47036219/train.py diff --git a/recognition/vq-vae_s47036219/dataset.py b/recognition/vq-vae_s47036219/dataset.py new file mode 100644 index 000000000..e69de29bb diff --git a/recognition/vq-vae_s47036219/module.py b/recognition/vq-vae_s47036219/modules.py similarity index 100% rename from recognition/vq-vae_s47036219/module.py rename to recognition/vq-vae_s47036219/modules.py diff --git 
a/recognition/vq-vae_s47036219/predict.py b/recognition/vq-vae_s47036219/predict.py new file mode 100644 index 000000000..e69de29bb diff --git a/recognition/vq-vae_s47036219/train.py b/recognition/vq-vae_s47036219/train.py new file mode 100644 index 000000000..e69de29bb From f1b11bd1c2d06a6b2e5e89d10110c7ef6f5c586f Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 17:15:41 +1000 Subject: [PATCH 04/20] working monolith implementation --- recognition/vq-vae_s47036219/modules.py | 188 +++++++++++++++++++++++- 1 file changed, 185 insertions(+), 3 deletions(-) diff --git a/recognition/vq-vae_s47036219/modules.py b/recognition/vq-vae_s47036219/modules.py index 95733f944..c95d32c9e 100644 --- a/recognition/vq-vae_s47036219/modules.py +++ b/recognition/vq-vae_s47036219/modules.py @@ -3,37 +3,219 @@ import torch.optim as optim from torch.utils.data import DataLoader from torchvision import datasets, transforms +import pytorch_ssim + +def ssim(img1, img2, C1=0.01**2, C2=0.03**2): + mu1 = img1.mean(dim=[2, 3], keepdim=True) + mu2 = img2.mean(dim=[2, 3], keepdim=True) + + sigma1_sq = (img1 - mu1).pow(2).mean(dim=[2, 3], keepdim=True) + sigma2_sq = (img2 - mu2).pow(2).mean(dim=[2, 3], keepdim=True) + sigma12 = ((img1 - mu1)*(img2 - mu2)).mean(dim=[2, 3], keepdim=True) + + ssim_n = (2*mu1*mu2 + C1) * (2*sigma12 + C2) + ssim_d = (mu1.pow(2) + mu2.pow(2) + C1) * (sigma1_sq + sigma2_sq + C2) + + ssim_val = ssim_n / ssim_d + + return ssim_val.mean() +# Check for CUDA availability and set the device accordingly +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +print("Running on: ", device) + +# VQ-VAE Components class Encoder(nn.Module): def __init__(self): super(Encoder, self).__init__() self.conv = nn.Sequential( nn.Conv2d(1, 64, 4, stride=2, padding=1), + nn.BatchNorm2d(64), nn.ReLU(), + nn.Dropout(0.2), nn.Conv2d(64, 128, 4, stride=2, padding=1), + nn.BatchNorm2d(128), nn.ReLU(), + nn.Dropout(0.2), nn.Conv2d(128, 256, 4, stride=2, padding=1), + nn.BatchNorm2d(256), nn.ReLU(), + nn.Dropout(0.2), nn.Conv2d(256, 512, 4, stride=2, padding=1), - nn.ReLU() + nn.BatchNorm2d(512), + nn.ReLU(), + nn.Dropout(0.2) ) def forward(self, x): - return self.conv(x) + x = self.conv(x) + #print(f"Encoder output shape: {x.shape}") + return x class Decoder(nn.Module): def __init__(self): super(Decoder, self).__init__() self.deconv = nn.Sequential( nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1), + nn.BatchNorm2d(256), nn.ReLU(), + nn.Dropout(0.2), nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1), + nn.BatchNorm2d(128), nn.ReLU(), + nn.Dropout(0.2), nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1), + nn.BatchNorm2d(64), nn.ReLU(), + nn.Dropout(0.2), nn.ConvTranspose2d(64, 1, 4, stride=2, padding=1), nn.Sigmoid() ) def forward(self, x): - return self.deconv(x) + x = self.deconv(x) + #print(f"Decoder output shape: {x.shape}") + return x + +class VectorQuantizer(nn.Module): + def __init__(self, codebook_size=512, code_dim=512): + super(VectorQuantizer, self).__init__() + self.codebook = nn.Parameter(torch.randn(codebook_size, code_dim).cuda()) + + def forward(self, z): + # z shape: [batch, num_vectors_per_batch, dim] + # codebook shape: [num_codebook_entries, dim] + + # Calculate distances + dist = ((z.unsqueeze(2) - self.codebook.unsqueeze(0).unsqueeze(0)) ** 2).sum(-1) + + # Find the closest codebook entry for each z vector + _, indices = dist.min(-1) + + # Fetch the corresponding codebook vectors + # The shape is the same as z ([batch, num_vectors_per_batch, dim]) + z_q = 
self.codebook[indices, :] + + return z_q + +# Hyperparameters +learning_rate = 0.1 +batch_size = 32 +num_epochs = 75 +codebook_size = 512 + +# Weight for L2 and SSIM in final loss +l2_weight = 0 +ssim_weight = 1 + +# Constants for early stopping +patience = 12 +best_val_loss = float('inf') +counter = 0 + + +# Data Loaders +transform = transforms.Compose([ + transforms.Grayscale(num_output_channels=1), + transforms.Resize((64, 64)), + transforms.ToTensor(), + #transforms.Normalize(mean=[0.5], std=[0.5]) +]) + +train_dataset = datasets.ImageFolder(root='C:/Users/Connor/Documents/comp3710/dataset/ADNI/AD_NC/train', transform=transform) +train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) + +# Validation DataLoader +val_dataset = datasets.ImageFolder(root='C:/Users/Connor/Documents/comp3710/dataset/ADNI/AD_NC/test', transform=transform) +val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False) + + +# Models and Optimizers +encoder = Encoder().to(device) # Move to device +decoder = Decoder().to(device) # Move to device +vector_quantizer = VectorQuantizer().to(device) # Move to device + +optimizer = optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=learning_rate) +scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=2, verbose=True) + +# Training Loop +for epoch in range(num_epochs): + for i, (img, _) in enumerate(train_loader): + img = img.to(device) # Move to device + + # Forward pass + z = encoder(img) + batch_size, _, H, W = z.shape + + z = z.permute(0, 2, 3, 1).contiguous().view(batch_size, H * W, -1) + z_q = vector_quantizer(z) + z_q = z_q.view(batch_size, H, W, 512).permute(0, 3, 1, 2).contiguous() + + # Decoder + recon = decoder(z_q) + + # Calculate L2 loss + l2_loss = ((recon - img) ** 2).sum() + + # Calculate SSIM loss + ssim_loss = 1 - ssim(img, recon) + + # Final Loss + loss = l2_weight * l2_loss + ssim_weight * ssim_loss + + optimizer.zero_grad() + loss.backward(retain_graph=True) + optimizer.step() + + # Update codebook + vector_quantizer.codebook.data = 0.99 * vector_quantizer.codebook.data + 0.01 * z.detach().mean(0).mean(0) + + # Validation phase + val_losses = [] + encoder.eval() + decoder.eval() + vector_quantizer.eval() + with torch.no_grad(): + for i, (img, _) in enumerate(val_loader): + img = img.to(device) + + # Validation forward pass + z = encoder(img) + batch_size, _, H, W = z.shape + + z = z.permute(0, 2, 3, 1).contiguous().view(batch_size, H * W, -1) + z_q = vector_quantizer(z) + z_q = z_q.view(batch_size, H, W, 512).permute(0, 3, 1, 2).contiguous() + + # Decoder + recon = decoder(z_q) + + # Validation losses + l2_loss = ((recon - img) ** 2).sum() + ssim_loss = 1 - ssim(img, recon) + loss = l2_weight * l2_loss + ssim_weight * ssim_loss + + val_losses.append(loss.item()) + + avg_val_loss = sum(val_losses) / len(val_losses) + print(f"Epoch [{epoch+1}/{num_epochs}], Training Loss: {loss.item():.4f}, Validation Loss: {avg_val_loss:.4f}") + + # Early Stopping + if avg_val_loss < best_val_loss: + best_val_loss = avg_val_loss + counter = 0 + else: + counter += 1 + if counter >= patience: + print(f"Early stopping at epoch {epoch+1}") + break + +# Save Models +torch.save(encoder.state_dict(), 'encoder.pth') +torch.save(decoder.state_dict(), 'decoder.pth') +torch.save(vector_quantizer.state_dict(), 'vectorquantizer.pth') + +# Save Models +torch.save(encoder.state_dict(), 'encoder.pth') +torch.save(decoder.state_dict(), 'decoder.pth') +torch.save(vector_quantizer.state_dict(), 
'vectorquantizer.pth') \ No newline at end of file From c744804e9c943d437ff2a26b70a5ad88908b4c3c Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 19:51:40 +1000 Subject: [PATCH 05/20] data validation on full model --- recognition/vq-vae_s47036219/modules.py | 71 ++++++++++++++++++++++--- 1 file changed, 65 insertions(+), 6 deletions(-) diff --git a/recognition/vq-vae_s47036219/modules.py b/recognition/vq-vae_s47036219/modules.py index c95d32c9e..0f80e7870 100644 --- a/recognition/vq-vae_s47036219/modules.py +++ b/recognition/vq-vae_s47036219/modules.py @@ -20,10 +20,26 @@ def ssim(img1, img2, C1=0.01**2, C2=0.03**2): ssim_val = ssim_n / ssim_d return ssim_val.mean() + # Check for CUDA availability and set the device accordingly device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print("Running on: ", device) +class ResidualBlock(nn.Module): + def __init__(self, in_channels): + super(ResidualBlock, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(in_channels, in_channels, 3, padding=1), + nn.BatchNorm2d(in_channels), + nn.ReLU(), + nn.Conv2d(in_channels, in_channels, 3, padding=1), + nn.BatchNorm2d(in_channels) + ) + + def forward(self, x): + return x + self.conv(x) + +# VQ-VAE Components # VQ-VAE Components class Encoder(nn.Module): def __init__(self): @@ -99,7 +115,7 @@ def forward(self, z): return z_q # Hyperparameters -learning_rate = 0.1 +learning_rate = 1e-3 batch_size = 32 num_epochs = 75 codebook_size = 512 @@ -109,7 +125,7 @@ def forward(self, z): ssim_weight = 1 # Constants for early stopping -patience = 12 +patience = 75 best_val_loss = float('inf') counter = 0 @@ -215,7 +231,50 @@ def forward(self, z): torch.save(decoder.state_dict(), 'decoder.pth') torch.save(vector_quantizer.state_dict(), 'vectorquantizer.pth') -# Save Models -torch.save(encoder.state_dict(), 'encoder.pth') -torch.save(decoder.state_dict(), 'decoder.pth') -torch.save(vector_quantizer.state_dict(), 'vectorquantizer.pth') \ No newline at end of file + +from skimage.metrics import structural_similarity as ssim +import matplotlib.pyplot as plt +import numpy as np +import cv2 # Import OpenCV +import torch + +# Assuming your existing PyTorch models are already loaded +# encoder, decoder, vector_quantizer, and codebook + +encoder.eval() # Set the encoder model to evaluation mode +decoder.eval() # Set the decoder model to evaluation mode +vector_quantizer.eval() # Set the vector quantizer to evaluation mode + +with torch.no_grad(): # Turn off gradients for the upcoming operations + for img, _ in train_loader: # Loop through batches in the data loader + img = img.cuda() # Move the images to the GPU + z = encoder(img) # Encode the images to latent space + + batch_size, _, H, W = z.shape + z = z.permute(0, 2, 3, 1).contiguous().view(batch_size, H * W, -1) + + z_q = vector_quantizer(z) + z_q = z_q.view(batch_size, H, W, 512).permute(0, 3, 1, 2).contiguous() + + recon = decoder(z_q) # Decode the quantized vectors + + # Calculate SSIM + original_img = img.cpu().numpy().squeeze(1) # Convert to numpy and remove color channel dimension + reconstructed_img = recon.cpu().numpy().squeeze(1) # Convert to numpy and remove color channel dimension + + ssim_val = ssim(original_img[0], reconstructed_img[0], data_range=reconstructed_img.max() - reconstructed_img.min()) # Calculate SSIM + + print(f'SSIM: {ssim_val}') # Output SSIM value + + # Output images + plt.subplot(1, 2, 1) + plt.title('Original') + plt.imshow(original_img[0], cmap='gray') # Show original image + + plt.subplot(1, 2, 2) 
+ plt.title('Reconstructed') + plt.imshow(reconstructed_img[0], cmap='gray') # Show reconstructed image + + plt.show() # Show the plot + + break # Exit the loop after one iteration for demonstration From 7e3381299f648d4cc004a1a49f2bbace3b341504 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 20:52:47 +1000 Subject: [PATCH 06/20] refactoring monolith --- recognition/vq-vae_s47036219/modules.py | 230 ++++++++++-------------- 1 file changed, 94 insertions(+), 136 deletions(-) diff --git a/recognition/vq-vae_s47036219/modules.py b/recognition/vq-vae_s47036219/modules.py index 0f80e7870..e969f54b0 100644 --- a/recognition/vq-vae_s47036219/modules.py +++ b/recognition/vq-vae_s47036219/modules.py @@ -26,98 +26,107 @@ def ssim(img1, img2, C1=0.01**2, C2=0.03**2): print("Running on: ", device) class ResidualBlock(nn.Module): - def __init__(self, in_channels): + def __init__(self, in_channels, out_channels, intermediate_channels=None): super(ResidualBlock, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(in_channels, in_channels, 3, padding=1), - nn.BatchNorm2d(in_channels), - nn.ReLU(), - nn.Conv2d(in_channels, in_channels, 3, padding=1), - nn.BatchNorm2d(in_channels) - ) - def forward(self, x): - return x + self.conv(x) + if not intermediate_channels: + intermediate_channels = in_channels // 2 -# VQ-VAE Components -# VQ-VAE Components -class Encoder(nn.Module): - def __init__(self): - super(Encoder, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(1, 64, 4, stride=2, padding=1), - nn.BatchNorm2d(64), - nn.ReLU(), - nn.Dropout(0.2), - nn.Conv2d(64, 128, 4, stride=2, padding=1), - nn.BatchNorm2d(128), + self._residual_block = nn.Sequential( nn.ReLU(), - nn.Dropout(0.2), - nn.Conv2d(128, 256, 4, stride=2, padding=1), - nn.BatchNorm2d(256), + nn.Conv2d(in_channels, intermediate_channels, kernel_size=3, stride=1, padding=1, bias=False), nn.ReLU(), - nn.Dropout(0.2), - nn.Conv2d(256, 512, 4, stride=2, padding=1), - nn.BatchNorm2d(512), - nn.ReLU(), - nn.Dropout(0.2) + nn.Conv2d(intermediate_channels, out_channels, kernel_size=1, stride=1, bias=False) ) def forward(self, x): - x = self.conv(x) - #print(f"Encoder output shape: {x.shape}") - return x + return x + self._residual_block(x) -class Decoder(nn.Module): + +class Encoder(nn.Module): def __init__(self): - super(Decoder, self).__init__() - self.deconv = nn.Sequential( - nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1), - nn.BatchNorm2d(256), - nn.ReLU(), - nn.Dropout(0.2), - nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1), - nn.BatchNorm2d(128), - nn.ReLU(), - nn.Dropout(0.2), - nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1), - nn.BatchNorm2d(64), - nn.ReLU(), - nn.Dropout(0.2), - nn.ConvTranspose2d(64, 1, 4, stride=2, padding=1), - nn.Sigmoid() + super(Encoder, self).__init__() + + self.layers = nn.Sequential( + nn.Conv2d(1, 32, kernel_size=4, stride=2, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1), + ResidualBlock(64, 64), + ResidualBlock(64, 64) ) def forward(self, x): - x = self.deconv(x) - #print(f"Decoder output shape: {x.shape}") - return x + out = self.layers(x) + return out + class VectorQuantizer(nn.Module): - def __init__(self, codebook_size=512, code_dim=512): + def __init__(self, num_embeddings, embedding_dim): super(VectorQuantizer, self).__init__() - self.codebook = nn.Parameter(torch.randn(codebook_size, code_dim).cuda()) - def forward(self, z): - # z shape: [batch, num_vectors_per_batch, dim] - # codebook shape: 
[num_codebook_entries, dim] + self.num_embeddings = num_embeddings # Save as an instance variable + self.embedding = nn.Embedding(self.num_embeddings, embedding_dim) + self.embedding.weight.data.uniform_(-1./self.num_embeddings, 1./self.num_embeddings) + + def forward(self, x): + batch_size, channels, height, width = x.shape + x_flat = x.permute(0, 2, 3, 1).contiguous().view(-1, channels) + + # Now x_flat is [batch_size * height * width, channels] # Calculate distances - dist = ((z.unsqueeze(2) - self.codebook.unsqueeze(0).unsqueeze(0)) ** 2).sum(-1) + distances = ((x_flat.unsqueeze(1) - self.embedding.weight.unsqueeze(0)) ** 2).sum(-1) - # Find the closest codebook entry for each z vector - _, indices = dist.min(-1) + # Find the closest embeddings + _, indices = distances.min(1) + encodings = torch.zeros_like(distances).scatter_(1, indices.unsqueeze(1), 1) - # Fetch the corresponding codebook vectors - # The shape is the same as z ([batch, num_vectors_per_batch, dim]) - z_q = self.codebook[indices, :] + # Quantize the input image + quantized = self.embedding(indices) + + # Reshape the quantized tensor to the same shape as the input + quantized = quantized.view(batch_size, height, width, channels).permute(0, 3, 1, 2) + + return quantized + +class Decoder(nn.Module): + def __init__(self): + super(Decoder, self).__init__() + + self.layers = nn.Sequential( + nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), + ResidualBlock(64, 64), + ResidualBlock(64, 64), + nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1), + nn.ReLU(inplace=True), + nn.ConvTranspose2d(32, 1, kernel_size=4, stride=2, padding=1) + ) + + def forward(self, x): + return self.layers(x) + - return z_q +class VQVAE(nn.Module): + def __init__(self, num_embeddings=512, embedding_dim=64): + super(VQVAE, self).__init__() + + self.encoder = Encoder() + self.conv1 = nn.Conv2d(64, embedding_dim, kernel_size=1, stride=1) + self.vector_quantizer = VectorQuantizer(num_embeddings, embedding_dim) + self.decoder = Decoder() + + def forward(self, x): + enc = self.encoder(x) + enc = self.conv1(enc) + quantized = self.vector_quantizer(enc) + + dec = self.decoder(quantized) + return dec # Hyperparameters learning_rate = 1e-3 batch_size = 32 -num_epochs = 75 +num_epochs = 20 codebook_size = 512 # Weight for L2 and SSIM in final loss @@ -125,7 +134,7 @@ def forward(self, z): ssim_weight = 1 # Constants for early stopping -patience = 75 +patience = 10 best_val_loss = float('inf') counter = 0 @@ -147,29 +156,19 @@ def forward(self, z): # Models and Optimizers -encoder = Encoder().to(device) # Move to device -decoder = Decoder().to(device) # Move to device -vector_quantizer = VectorQuantizer().to(device) # Move to device +model = VQVAE(codebook_size).to(device) -optimizer = optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=learning_rate) -scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=2, verbose=True) +optimizer = optim.Adam(model.parameters(), lr=learning_rate) +scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True) # Training Loop for epoch in range(num_epochs): for i, (img, _) in enumerate(train_loader): img = img.to(device) # Move to device - # Forward pass - z = encoder(img) - batch_size, _, H, W = z.shape - - z = z.permute(0, 2, 3, 1).contiguous().view(batch_size, H * W, -1) - z_q = vector_quantizer(z) - z_q = z_q.view(batch_size, H, W, 512).permute(0, 3, 1, 2).contiguous() - - # Decoder - recon = decoder(z_q) - + # Forward pass through the 
entire model + recon = model(img) + # Calculate L2 loss l2_loss = ((recon - img) ** 2).sum() @@ -180,11 +179,11 @@ def forward(self, z): loss = l2_weight * l2_loss + ssim_weight * ssim_loss optimizer.zero_grad() - loss.backward(retain_graph=True) + loss.backward() optimizer.step() - # Update codebook - vector_quantizer.codebook.data = 0.99 * vector_quantizer.codebook.data + 0.01 * z.detach().mean(0).mean(0) + # Assuming you'd still want to update the codebook, though this needs to be integrated with the VQ process + #model.vector_quantizer.embedding.weight.data = 0.99 * model.vector_quantizer.embedding.weight.data + 0.01 * enc.detach().mean(0).mean(0) # Validation phase val_losses = [] @@ -216,65 +215,24 @@ def forward(self, z): avg_val_loss = sum(val_losses) / len(val_losses) print(f"Epoch [{epoch+1}/{num_epochs}], Training Loss: {loss.item():.4f}, Validation Loss: {avg_val_loss:.4f}") + # Update learning rate + scheduler.step(avg_val_loss) + # Print current learning rate + current_lr = optimizer.param_groups[0]['lr'] + print(f"Current Learning Rate: {current_lr}") + # Early Stopping if avg_val_loss < best_val_loss: best_val_loss = avg_val_loss - counter = 0 + counter = 0 # Reset counter when validation loss decreases else: counter += 1 if counter >= patience: print(f"Early stopping at epoch {epoch+1}") break -# Save Models -torch.save(encoder.state_dict(), 'encoder.pth') -torch.save(decoder.state_dict(), 'decoder.pth') -torch.save(vector_quantizer.state_dict(), 'vectorquantizer.pth') - - -from skimage.metrics import structural_similarity as ssim -import matplotlib.pyplot as plt -import numpy as np -import cv2 # Import OpenCV -import torch - -# Assuming your existing PyTorch models are already loaded -# encoder, decoder, vector_quantizer, and codebook - -encoder.eval() # Set the encoder model to evaluation mode -decoder.eval() # Set the decoder model to evaluation mode -vector_quantizer.eval() # Set the vector quantizer to evaluation mode - -with torch.no_grad(): # Turn off gradients for the upcoming operations - for img, _ in train_loader: # Loop through batches in the data loader - img = img.cuda() # Move the images to the GPU - z = encoder(img) # Encode the images to latent space - - batch_size, _, H, W = z.shape - z = z.permute(0, 2, 3, 1).contiguous().view(batch_size, H * W, -1) - - z_q = vector_quantizer(z) - z_q = z_q.view(batch_size, H, W, 512).permute(0, 3, 1, 2).contiguous() - - recon = decoder(z_q) # Decode the quantized vectors - - # Calculate SSIM - original_img = img.cpu().numpy().squeeze(1) # Convert to numpy and remove color channel dimension - reconstructed_img = recon.cpu().numpy().squeeze(1) # Convert to numpy and remove color channel dimension - - ssim_val = ssim(original_img[0], reconstructed_img[0], data_range=reconstructed_img.max() - reconstructed_img.min()) # Calculate SSIM - print(f'SSIM: {ssim_val}') # Output SSIM value - - # Output images - plt.subplot(1, 2, 1) - plt.title('Original') - plt.imshow(original_img[0], cmap='gray') # Show original image - - plt.subplot(1, 2, 2) - plt.title('Reconstructed') - plt.imshow(reconstructed_img[0], cmap='gray') # Show reconstructed image +# Save Models +torch.save(model.state_dict(), 'vqvae.pth') - plt.show() # Show the plot - break # Exit the loop after one iteration for demonstration From dd3b4d6ba716a8b64af4277d90beed614cc98910 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 21:28:14 +1000 Subject: [PATCH 07/20] working monolith implementation --- recognition/vq-vae_s47036219/modules.py | 61 
++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 7 deletions(-) diff --git a/recognition/vq-vae_s47036219/modules.py b/recognition/vq-vae_s47036219/modules.py index e969f54b0..05331bda4 100644 --- a/recognition/vq-vae_s47036219/modules.py +++ b/recognition/vq-vae_s47036219/modules.py @@ -126,19 +126,18 @@ def forward(self, x): # Hyperparameters learning_rate = 1e-3 batch_size = 32 -num_epochs = 20 +num_epochs = 40 codebook_size = 512 # Weight for L2 and SSIM in final loss -l2_weight = 0 +l2_weight = 0.05 ssim_weight = 1 # Constants for early stopping -patience = 10 +patience = 12 best_val_loss = float('inf') counter = 0 - # Data Loaders transform = transforms.Compose([ transforms.Grayscale(num_output_channels=1), @@ -164,6 +163,7 @@ def forward(self, x): # Training Loop for epoch in range(num_epochs): for i, (img, _) in enumerate(train_loader): + model.train() img = img.to(device) # Move to device # Forward pass through the entire model @@ -187,9 +187,7 @@ def forward(self, x): # Validation phase val_losses = [] - encoder.eval() - decoder.eval() - vector_quantizer.eval() + model.eval() with torch.no_grad(): for i, (img, _) in enumerate(val_loader): img = img.to(device) @@ -235,4 +233,53 @@ def forward(self, x): # Save Models torch.save(model.state_dict(), 'vqvae.pth') +import matplotlib.pyplot as plt +import numpy as np +import cv2 # Import OpenCV +import torch +import random + +# Assuming your existing PyTorch models are already loaded +# encoder, decoder, vector_quantizer, and codebook +model.load_state_dict(torch.load('vqvae.pth')) +val_losses = [] +model.eval() +with torch.no_grad(): + for i, (img, _) in enumerate(val_loader): + img = img.to(device) + + # Validation forward pass + z = model.encoder(img) + z = model.conv1(z) + z_q = model.vector_quantizer(z) + recon = model.decoder(z_q) + + # Validation losses + l2_loss = ((recon - img) ** 2).sum() + print(img.dtype, recon.dtype) + ssim_loss = 1 - ssim(img, recon) + loss = l2_weight * l2_loss + ssim_weight * ssim_loss + + val_losses.append(loss.item()) + + # Calculate SSIM + ssim_val = ssim(img, recon).item() # We already have a PyTorch-based SSIM function + print(f'SSIM: {ssim_val}') # Output SSIM value + + # Assuming the images are single channel and the channel dimension is at the second position + original_img = img.cpu().numpy().squeeze(1) + reconstructed_img = recon.cpu().numpy().squeeze(1) + + # Output images (for first image in the batch) + plt.subplot(1, 2, 1) + plt.title('Original') + plt.imshow(original_img[0], cmap='gray') + + plt.subplot(1, 2, 2) + plt.title('Reconstructed') + plt.imshow(reconstructed_img[0], cmap='gray') + + plt.show() + + break # Exit the loop after one iteration for demonstration From 15480a8a22af7ced0e5d51387036121128d51b81 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 21:52:29 +1000 Subject: [PATCH 08/20] readme intro --- recognition/vq-vae_s47036219/README.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/recognition/vq-vae_s47036219/README.md b/recognition/vq-vae_s47036219/README.md index 3efd78623..3c67ec21f 100644 --- a/recognition/vq-vae_s47036219/README.md +++ b/recognition/vq-vae_s47036219/README.md @@ -1,2 +1,11 @@ -hello world! +# VQ-VAE for the ADNI Dataset +**Author**: Connor Armstrong (s4703621) + + +# Project: +The goal of this task was to implement a Vector Quantized Variational Autoencoder (henceforth referred to as a VQ-VAE). 
The VQ-VAE is an extension of a typical variational autoencoder that performs discrete latent representation learning: the model learns to represent data with latent variables that take on distinct discrete values rather than a continuous range. This is done by passing the encoder's output through a vector quantisation layer, which maps each continuous encoding to the closest vector in a learned embedding space (the codebook). This makes the VQ-VAE very effective at managing discrete structured data and image reconstruction/generation. + + +## VQ-VAE and the ADNI Dataset: +The ADNI (Alzheimer’s Disease Neuroimaging Initiative) dataset is a collection of neuroimaging data, curated with the primary intent of studying Alzheimer's disease. In the context of the ADNI dataset, a VQ-VAE can be applied to condense complex brain scans into a more manageable, lower-dimensional, discrete latent space. By doing so, it can effectively capture meaningful patterns and structures inherent in the images. \ No newline at end of file From ce5117b75216dc3f3be534829be7ad9ceaaf379b Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 22:04:12 +1000 Subject: [PATCH 09/20] modules refactor --- recognition/vq-vae_s47036219/README.md | 2 + recognition/vq-vae_s47036219/modules.py | 214 +++--------------- recognition/vq-vae_s47036219/utils.py | 287 ++++++++++++++++++++++++ 3 files changed, 318 insertions(+), 185 deletions(-) create mode 100644 recognition/vq-vae_s47036219/utils.py diff --git a/recognition/vq-vae_s47036219/README.md b/recognition/vq-vae_s47036219/README.md index 3c67ec21f..093ea10ff 100644 --- a/recognition/vq-vae_s47036219/README.md +++ b/recognition/vq-vae_s47036219/README.md @@ -4,6 +4,8 @@ # Project: + +## The Vector Quantized Variational Autoencoder The goal of this task was to implement a Vector Quantized Variational Autoencoder (henceforth referred to as a VQ-VAE). The VQ-VAE is an extension of a typical variational autoencoder that performs discrete latent representation learning: the model learns to represent data with latent variables that take on distinct discrete values rather than a continuous range. This is done by passing the encoder's output through a vector quantisation layer, which maps each continuous encoding to the closest vector in a learned embedding space (the codebook). This makes the VQ-VAE very effective at managing discrete structured data and image reconstruction/generation.
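The quantisation step described in the README above comes down to a nearest-neighbour lookup against the codebook. A minimal, self-contained sketch of just that step (sizes chosen to match the 512-entry, 64-dimensional codebook used in the modules below; the names and shapes are illustrative assumptions, not the repository's VectorQuantizer):

import torch

# Hypothetical batch of 8 continuous encoder output vectors, each of dimension 64.
z = torch.randn(8, 64)
# A codebook of 512 learned embedding vectors of the same dimension.
codebook = torch.randn(512, 64)

# Squared Euclidean distance from every encoding to every codebook entry: shape [8, 512].
dist = (z.unsqueeze(1) - codebook.unsqueeze(0)).pow(2).sum(-1)

# Replace each encoding with its nearest codebook entry.
indices = dist.argmin(dim=1)   # [8] discrete codes
z_q = codebook[indices]        # [8, 64] quantised representation, same shape as z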
diff --git a/recognition/vq-vae_s47036219/modules.py b/recognition/vq-vae_s47036219/modules.py index 05331bda4..4cb91cb49 100644 --- a/recognition/vq-vae_s47036219/modules.py +++ b/recognition/vq-vae_s47036219/modules.py @@ -1,29 +1,5 @@ import torch import torch.nn as nn -import torch.optim as optim -from torch.utils.data import DataLoader -from torchvision import datasets, transforms -import pytorch_ssim - - -def ssim(img1, img2, C1=0.01**2, C2=0.03**2): - mu1 = img1.mean(dim=[2, 3], keepdim=True) - mu2 = img2.mean(dim=[2, 3], keepdim=True) - - sigma1_sq = (img1 - mu1).pow(2).mean(dim=[2, 3], keepdim=True) - sigma2_sq = (img2 - mu2).pow(2).mean(dim=[2, 3], keepdim=True) - sigma12 = ((img1 - mu1)*(img2 - mu2)).mean(dim=[2, 3], keepdim=True) - - ssim_n = (2*mu1*mu2 + C1) * (2*sigma12 + C2) - ssim_d = (mu1.pow(2) + mu2.pow(2) + C1) * (sigma1_sq + sigma2_sq + C2) - - ssim_val = ssim_n / ssim_d - - return ssim_val.mean() - -# Check for CUDA availability and set the device accordingly -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -print("Running on: ", device) class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, intermediate_channels=None): @@ -49,8 +25,15 @@ def __init__(self): self.layers = nn.Sequential( nn.Conv2d(1, 32, kernel_size=4, stride=2, padding=1), + nn.BatchNorm2d(32), nn.ReLU(inplace=True), + nn.Dropout(0.5), + nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1), + nn.BatchNorm2d(64), + nn.ReLU(inplace=True), + nn.Dropout(0.5), + ResidualBlock(64, 64), ResidualBlock(64, 64) ) @@ -59,7 +42,6 @@ def forward(self, x): out = self.layers(x) return out - class VectorQuantizer(nn.Module): def __init__(self, num_embeddings, embedding_dim): super(VectorQuantizer, self).__init__() @@ -95,17 +77,24 @@ def __init__(self): self.layers = nn.Sequential( nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(64), + nn.ReLU(inplace=True), + nn.Dropout(0.5), + ResidualBlock(64, 64), ResidualBlock(64, 64), + nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1), + nn.BatchNorm2d(32), nn.ReLU(inplace=True), + nn.Dropout(0.5), + nn.ConvTranspose2d(32, 1, kernel_size=4, stride=2, padding=1) ) def forward(self, x): return self.layers(x) - class VQVAE(nn.Module): def __init__(self, num_embeddings=512, embedding_dim=64): super(VQVAE, self).__init__() @@ -122,164 +111,19 @@ def forward(self, x): dec = self.decoder(quantized) return dec + + +def ssim(img1, img2, C1=0.01**2, C2=0.03**2): + mu1 = img1.mean(dim=[2, 3], keepdim=True) + mu2 = img2.mean(dim=[2, 3], keepdim=True) + + sigma1_sq = (img1 - mu1).pow(2).mean(dim=[2, 3], keepdim=True) + sigma2_sq = (img2 - mu2).pow(2).mean(dim=[2, 3], keepdim=True) + sigma12 = ((img1 - mu1)*(img2 - mu2)).mean(dim=[2, 3], keepdim=True) + + ssim_n = (2*mu1*mu2 + C1) * (2*sigma12 + C2) + ssim_d = (mu1.pow(2) + mu2.pow(2) + C1) * (sigma1_sq + sigma2_sq + C2) -# Hyperparameters -learning_rate = 1e-3 -batch_size = 32 -num_epochs = 40 -codebook_size = 512 - -# Weight for L2 and SSIM in final loss -l2_weight = 0.05 -ssim_weight = 1 - -# Constants for early stopping -patience = 12 -best_val_loss = float('inf') -counter = 0 - -# Data Loaders -transform = transforms.Compose([ - transforms.Grayscale(num_output_channels=1), - transforms.Resize((64, 64)), - transforms.ToTensor(), - #transforms.Normalize(mean=[0.5], std=[0.5]) -]) - -train_dataset = datasets.ImageFolder(root='C:/Users/Connor/Documents/comp3710/dataset/ADNI/AD_NC/train', transform=transform) -train_loader = DataLoader(train_dataset, 
batch_size=batch_size, shuffle=True) - -# Validation DataLoader -val_dataset = datasets.ImageFolder(root='C:/Users/Connor/Documents/comp3710/dataset/ADNI/AD_NC/test', transform=transform) -val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False) - - -# Models and Optimizers -model = VQVAE(codebook_size).to(device) - -optimizer = optim.Adam(model.parameters(), lr=learning_rate) -scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True) - -# Training Loop -for epoch in range(num_epochs): - for i, (img, _) in enumerate(train_loader): - model.train() - img = img.to(device) # Move to device - - # Forward pass through the entire model - recon = model(img) - - # Calculate L2 loss - l2_loss = ((recon - img) ** 2).sum() - - # Calculate SSIM loss - ssim_loss = 1 - ssim(img, recon) - - # Final Loss - loss = l2_weight * l2_loss + ssim_weight * ssim_loss - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - # Assuming you'd still want to update the codebook, though this needs to be integrated with the VQ process - #model.vector_quantizer.embedding.weight.data = 0.99 * model.vector_quantizer.embedding.weight.data + 0.01 * enc.detach().mean(0).mean(0) - - # Validation phase - val_losses = [] - model.eval() - with torch.no_grad(): - for i, (img, _) in enumerate(val_loader): - img = img.to(device) - - # Validation forward pass - z = encoder(img) - batch_size, _, H, W = z.shape - - z = z.permute(0, 2, 3, 1).contiguous().view(batch_size, H * W, -1) - z_q = vector_quantizer(z) - z_q = z_q.view(batch_size, H, W, 512).permute(0, 3, 1, 2).contiguous() - - # Decoder - recon = decoder(z_q) - - # Validation losses - l2_loss = ((recon - img) ** 2).sum() - ssim_loss = 1 - ssim(img, recon) - loss = l2_weight * l2_loss + ssim_weight * ssim_loss - - val_losses.append(loss.item()) - - avg_val_loss = sum(val_losses) / len(val_losses) - print(f"Epoch [{epoch+1}/{num_epochs}], Training Loss: {loss.item():.4f}, Validation Loss: {avg_val_loss:.4f}") - - # Update learning rate - scheduler.step(avg_val_loss) - # Print current learning rate - current_lr = optimizer.param_groups[0]['lr'] - print(f"Current Learning Rate: {current_lr}") - - # Early Stopping - if avg_val_loss < best_val_loss: - best_val_loss = avg_val_loss - counter = 0 # Reset counter when validation loss decreases - else: - counter += 1 - if counter >= patience: - print(f"Early stopping at epoch {epoch+1}") - break - - -# Save Models -torch.save(model.state_dict(), 'vqvae.pth') - -import matplotlib.pyplot as plt -import numpy as np -import cv2 # Import OpenCV -import torch -import random - -# Assuming your existing PyTorch models are already loaded -# encoder, decoder, vector_quantizer, and codebook -model.load_state_dict(torch.load('vqvae.pth')) -val_losses = [] -model.eval() - -with torch.no_grad(): - for i, (img, _) in enumerate(val_loader): - img = img.to(device) - - # Validation forward pass - z = model.encoder(img) - z = model.conv1(z) - z_q = model.vector_quantizer(z) - recon = model.decoder(z_q) - - # Validation losses - l2_loss = ((recon - img) ** 2).sum() - print(img.dtype, recon.dtype) - ssim_loss = 1 - ssim(img, recon) - loss = l2_weight * l2_loss + ssim_weight * ssim_loss - - val_losses.append(loss.item()) - - # Calculate SSIM - ssim_val = ssim(img, recon).item() # We already have a PyTorch-based SSIM function - print(f'SSIM: {ssim_val}') # Output SSIM value - - # Assuming the images are single channel and the channel dimension is at the second position - original_img = 
img.cpu().numpy().squeeze(1) - reconstructed_img = recon.cpu().numpy().squeeze(1) + ssim_val = ssim_n / ssim_d - # Output images (for first image in the batch) - plt.subplot(1, 2, 1) - plt.title('Original') - plt.imshow(original_img[0], cmap='gray') - - plt.subplot(1, 2, 2) - plt.title('Reconstructed') - plt.imshow(reconstructed_img[0], cmap='gray') - - plt.show() - - break # Exit the loop after one iteration for demonstration + return ssim_val.mean() \ No newline at end of file diff --git a/recognition/vq-vae_s47036219/utils.py b/recognition/vq-vae_s47036219/utils.py new file mode 100644 index 000000000..fd9c7db58 --- /dev/null +++ b/recognition/vq-vae_s47036219/utils.py @@ -0,0 +1,287 @@ +import torch +import torch.nn as nn +import torch.optim as optim +from torch.utils.data import DataLoader +from torchvision import datasets, transforms +import pytorch_ssim + + +def ssim(img1, img2, C1=0.01**2, C2=0.03**2): + mu1 = img1.mean(dim=[2, 3], keepdim=True) + mu2 = img2.mean(dim=[2, 3], keepdim=True) + + sigma1_sq = (img1 - mu1).pow(2).mean(dim=[2, 3], keepdim=True) + sigma2_sq = (img2 - mu2).pow(2).mean(dim=[2, 3], keepdim=True) + sigma12 = ((img1 - mu1)*(img2 - mu2)).mean(dim=[2, 3], keepdim=True) + + ssim_n = (2*mu1*mu2 + C1) * (2*sigma12 + C2) + ssim_d = (mu1.pow(2) + mu2.pow(2) + C1) * (sigma1_sq + sigma2_sq + C2) + + ssim_val = ssim_n / ssim_d + + return ssim_val.mean() + +# Check for CUDA availability and set the device accordingly +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +print("Running on: ", device) + +class ResidualBlock(nn.Module): + def __init__(self, in_channels, out_channels, intermediate_channels=None): + super(ResidualBlock, self).__init__() + + if not intermediate_channels: + intermediate_channels = in_channels // 2 + + self._residual_block = nn.Sequential( + nn.ReLU(), + nn.Conv2d(in_channels, intermediate_channels, kernel_size=3, stride=1, padding=1, bias=False), + nn.ReLU(), + nn.Conv2d(intermediate_channels, out_channels, kernel_size=1, stride=1, bias=False) + ) + + def forward(self, x): + return x + self._residual_block(x) + + +class Encoder(nn.Module): + def __init__(self): + super(Encoder, self).__init__() + + self.layers = nn.Sequential( + nn.Conv2d(1, 32, kernel_size=4, stride=2, padding=1), + nn.BatchNorm2d(32), + nn.ReLU(inplace=True), + nn.Dropout(0.5), + + nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1), + nn.BatchNorm2d(64), + nn.ReLU(inplace=True), + nn.Dropout(0.5), + + ResidualBlock(64, 64), + ResidualBlock(64, 64) + ) + + def forward(self, x): + out = self.layers(x) + return out + +class VectorQuantizer(nn.Module): + def __init__(self, num_embeddings, embedding_dim): + super(VectorQuantizer, self).__init__() + + self.num_embeddings = num_embeddings # Save as an instance variable + self.embedding = nn.Embedding(self.num_embeddings, embedding_dim) + self.embedding.weight.data.uniform_(-1./self.num_embeddings, 1./self.num_embeddings) + + def forward(self, x): + batch_size, channels, height, width = x.shape + x_flat = x.permute(0, 2, 3, 1).contiguous().view(-1, channels) + + # Now x_flat is [batch_size * height * width, channels] + + # Calculate distances + distances = ((x_flat.unsqueeze(1) - self.embedding.weight.unsqueeze(0)) ** 2).sum(-1) + + # Find the closest embeddings + _, indices = distances.min(1) + encodings = torch.zeros_like(distances).scatter_(1, indices.unsqueeze(1), 1) + + # Quantize the input image + quantized = self.embedding(indices) + + # Reshape the quantized tensor to the same shape as the input + 
quantized = quantized.view(batch_size, height, width, channels).permute(0, 3, 1, 2) + + return quantized + +class Decoder(nn.Module): + def __init__(self): + super(Decoder, self).__init__() + + self.layers = nn.Sequential( + nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), + nn.BatchNorm2d(64), + nn.ReLU(inplace=True), + nn.Dropout(0.5), + + ResidualBlock(64, 64), + ResidualBlock(64, 64), + + nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1), + nn.BatchNorm2d(32), + nn.ReLU(inplace=True), + nn.Dropout(0.5), + + nn.ConvTranspose2d(32, 1, kernel_size=4, stride=2, padding=1) + ) + + def forward(self, x): + return self.layers(x) + +class VQVAE(nn.Module): + def __init__(self, num_embeddings=512, embedding_dim=64): + super(VQVAE, self).__init__() + + self.encoder = Encoder() + self.conv1 = nn.Conv2d(64, embedding_dim, kernel_size=1, stride=1) + self.vector_quantizer = VectorQuantizer(num_embeddings, embedding_dim) + self.decoder = Decoder() + + def forward(self, x): + enc = self.encoder(x) + enc = self.conv1(enc) + quantized = self.vector_quantizer(enc) + + dec = self.decoder(quantized) + return dec + +# Hyperparameters +learning_rate = 1e-3 +batch_size = 32 +num_epochs = 40 +codebook_size = 512 + +# Weight for L2 and SSIM in final loss +l2_weight = 0.05 +ssim_weight = 1 + +# Constants for early stopping +patience = 12 +best_val_loss = float('inf') +counter = 0 + +# Data Loaders +transform = transforms.Compose([ + transforms.Grayscale(num_output_channels=1), + transforms.Resize((64, 64)), + transforms.ToTensor(), + #transforms.Normalize(mean=[0.5], std=[0.5]) +]) + +train_dataset = datasets.ImageFolder(root='C:/Users/Connor/Documents/comp3710/dataset/ADNI/AD_NC/train', transform=transform) +train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) + +# Validation DataLoader +val_dataset = datasets.ImageFolder(root='C:/Users/Connor/Documents/comp3710/dataset/ADNI/AD_NC/test', transform=transform) +val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False) + + +# Models and Optimizers +model = VQVAE(codebook_size).to(device) + +optimizer = optim.Adam(model.parameters(), lr=learning_rate) +scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True) + +# Training Loop +for epoch in range(num_epochs): + for i, (img, _) in enumerate(train_loader): + model.train() + img = img.to(device) # Move to device + + # Forward pass through the entire model + recon = model(img) + + # Calculate L2 loss + l2_loss = ((recon - img) ** 2).sum() + + # Calculate SSIM loss + ssim_loss = 1 - ssim(img, recon) + + # Final Loss + loss = l2_weight * l2_loss + ssim_weight * ssim_loss + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + # Validation phase + val_losses = [] + model.eval() + with torch.no_grad(): + for i, (img, _) in enumerate(val_loader): + img = img.to(device) + + # Validation forward pass + recon = model(img) # Changed this line to use the VQVAE model + + # Validation losses + l2_loss = ((recon - img) ** 2).sum() + ssim_loss = 1 - ssim(img, recon) + loss = l2_weight * l2_loss + ssim_weight * ssim_loss + + val_losses.append(loss.item()) + + avg_val_loss = sum(val_losses) / len(val_losses) + print(f"Epoch [{epoch+1}/{num_epochs}], Training Loss: {loss.item():.4f}, Validation Loss: {avg_val_loss:.4f}") + + # Update learning rate + scheduler.step(avg_val_loss) + # Print current learning rate + current_lr = optimizer.param_groups[0]['lr'] + print(f"Current Learning Rate: {current_lr}") + + # Early Stopping + if 
avg_val_loss < best_val_loss: + best_val_loss = avg_val_loss + counter = 0 # Reset counter when validation loss decreases + else: + counter += 1 + if counter >= patience: + print(f"Early stopping at epoch {epoch+1}") + break + + +# Save Models +torch.save(model.state_dict(), 'vqvae.pth') + + +import matplotlib.pyplot as plt +import numpy as np +import cv2 # Import OpenCV +import torch +import random + +# Assuming your existing PyTorch models are already loaded +# encoder, decoder, vector_quantizer, and codebook +model.load_state_dict(torch.load('vqvae.pth')) +val_losses = [] +model.eval() + +with torch.no_grad(): + for i, (img, _) in enumerate(val_loader): + img = img.to(device) + + # Validation forward pass + z = model.encoder(img) + z = model.conv1(z) + z_q = model.vector_quantizer(z) + recon = model.decoder(z_q) + + # Validation losses + l2_loss = ((recon - img) ** 2).sum() + ssim_loss = 1 - ssim(img, recon) + loss = l2_weight * l2_loss + ssim_weight * ssim_loss + + val_losses.append(loss.item()) + + # Calculate SSIM + ssim_val = ssim(img, recon).item() # We already have a PyTorch-based SSIM function + print(f'SSIM: {ssim_val}') # Output SSIM value + + # Assuming the images are single channel and the channel dimension is at the second position + original_img = img.cpu().numpy().squeeze(1) + reconstructed_img = recon.cpu().numpy().squeeze(1) + + # Output images (for first image in the batch) + plt.subplot(1, 2, 1) + plt.title('Original') + plt.imshow(original_img[0], cmap='gray') + + plt.subplot(1, 2, 2) + plt.title('Reconstructed') + plt.imshow(reconstructed_img[0], cmap='gray') + + plt.show() + + break # Exit the loop after one iteration for demonstration \ No newline at end of file From 34233e08a769a2e179f9f87687b403f3e1f4c2c2 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 22:22:35 +1000 Subject: [PATCH 10/20] refactoring from monolith --- recognition/vq-vae_s47036219/modules.py | 4 ++ recognition/vq-vae_s47036219/predict.py | 27 ++++++++ recognition/vq-vae_s47036219/train.py | 88 +++++++++++++++++++++++++ 3 files changed, 119 insertions(+) diff --git a/recognition/vq-vae_s47036219/modules.py b/recognition/vq-vae_s47036219/modules.py index 4cb91cb49..32577d941 100644 --- a/recognition/vq-vae_s47036219/modules.py +++ b/recognition/vq-vae_s47036219/modules.py @@ -1,6 +1,10 @@ import torch import torch.nn as nn + +device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +print("Running on: ", device) + class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, intermediate_channels=None): super(ResidualBlock, self).__init__() diff --git a/recognition/vq-vae_s47036219/predict.py b/recognition/vq-vae_s47036219/predict.py index e69de29bb..b558982a9 100644 --- a/recognition/vq-vae_s47036219/predict.py +++ b/recognition/vq-vae_s47036219/predict.py @@ -0,0 +1,27 @@ +# CONSTANTS AND HYPERPARAMETERS: + +from modules import VQVAE, device + + +LEARNING_RATE = 1e-3 +BATCH_SIZE = 32 +NUM_EPOCHS = 40 # realistically stopped earlier by the validation set +CODEBOOK_SIZE = 512 + +# Weights for the loss functions +L2_WEIGHT = 0.05 +SSIM_WEIGHT = 1 + +# Constants for early stopping +PATIENCE = 12 +best_val_loss = float('inf') +counter = 0 + + + + + + + +def main(): + model = model = VQVAE(CODEBOOK_SIZE).to(device) \ No newline at end of file diff --git a/recognition/vq-vae_s47036219/train.py b/recognition/vq-vae_s47036219/train.py index e69de29bb..689515e09 100644 --- a/recognition/vq-vae_s47036219/train.py +++ b/recognition/vq-vae_s47036219/train.py 
@@ -0,0 +1,88 @@ +# CONSTANTS AND HYPERPARAMETERS: + +import torch +import torch.nn as nn +import torch.optim as optim +from modules import VQVAE, device, ssim + + +LEARNING_RATE = 1e-3 +BATCH_SIZE = 32 +NUM_EPOCHS = 40 # realistically stopped earlier by the validation set +CODEBOOK_SIZE = 512 + +# Weights for the loss functions +L2_WEIGHT = 0.05 +SSIM_WEIGHT = 1 + +# Constants for early stopping +PATIENCE = 12 +best_val_loss = float('inf') +counter = 0 + + +def train(vqvae, train_loader, validation_loader): + optimizer = optim.Adam(vqvae.parameters(), lr=LEARNING_RATE) + scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True) + # Training Loop + for epoch in range(NUM_EPOCHS): + for i, (img, _) in enumerate(train_loader): + vqvae.train() + img = img.to(device) # Move to device + + # Forward pass through the entire model + recon = vqvae(img) + + # Calculate L2 loss + l2_loss = ((recon - img) ** 2).sum() + + # Calculate SSIM loss + ssim_loss = 1 - ssim(img, recon) + + # Final Loss + loss = L2_WEIGHT * l2_loss + SSIM_WEIGHT * ssim_loss + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + # Validation phase + val_losses = [] + vqvae.eval() + with torch.no_grad(): + for i, (img, _) in enumerate(validation_loader): + img = img.to(device) + + # Validation forward pass + recon = vqvae(img) # Changed this line to use the VQVAE model + + # Validation losses + l2_loss = ((recon - img) ** 2).sum() + ssim_loss = 1 - ssim(img, recon) + loss = L2_WEIGHT * l2_loss + SSIM_WEIGHT * ssim_loss + + val_losses.append(loss.item()) + + avg_val_loss = sum(val_losses) / len(val_losses) + print(f"Epoch [{epoch+1}/{NUM_EPOCHS}], Training Loss: {loss.item():.4f}, Validation Loss: {avg_val_loss:.4f}") + + # Update learning rate + scheduler.step(avg_val_loss) + # Print current learning rate + current_lr = optimizer.param_groups[0]['lr'] + print(f"Current Learning Rate: {current_lr}") + + # Early Stopping + if avg_val_loss < best_val_loss: + best_val_loss = avg_val_loss + counter = 0 # Reset counter when validation loss decreases + else: + counter += 1 + if counter >= PATIENCE: + print(f"Early stopping at epoch {epoch+1}") + break + +def main(): + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + print("Running on: ", device) + model = VQVAE(CODEBOOK_SIZE).to(device) \ No newline at end of file From 44a507687307ef43baf4792c4c53b36fe9ddb789 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 22:34:38 +1000 Subject: [PATCH 11/20] dataset.py implementation --- recognition/vq-vae_s47036219/dataset.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/recognition/vq-vae_s47036219/dataset.py b/recognition/vq-vae_s47036219/dataset.py index e69de29bb..104949b36 100644 --- a/recognition/vq-vae_s47036219/dataset.py +++ b/recognition/vq-vae_s47036219/dataset.py @@ -0,0 +1,23 @@ +from torch.utils.data import DataLoader, random_split +from torchvision import datasets, transforms + +def get_dataloaders(train_string, test_validation_string, batch_size): + transform = transforms.Compose([ + transforms.Grayscale(num_output_channels=1), + transforms.Resize((64, 64)), + transforms.ToTensor(), + #transforms.Normalize(mean=[0.5], std=[0.5]) + ]) + train_dataset = datasets.ImageFolder(root='train_string', transform=transform) + train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) + + full_test_dataset = datasets.ImageFolder(root='test_validation_string', transform=transform) + test_size = int(0.3 * 
len(full_test_dataset)) + val_size = len(full_test_dataset) - test_size + + test_dataset, val_dataset = random_split(full_test_dataset, [test_size, val_size]) + + test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False) + val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False) + + return train_loader, val_loader, test_loader From aa135a2bcf283dff14ebbb9e7d07e14509019402 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 22:46:39 +1000 Subject: [PATCH 12/20] predict and train initial implementations --- recognition/vq-vae_s47036219/predict.py | 79 ++++++++++++++++++++----- recognition/vq-vae_s47036219/train.py | 11 +++- 2 files changed, 74 insertions(+), 16 deletions(-) diff --git a/recognition/vq-vae_s47036219/predict.py b/recognition/vq-vae_s47036219/predict.py index b558982a9..c68d3e6e5 100644 --- a/recognition/vq-vae_s47036219/predict.py +++ b/recognition/vq-vae_s47036219/predict.py @@ -1,27 +1,76 @@ -# CONSTANTS AND HYPERPARAMETERS: +import torch +from modules import VQVAE, device, ssim +from dataset import get_dataloaders +from train import SSIM_WEIGHT, L2_WEIGHT, BATCH_SIZE +import matplotlib.pyplot as plt +import os -from modules import VQVAE, device +def evaluate(test_loader): + model = VQVAE().to(device) + model.load_state_dict(torch.load('vqvae.pth')) + model.eval() + + highest_ssim_val = float('-inf') # Initialize with negative infinity + lowest_ssim_val = float('inf') # Initialize with positive infinity + highest_ssim_img = None + highest_ssim_recon = None + lowest_ssim_img = None + lowest_ssim_recon = None + + val_losses = [] + with torch.no_grad(): + for i, (img, _) in enumerate(test_loader): + img = img.to(device) + + # Validation forward pass + z = model.encoder(img) + z = model.conv1(z) + z_q = model.vector_quantizer(z) + recon = model.decoder(z_q) -LEARNING_RATE = 1e-3 -BATCH_SIZE = 32 -NUM_EPOCHS = 40 # realistically stopped earlier by the validation set -CODEBOOK_SIZE = 512 + # Validation losses + l2_loss = ((recon - img) ** 2).sum() + ssim_loss = 1 - ssim(img, recon) + loss = L2_WEIGHT * l2_loss + SSIM_WEIGHT * ssim_loss + val_losses.append(loss.item()) -# Weights for the loss functions -L2_WEIGHT = 0.05 -SSIM_WEIGHT = 1 - -# Constants for early stopping -PATIENCE = 12 -best_val_loss = float('inf') -counter = 0 + # Calculate SSIM + ssim_val = ssim(img, recon).item() + print(f'SSIM: {ssim_val}') # Output SSIM value + # Update highest and lowest SSIM values and corresponding images + if ssim_val > highest_ssim_val: + highest_ssim_val = ssim_val + highest_ssim_img = img.cpu().numpy().squeeze(1) + highest_ssim_recon = recon.cpu().numpy().squeeze(1) + + if ssim_val < lowest_ssim_val: + lowest_ssim_val = ssim_val + lowest_ssim_img = img.cpu().numpy().squeeze(1) + lowest_ssim_recon = recon.cpu().numpy().squeeze(1) + # Output images with the highest and lowest SSIM values + plt.figure(figsize=(10, 5)) + plt.subplot(2, 2, 1) + plt.title(f'Original Highest SSIM: {highest_ssim_val}') + plt.imshow(highest_ssim_img[0], cmap='gray') + plt.subplot(2, 2, 2) + plt.title('Reconstructed') + plt.imshow(highest_ssim_recon[0], cmap='gray') + plt.subplot(2, 2, 3) + plt.title(f'Original Lowest SSIM: {lowest_ssim_val}') + plt.imshow(lowest_ssim_img[0], cmap='gray') + plt.subplot(2, 2, 4) + plt.title('Reconstructed') + plt.imshow(lowest_ssim_recon[0], cmap='gray') + plt.tight_layout() + plt.show() + def main(): - model = model = VQVAE(CODEBOOK_SIZE).to(device) \ No newline at end of file + _, _, test_set = get_dataloaders(BATCH_SIZE) \ 
No newline at end of file diff --git a/recognition/vq-vae_s47036219/train.py b/recognition/vq-vae_s47036219/train.py index 689515e09..e74ac164b 100644 --- a/recognition/vq-vae_s47036219/train.py +++ b/recognition/vq-vae_s47036219/train.py @@ -4,6 +4,8 @@ import torch.nn as nn import torch.optim as optim from modules import VQVAE, device, ssim +from dataset import get_dataloaders +import os LEARNING_RATE = 1e-3 @@ -81,8 +83,15 @@ def train(vqvae, train_loader, validation_loader): if counter >= PATIENCE: print(f"Early stopping at epoch {epoch+1}") break + torch.save(vqvae.state_dict(), 'vqvae.pth') def main(): device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print("Running on: ", device) - model = VQVAE(CODEBOOK_SIZE).to(device) \ No newline at end of file + train_loader, validation_loader, _ = get_dataloaders(BATCH_SIZE) + + model = VQVAE(CODEBOOK_SIZE).to(device) + train(model, train_loader, validation_loader) + +if __name__ == "__main__": + main() \ No newline at end of file From bc6451f01ddb3dd2c6b9b93689ded8cb69a53e91 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 22:51:58 +1000 Subject: [PATCH 13/20] final predict and train implementations --- recognition/vq-vae_s47036219/predict.py | 18 ++++++++++++++++-- recognition/vq-vae_s47036219/train.py | 10 ++++++++-- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/recognition/vq-vae_s47036219/predict.py b/recognition/vq-vae_s47036219/predict.py index c68d3e6e5..52aafd633 100644 --- a/recognition/vq-vae_s47036219/predict.py +++ b/recognition/vq-vae_s47036219/predict.py @@ -1,7 +1,7 @@ import torch from modules import VQVAE, device, ssim from dataset import get_dataloaders -from train import SSIM_WEIGHT, L2_WEIGHT, BATCH_SIZE +from train import SSIM_WEIGHT, L2_WEIGHT, BATCH_SIZE, train_new_model import matplotlib.pyplot as plt import os @@ -73,4 +73,18 @@ def evaluate(test_loader): plt.show() def main(): - _, _, test_set = get_dataloaders(BATCH_SIZE) \ No newline at end of file + weight_file_path = 'vqvae.pth' + + train, validate, test = get_dataloaders(BATCH_SIZE) + + if os.path.exists(weight_file_path): + print("Weights exist -> Evaluating Model...") + evaluate(test) + + else: + print(f"Weight file {weight_file_path} does not exist.") + print("Training model now...") + train_new_model(train, validate) + + + \ No newline at end of file diff --git a/recognition/vq-vae_s47036219/train.py b/recognition/vq-vae_s47036219/train.py index e74ac164b..089b06792 100644 --- a/recognition/vq-vae_s47036219/train.py +++ b/recognition/vq-vae_s47036219/train.py @@ -5,7 +5,7 @@ import torch.nn as nn import torch.optim as optim from modules import VQVAE, device, ssim from dataset import get_dataloaders -import os + LEARNING_RATE = 1e-3 @@ -84,7 +84,13 @@ def train(vqvae, train_loader, validation_loader): print(f"Early stopping at epoch {epoch+1}") break torch.save(vqvae.state_dict(), 'vqvae.pth') - + +def train_new_model(train_loader, validation_loader): # Called from predict.py when no saved weights exist.
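+    # Mirrors the setup in main(): builds a fresh VQVAE and trains it from scratch.
+    # predict.py falls back to this helper when no saved 'vqvae.pth' checkpoint is found.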
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + print("Running on: ", device) + model = VQVAE(CODEBOOK_SIZE).to(device) + model = train(model, train, validation) + def main(): device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print("Running on: ", device) From 973cfb418c8ab195b4c520fe5576b1cdf0730d84 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 23:10:41 +1000 Subject: [PATCH 14/20] major bugfixes --- .gitignore | 3 + recognition/vq-vae_s47036219/dataset.py | 4 +- recognition/vq-vae_s47036219/modules.py | 3 +- recognition/vq-vae_s47036219/predict.py | 28 ++- recognition/vq-vae_s47036219/train.py | 18 +- recognition/vq-vae_s47036219/utils.py | 287 ------------------------ 6 files changed, 36 insertions(+), 307 deletions(-) create mode 100644 .gitignore delete mode 100644 recognition/vq-vae_s47036219/utils.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..81ca4a91e --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ + +*.pth +*.pyc diff --git a/recognition/vq-vae_s47036219/dataset.py b/recognition/vq-vae_s47036219/dataset.py index 104949b36..31bef8e9e 100644 --- a/recognition/vq-vae_s47036219/dataset.py +++ b/recognition/vq-vae_s47036219/dataset.py @@ -8,10 +8,10 @@ def get_dataloaders(train_string, test_validation_string, batch_size): transforms.ToTensor(), #transforms.Normalize(mean=[0.5], std=[0.5]) ]) - train_dataset = datasets.ImageFolder(root='train_string', transform=transform) + train_dataset = datasets.ImageFolder(root=train_string, transform=transform) train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) - full_test_dataset = datasets.ImageFolder(root='test_validation_string', transform=transform) + full_test_dataset = datasets.ImageFolder(root=test_validation_string, transform=transform) test_size = int(0.3 * len(full_test_dataset)) val_size = len(full_test_dataset) - test_size diff --git a/recognition/vq-vae_s47036219/modules.py b/recognition/vq-vae_s47036219/modules.py index 32577d941..76218b859 100644 --- a/recognition/vq-vae_s47036219/modules.py +++ b/recognition/vq-vae_s47036219/modules.py @@ -2,8 +2,7 @@ import torch.nn as nn -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -print("Running on: ", device) + class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, intermediate_channels=None): diff --git a/recognition/vq-vae_s47036219/predict.py b/recognition/vq-vae_s47036219/predict.py index 52aafd633..278a2f0da 100644 --- a/recognition/vq-vae_s47036219/predict.py +++ b/recognition/vq-vae_s47036219/predict.py @@ -1,15 +1,17 @@ import torch -from modules import VQVAE, device, ssim +from modules import VQVAE, ssim from dataset import get_dataloaders -from train import SSIM_WEIGHT, L2_WEIGHT, BATCH_SIZE, train_new_model +from train import SSIM_WEIGHT, L2_WEIGHT, BATCH_SIZE, train_new_model, path_to_training_folder, path_to_test_folder +import matplotlib import matplotlib.pyplot as plt import os def evaluate(test_loader): + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = VQVAE().to(device) model.load_state_dict(torch.load('vqvae.pth')) model.eval() - + print("loaded") highest_ssim_val = float('-inf') # Initialize with negative infinity lowest_ssim_val = float('inf') # Initialize with positive infinity highest_ssim_img = None @@ -18,6 +20,8 @@ def evaluate(test_loader): lowest_ssim_recon = None val_losses = [] + ssim_sum = 0 # To keep track of sum of all SSIM values + total_images = 0 # To keep 
track of total number of images processed
 
     with torch.no_grad():
         for i, (img, _) in enumerate(test_loader):
@@ -37,7 +41,10 @@ def evaluate(test_loader):
 
         # Calculate SSIM
         ssim_val = ssim(img, recon).item()
-        print(f'SSIM: {ssim_val}') # Output SSIM value
+        ssim_sum += ssim_val * img.size(0) # Weight the batch-mean SSIM by the batch size
+        total_images += img.size(0) # Increase the total number of images processed
+
+        #print(f'SSIM: {ssim_val}') # Output SSIM value
 
         # Update highest and lowest SSIM values and corresponding images
         if ssim_val > highest_ssim_val:
@@ -50,6 +57,9 @@ def evaluate(test_loader):
             lowest_ssim_img = img.cpu().numpy().squeeze(1)
             lowest_ssim_recon = recon.cpu().numpy().squeeze(1)
 
+    mean_ssim = ssim_sum / total_images
+    print(f'Mean SSIM: {mean_ssim}') # Output mean SSIM value
+
 
     # Output images with the highest and lowest SSIM values
     plt.figure(figsize=(10, 5))
@@ -73,9 +83,9 @@ def evaluate(test_loader):
     plt.show()
 
 def main():
-    weight_file_path = 'vqvae.pth'
+    weight_file_path = "vqvae.pth"
 
-    train, validate, test = get_dataloaders(BATCH_SIZE)
+    train, validate, test = get_dataloaders(path_to_training_folder, path_to_test_folder, BATCH_SIZE)
 
     if os.path.exists(weight_file_path):
         print("Weights exist -> Evaluating Model...")
@@ -85,6 +95,8 @@ def main():
         print(f"Weight file {weight_file_path} does not exist.")
         print("Training model now...")
         train_new_model(train, validate)
-    
+
 
-    
\ No newline at end of file
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/recognition/vq-vae_s47036219/train.py b/recognition/vq-vae_s47036219/train.py
index 089b06792..f7eda4017 100644
--- a/recognition/vq-vae_s47036219/train.py
+++ b/recognition/vq-vae_s47036219/train.py
@@ -1,12 +1,12 @@
 # CONSTANTS AND HYPERPARAMETERS:
 
 import torch
-import torch.nn as nn
 import torch.optim as optim
 
-from modules import VQVAE, device, ssim
+from modules import VQVAE, ssim
 from dataset import get_dataloaders
-
+path_to_training_folder = "C:/Users/Connor/Documents/comp3710/dataset/ADNI/AD_NC/train"
+path_to_test_folder = "C:/Users/Connor/Documents/comp3710/dataset/ADNI/AD_NC/test"
 
 LEARNING_RATE = 1e-3
 BATCH_SIZE = 32
@@ -23,7 +23,7 @@
 counter = 0
 
 
-def train(vqvae, train_loader, validation_loader):
+def train(vqvae, train_loader, validation_loader, device):
     optimizer = optim.Adam(vqvae.parameters(), lr=LEARNING_RATE)
     scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True)
     # Training Loop
@@ -85,19 +85,21 @@ def train(vqvae, train_loader, validation_loader):
         print(f"Early stopping at epoch {epoch+1}")
         break
     torch.save(vqvae.state_dict(), 'vqvae.pth')
 
-def train_new_model(train, validation): # Called if the weight file doesn't exist.
+def train_new_model(train_set, validation_set): # Called if the weight file doesn't exist.
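+    # Fallback used by predict.py when 'vqvae.pth' is missing: construct a new
+    # VQ-VAE on the selected device and run the full training loop.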
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
     print("Running on: ", device)
     model = VQVAE(CODEBOOK_SIZE).to(device)
-    model = train(model, train, validation)
+    model = train(model, train_set, validation_set, device)
 
 def main():
+    print("WARNING: RUNNING FROM TRAIN FILE")
+
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
     print("Running on: ", device)
-    train_loader, validation_loader, _ = get_dataloaders(BATCH_SIZE)
+    train_loader, validation_loader, _ = get_dataloaders(path_to_training_folder, path_to_test_folder, BATCH_SIZE)
 
     model = VQVAE(CODEBOOK_SIZE).to(device)
-    model = train(model, train_loader, validation_loader)
+    model = train(model, train_loader, validation_loader, device)
 
 if __name__ == "__main__":
     main()
\ No newline at end of file
diff --git a/recognition/vq-vae_s47036219/utils.py b/recognition/vq-vae_s47036219/utils.py
deleted file mode 100644
index fd9c7db58..000000000
--- a/recognition/vq-vae_s47036219/utils.py
+++ /dev/null
@@ -1,287 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.optim as optim
-from torch.utils.data import DataLoader
-from torchvision import datasets, transforms
-import pytorch_ssim
-
-
-def ssim(img1, img2, C1=0.01**2, C2=0.03**2):
-    mu1 = img1.mean(dim=[2, 3], keepdim=True)
-    mu2 = img2.mean(dim=[2, 3], keepdim=True)
-
-    sigma1_sq = (img1 - mu1).pow(2).mean(dim=[2, 3], keepdim=True)
-    sigma2_sq = (img2 - mu2).pow(2).mean(dim=[2, 3], keepdim=True)
-    sigma12 = ((img1 - mu1)*(img2 - mu2)).mean(dim=[2, 3], keepdim=True)
-
-    ssim_n = (2*mu1*mu2 + C1) * (2*sigma12 + C2)
-    ssim_d = (mu1.pow(2) + mu2.pow(2) + C1) * (sigma1_sq + sigma2_sq + C2)
-
-    ssim_val = ssim_n / ssim_d
-
-    return ssim_val.mean()
-
-# Check for CUDA availability and set the device accordingly
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-print("Running on: ", device)
-
-class ResidualBlock(nn.Module):
-    def __init__(self, in_channels, out_channels, intermediate_channels=None):
-        super(ResidualBlock, self).__init__()
-
-        if not intermediate_channels:
-            intermediate_channels = in_channels // 2
-
-        self._residual_block = nn.Sequential(
-            nn.ReLU(),
-            nn.Conv2d(in_channels, intermediate_channels, kernel_size=3, stride=1, padding=1, bias=False),
-            nn.ReLU(),
-            nn.Conv2d(intermediate_channels, out_channels, kernel_size=1, stride=1, bias=False)
-        )
-
-    def forward(self, x):
-        return x + self._residual_block(x)
-
-
-class Encoder(nn.Module):
-    def __init__(self):
-        super(Encoder, self).__init__()
-
-        self.layers = nn.Sequential(
-            nn.Conv2d(1, 32, kernel_size=4, stride=2, padding=1),
-            nn.BatchNorm2d(32),
-            nn.ReLU(inplace=True),
-            nn.Dropout(0.5),
-
-            nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1),
-            nn.BatchNorm2d(64),
-            nn.ReLU(inplace=True),
-            nn.Dropout(0.5),
-
-            ResidualBlock(64, 64),
-            ResidualBlock(64, 64)
-        )
-
-    def forward(self, x):
-        out = self.layers(x)
-        return out
-
-class VectorQuantizer(nn.Module):
-    def __init__(self, num_embeddings, embedding_dim):
-        super(VectorQuantizer, self).__init__()
-
-        self.num_embeddings = num_embeddings # Save as an instance variable
-        self.embedding = nn.Embedding(self.num_embeddings, embedding_dim)
-        self.embedding.weight.data.uniform_(-1./self.num_embeddings, 1./self.num_embeddings)
-
-    def forward(self, x):
-        batch_size, channels, height, width = x.shape
-        x_flat = x.permute(0, 2, 3, 1).contiguous().view(-1, channels)
-
-        # Now x_flat is [batch_size * height * width, channels]
-
-        # Calculate distances
-        distances = ((x_flat.unsqueeze(1) - self.embedding.weight.unsqueeze(0)) **
2).sum(-1) - - # Find the closest embeddings - _, indices = distances.min(1) - encodings = torch.zeros_like(distances).scatter_(1, indices.unsqueeze(1), 1) - - # Quantize the input image - quantized = self.embedding(indices) - - # Reshape the quantized tensor to the same shape as the input - quantized = quantized.view(batch_size, height, width, channels).permute(0, 3, 1, 2) - - return quantized - -class Decoder(nn.Module): - def __init__(self): - super(Decoder, self).__init__() - - self.layers = nn.Sequential( - nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), - nn.BatchNorm2d(64), - nn.ReLU(inplace=True), - nn.Dropout(0.5), - - ResidualBlock(64, 64), - ResidualBlock(64, 64), - - nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1), - nn.BatchNorm2d(32), - nn.ReLU(inplace=True), - nn.Dropout(0.5), - - nn.ConvTranspose2d(32, 1, kernel_size=4, stride=2, padding=1) - ) - - def forward(self, x): - return self.layers(x) - -class VQVAE(nn.Module): - def __init__(self, num_embeddings=512, embedding_dim=64): - super(VQVAE, self).__init__() - - self.encoder = Encoder() - self.conv1 = nn.Conv2d(64, embedding_dim, kernel_size=1, stride=1) - self.vector_quantizer = VectorQuantizer(num_embeddings, embedding_dim) - self.decoder = Decoder() - - def forward(self, x): - enc = self.encoder(x) - enc = self.conv1(enc) - quantized = self.vector_quantizer(enc) - - dec = self.decoder(quantized) - return dec - -# Hyperparameters -learning_rate = 1e-3 -batch_size = 32 -num_epochs = 40 -codebook_size = 512 - -# Weight for L2 and SSIM in final loss -l2_weight = 0.05 -ssim_weight = 1 - -# Constants for early stopping -patience = 12 -best_val_loss = float('inf') -counter = 0 - -# Data Loaders -transform = transforms.Compose([ - transforms.Grayscale(num_output_channels=1), - transforms.Resize((64, 64)), - transforms.ToTensor(), - #transforms.Normalize(mean=[0.5], std=[0.5]) -]) - -train_dataset = datasets.ImageFolder(root='C:/Users/Connor/Documents/comp3710/dataset/ADNI/AD_NC/train', transform=transform) -train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True) - -# Validation DataLoader -val_dataset = datasets.ImageFolder(root='C:/Users/Connor/Documents/comp3710/dataset/ADNI/AD_NC/test', transform=transform) -val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False) - - -# Models and Optimizers -model = VQVAE(codebook_size).to(device) - -optimizer = optim.Adam(model.parameters(), lr=learning_rate) -scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True) - -# Training Loop -for epoch in range(num_epochs): - for i, (img, _) in enumerate(train_loader): - model.train() - img = img.to(device) # Move to device - - # Forward pass through the entire model - recon = model(img) - - # Calculate L2 loss - l2_loss = ((recon - img) ** 2).sum() - - # Calculate SSIM loss - ssim_loss = 1 - ssim(img, recon) - - # Final Loss - loss = l2_weight * l2_loss + ssim_weight * ssim_loss - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - # Validation phase - val_losses = [] - model.eval() - with torch.no_grad(): - for i, (img, _) in enumerate(val_loader): - img = img.to(device) - - # Validation forward pass - recon = model(img) # Changed this line to use the VQVAE model - - # Validation losses - l2_loss = ((recon - img) ** 2).sum() - ssim_loss = 1 - ssim(img, recon) - loss = l2_weight * l2_loss + ssim_weight * ssim_loss - - val_losses.append(loss.item()) - - avg_val_loss = sum(val_losses) / len(val_losses) - print(f"Epoch 
[{epoch+1}/{num_epochs}], Training Loss: {loss.item():.4f}, Validation Loss: {avg_val_loss:.4f}") - - # Update learning rate - scheduler.step(avg_val_loss) - # Print current learning rate - current_lr = optimizer.param_groups[0]['lr'] - print(f"Current Learning Rate: {current_lr}") - - # Early Stopping - if avg_val_loss < best_val_loss: - best_val_loss = avg_val_loss - counter = 0 # Reset counter when validation loss decreases - else: - counter += 1 - if counter >= patience: - print(f"Early stopping at epoch {epoch+1}") - break - - -# Save Models -torch.save(model.state_dict(), 'vqvae.pth') - - -import matplotlib.pyplot as plt -import numpy as np -import cv2 # Import OpenCV -import torch -import random - -# Assuming your existing PyTorch models are already loaded -# encoder, decoder, vector_quantizer, and codebook -model.load_state_dict(torch.load('vqvae.pth')) -val_losses = [] -model.eval() - -with torch.no_grad(): - for i, (img, _) in enumerate(val_loader): - img = img.to(device) - - # Validation forward pass - z = model.encoder(img) - z = model.conv1(z) - z_q = model.vector_quantizer(z) - recon = model.decoder(z_q) - - # Validation losses - l2_loss = ((recon - img) ** 2).sum() - ssim_loss = 1 - ssim(img, recon) - loss = l2_weight * l2_loss + ssim_weight * ssim_loss - - val_losses.append(loss.item()) - - # Calculate SSIM - ssim_val = ssim(img, recon).item() # We already have a PyTorch-based SSIM function - print(f'SSIM: {ssim_val}') # Output SSIM value - - # Assuming the images are single channel and the channel dimension is at the second position - original_img = img.cpu().numpy().squeeze(1) - reconstructed_img = recon.cpu().numpy().squeeze(1) - - # Output images (for first image in the batch) - plt.subplot(1, 2, 1) - plt.title('Original') - plt.imshow(original_img[0], cmap='gray') - - plt.subplot(1, 2, 2) - plt.title('Reconstructed') - plt.imshow(reconstructed_img[0], cmap='gray') - - plt.show() - - break # Exit the loop after one iteration for demonstration \ No newline at end of file From 26146b6e323b4bc2059ac6400add96a3aa2fa7b3 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 23:17:52 +1000 Subject: [PATCH 15/20] updated readme --- recognition/vq-vae_s47036219/README.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/recognition/vq-vae_s47036219/README.md b/recognition/vq-vae_s47036219/README.md index 093ea10ff..907806025 100644 --- a/recognition/vq-vae_s47036219/README.md +++ b/recognition/vq-vae_s47036219/README.md @@ -10,4 +10,15 @@ The goal of this task was to implement a Vector Quantized Variational Autoencode ## VQ-VAE and the ADNI Dataset: -The ADNI (Alzheimer’s Disease Neuroimaging Initiative) dataset is a collection of neuroimaging data, curated with the primary intent of studying Alzheimer's disease. In the context of the ADNI dataset, a VQ-VAE can be applied to condense complex brain scans into a more manageable, lower-dimensional, discrete latent space. By doing so, it can effectively capture meaningful patterns and structures inherent in the images. \ No newline at end of file +The ADNI (Alzheimer’s Disease Neuroimaging Initiative) dataset is a collection of neuroimaging data, curated with the primary intent of studying Alzheimer's disease. In the context of the ADNI dataset, a VQ-VAE can be applied to condense complex brain scans into a more manageable, lower-dimensional, discrete latent space. By doing so, it can effectively capture meaningful patterns and structures inherent in the images. 
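+
+For intuition, the quantisation step is a nearest-neighbour lookup against a learned codebook. Below is a minimal sketch of that lookup, mirroring the VectorQuantizer in 'modules.py' (the tensor shapes are illustrative assumptions):
+
+```python
+import torch
+
+def quantize(x_flat, codebook):
+    # x_flat: [N, embedding_dim] encoder outputs; codebook: [num_embeddings, embedding_dim]
+    # Squared Euclidean distance from every vector to every codebook entry.
+    distances = ((x_flat.unsqueeze(1) - codebook.unsqueeze(0)) ** 2).sum(-1)
+    indices = distances.argmin(dim=1)  # index of the closest codebook entry per vector
+    return codebook[indices]           # quantized vectors, same shape as x_flat
+```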
+
+
+## Details on the implementation:
+
+### Usage:
+**Please note: before running, set the paths to the dataset's train and test folders in 'train.py'.**
+
+It is highly recommended to run only the 'predict.py' file by calling 'python predict.py' from the working directory. It is possible to run from the 'train.py' file as well, but this has implications for data leakage, as I could not find a proper way to partition the test set.
+
+### Data:
+This project uses the ADNI dataset (in the form provided on Blackboard), where the training set is used to train the model, and the test folder is partitioned into a validation set and a test set.
\ No newline at end of file

From ee8ca66e04f2b9a485767fcfd0f35acc41b81019 Mon Sep 17 00:00:00 2001
From: ConnorArmstrong
Date: Tue, 24 Oct 2023 23:25:12 +1000
Subject: [PATCH 16/20] dependencies and output image

---
 recognition/vq-vae_s47036219/README.md  |  17 +++++++++++++++--
 recognition/vq-vae_s47036219/output.png | Bin 0 -> 81537 bytes
 2 files changed, 15 insertions(+), 2 deletions(-)
 create mode 100644 recognition/vq-vae_s47036219/output.png

diff --git a/recognition/vq-vae_s47036219/README.md b/recognition/vq-vae_s47036219/README.md
index 907806025..2d8ae6cc8 100644
--- a/recognition/vq-vae_s47036219/README.md
+++ b/recognition/vq-vae_s47036219/README.md
@@ -18,7 +18,20 @@ The ADNI (Alzheimer’s Disease Neuroimaging Initiative) dataset is a collection
 ### Usage:
 **Please note: before running, set the paths to the dataset's train and test folders in 'train.py'.**
 
-It is highly recommended to run only the 'predict.py' file by calling 'python predict.py' from the working directory. It is possible to run from the 'train.py' file as well, but this has implications for data leakage, as I could not find a proper way to partition the test set.
+It is highly recommended to run only the 'predict.py' file by calling 'python predict.py' from the working directory. It is possible to run from the 'train.py' file as well, but this has implications for data leakage, as I could not find a proper way to partition the test set.
+
+If all goes well, matplotlib outputs four images: the original and reconstructed brain with the highest SSIM, followed by the pair with the lowest SSIM.
 
 ### Data:
-This project uses the ADNI dataset (in the form provided on Blackboard), where the training set is used to train the model, and the test folder is partitioned into a validation set and a test set.
\ No newline at end of file
+This project uses the ADNI dataset (in the form provided on Blackboard), where the training set is used to train the model, and the test folder is partitioned into a validation set and a test set.
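+
+The partition itself is a 30/70 random split of the test folder (see 'dataset.py'). A minimal sketch of the idea, assuming 'full_test_dataset' has already been loaded:
+
+```python
+from torch.utils.data import random_split
+
+test_size = int(0.3 * len(full_test_dataset))  # 30% of the test folder becomes the test set
+val_size = len(full_test_dataset) - test_size  # the remaining 70% becomes the validation set
+test_dataset, val_dataset = random_split(full_test_dataset, [test_size, val_size])
+```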
+
+
+### Dependencies:
+| Dependency  | Version      |
+|-------------|--------------|
+| torch       | 2.0.1+cu117  |
+| torchvision | 0.15.2+cu117 |
+| matplotlib  | 3.8.0        |
+
+### Output:
+![Output Image](./output.png)
\ No newline at end of file
diff --git a/recognition/vq-vae_s47036219/output.png b/recognition/vq-vae_s47036219/output.png
new file mode 100644
index 0000000000000000000000000000000000000000..c93e8362b1ebfb7cb9877818e7cdf31eefcb4439
GIT binary patch
literal 81537
[81537 bytes of base85-encoded PNG data omitted]
zopb(Zk_jGYDXVADU<@LO_%>34FO=>z-Iqz_;Anv5#FmJH507x^PIez(522X&XR@@#}|1^%?apL(q$2s*3i z2SW%AugH#j6my%EVHa475Y-#90|C%1c%zOUfjeP~QNZOF+t7s-f3<*p=rJB7n5+vT z-DN-bVA8nf7Z;%<94Ui0H#biaduAu=eB%Z?Xjnke0d^q)h+6_37{W>FR3PyHpg4{B zb^cfstcJh@fN&ByJHoYW5;y`VkLm3x{uW&m&W^c=mT<^?(0zH~QnV&#EaE&iJ-Mpe zk>%Z5!bR3KGXh}QMCR?R)!0>$gRAD%=RH}RINAtaBzx7fRmt^V-Wf{g*?(Pi!ru(S zN&vi=w zw(KfFpZAf#cGwNC!cRU?4?K7FhwIBsbtVsBW$%XFw5CQ2qEwzi^cB;^%;Of#zgmMh z-oQuE&&QtO4O#}>J%qUK6d>}kRF${4DT$3~B)8Vhe8+h&aAF;DFGJa7EZDpiwfzN~ zW9DFUw*2BP@ox!UF*NCoA|d+2m%Pc+er;R*g&8l2?w3HNm{^WF<=QT5`IjVlNtJIq z4Dk5UK27oTqQW%hZX9I}=UkR^a8xKTy!Au#qk0XPrL8}++>2DTOypA2Fe&h{?RLHT z^7;8t4xN?OK0jQLt*D2@p`ssLe^D=1eUb~-E305jvg~SOW&YkE%^?_0-Cl@@STwt~ z>E>L_<$L~CLMikN059V2ZGmY4En1P=pw!mzJ;B=?O90!zbUVDD##QyB4KEmPcQt=9 ziO+1!_(^r?@h-zGoRzld-749om&xqtU9jpD3@#PwkGm7<%J7X7DXwX%0Dy`I7|tkv zs^=NxdNC#5y(*HMc|cdAy!11fWtU$;bvYGttI~(4;uroT0~@ATH0mHf>a0qpK5i{_ z^S2Y-p<{7(u6q@V-c6qJ0-+T^{{SI2#5Ow>UD#cQ2}$$w7V&KKlxdwT)w9NiRoj#0 z4uE0`79r^12Bj&;B0&Z#nWA7g^21S<>kVi-5%FWv8~nOc|BbdasTg%jitw#{oR(3l<+w)!d0CE zE-2)H7jKYI&UETMirJ~xjFmN@E`tOWr?GwlBgO1TZu1G^d5LOMo_DlUQ@<$_pKR}p zIK87>-CCcxN0XO-c>WF7U))~>^FqLTXF!Id|J#6P*Uh3vFt2mzyO%PU$5T zK^pGS7)4y1*Vm9aYz(>?9p{)z!?=ZPMT{!6V7tiMm$h`%&yj#(omU(B{HF9K9;pr(?n3?8En40PRxfF zHXj(ddxqdK+Ei)r=db|BS1z#@fn~ZQJNmd5R{i`2P#*vo4C@^XhNA8KaDoL^L^9=m zYZV0F>8Fq$#>Qxde6seZh)Zma15u$@+VBGISzZH1!Gm)hy_E$3YU?h!0lrwC+RQy5@Nc7>Jv_b+%6F1XI42F2rt;?sYkX$i6{RiSq-+jje)<(wVh4zu9>61;nPP}L9K zezG22)%29As<~Bt29V>Lx!g>p?H_t%VtPr!E= z*Y+Yy7Xk+*-^SV*E70b9uJ;foRS>DH>mYbJ0ogbCXKIs>(pd0PYH_pjDdyLUm|Knk zLID+f8dZf|iqTx2$;-bzrVb5%lpeE5n0$Of7=rtNqfTZo1a1UId2HXCI#^)V>-_cb|IhP&@CSPxN*^7|!A z2Hrd?Ypikzz;JD%*5!Gcwup_Q=)bBiMLwFtG4ySP3rT!bpvyQDB$ za;$^8B+kRnbc_FbTSW$y6z*NZLPU)@I5ZEAOuZ(rR;`(q-uN19XyA{~OJ^S%-Z4=T zUF&q%0Ab=!>eWB@L2C#raus-25kdd_ybkR2&^`rTWmvYY#om-z4J31o+<#`=)iPyB z=1=pU1FBV_+L#bPOmgSsMal6ECQDQh8Z>#ZdR(N%Q`Z>sB)Eo6$Ff+>Tz-LBs;|DS zBP8F)vlf>^3i4U*as+9`X6(tM)X*aiI7m|!Z}Wkb z4WfP^ z=iIkj_lgLc6?gH>_E><*u4ryJ%s5)9HAvl-)LG)h5}=7`sSPwcNXi0r+u94ft&Xau z_`XmMaD`ej|F)2;=~Kt#mxlYmB}bJaim3J*rLMMil<*o&*K6R3SlOtG?UG7s z1KthM3$(N_f$$fgV|094IZYp3vNDKeAh0}Z_~9W%qNHMQkHXg)KJLc}&NDwGtc>7p zS1-0^{kYMi%_Q7^o!6Jvn$2rkW&oL&Zut-8&+xdsGI{;z+HOaNs!0{`0l_yswU~vV zqf0(AIc0-|)0nY+>HOzxGQVe!g_pHO9uO{}N6v>T3i6OB_8%!Gyjq{&m|!JsA@(o* zWwC4Dp(0lQWsX@WM1vq{#IW_#g-w){O^+sUJ=#_X>8ZCO9o$8lJRs)p3ldc8UNNl; zM0`o_)^-HkF}g52URsoI=nJu!Y28jPUgL^X>FabSlFx2wxqdk-#{;vX$u;T4#Z8sW zlPA;KaC+A`MgWR~w;1HcunUU*i366h(FwU)AY_9B+h5oKg!=GUgIodxX28$no)MYE zu^B?1A6!$!caVEo0oyro&5b3Pza-$gh0L9T=0obg+79yuvT=tk>nxQt3s49nOYdcL zpIcOgyyZPqz%uPDer3c?K}zX8_5$0ilPNXG=Cl^+?hyt92|lw;eBMyk|+ zYyu7X1yuN^P37hI1@eTI`I9LPNAroSzho4woq&dn@&@8pfR=}owYXCfr~wb_sf>tF z2(w;yWO2b|_s;Q1H+-lg)C@Lr0Pp$9Nc(cv*;dnbFF9F$7GUUddxUpEiv1BD>@@(` zfggUP$Fk?{Uk?IIveKU+tr*Um$!GO+ibNi^ytye1nnpN804*h%Z8Sz%+oE7Rg1BZj z57WYkys@jNY~!d}gJMJ8cgA9@g0`xIr}_A#ItX9BhUhb)!HDs|I?`kxXi(rpAq%|6 z0im}23ux`M*4r`HcGL%6o!s&ZWk!V(0t&*rARF*5kcj&vCV09g>OrG58f>62X_<}8q~@_RB;5}2U^GqQKHspH2%y1McTRTDF)Qhi;GT}yEE6!3!(4rxy zbucBE;*^TcAnJ8`nSnVt{QzdMGh1B%xDFmea4sKxXb|VO&Gj|}#y0Sez?-OmV+E-v zm>E3W;nE^>HR5jUy zmQe63fzoo!VnRW#)hy7d9C$Q@F6v!$(<2+6tG;3U*$wMiP3K4|qs};nxeg87Bz7O) zTL0{JW!=X6TXFVCEy+TW1GO5}ei=F7>$RMbLsK{{Ww`|CcJ$W!a3@|}JPx1@eDuk& zF;HKQb~n0myvceDnTQD0xp8w$H1j)5vamzII|Y(`2;}XV3+RvP_#Jb-bLM+frk#X9 zadLI+PabW#`m5%9rd2q`QA5*ZDSEcweeRg?2EmK#p1-8baUUi7c?GriJh#o%`j#mh zdr@ke&fDO?*1@CWVmVM|n;ujni-&mDIKDJShvl&zokO>aK9gTKc-P1fysYP|qbf44 zP-tqX(_TU&vuieeT%#CBlQlD#{40#pzNjz<8w{L+V2`=vCvzx1LG7GwAP_eo3U6mH znons%1Luz1z0aSYp}~sr{iR(8W#XI5u2+u43MKYp{oqW*ABzM?Kq#KsO 
z6vpbylKd={wl$m{A9#8ymsS@GQ&rYE7f@MNc%w6FIQh>9r=sG7kR??Tci4Ga53Cc8^_B?SW%;)A#(K#v2qHj5N(OWRaXxPwrl$Yz?fumNy#o74q&2 zz+9b^de`ja0J#wp|I#W!&(s@RWMwblbBRTX7?rtV#(g0R#3hkM(!Y!p%0Z$9sv_Vu zlbZEJ9_ERnCnjW5R6#dIcln;Ohg?4R!;+)*Sp17Vj_{A_J!z{agx7Aqb_hsa?l9BI ztA4-mO;XtsqkLg$dS$F->CFY=_oClIC(|jJ?8SSLYjJ$I`aD=HZrqx&GllNL9QyBX3#ALyyC_t*f4l%=Q38BtsSqoB$c-X zSsw$h9=O1d!La5P24}Ew7%{BbQx%>Mzp_u%1ned84xB3DPNPzc78o7E3kk}p+r2WU zst49X3#gd!rj;$+^-`h$Z!vT30YD#0_`;qYY`gKQnB=wqkKOSwo_PJYNrbt$8O|lV zA|MVVud#UsFZdYX$|_1XNzeWfFUrKZEX8vJCYp>YLW}We`4Mue4Qb_)L`n(%%ckme zY$f6Lb>$%1=2#2EeRiE30sPIULjwU}8MHAx?}h1YspqDE<4V`x*ApB(nm2vZ`ox~K z%ggU%`cPx$p{x%gLaoy&qvHbaYjQ?*6*i2SpG+1`pWZ;niDt_qi^TM7Yz&hNAnZ;> zF~Ec&I(pE@A1Vc#hWLdiOkZG&f*UMVnZvzn*0U-1kFMC~&%Jki=ZPMQs5RcIJQQQ^)Mxqzv@gV&g|7*V z{aL(aJd91A@Put2 z%<>mb_lU_IvAmDg9wfnrEE?s(D%sxD^l)SoLy4eHf10!SeecLPC~u1q3E0l z2V6clq$4&4kX?c0vaOQo7P>4EJZickIVaN-n2Goen$!r zYSFQouNw;3Cjw6sRMNEC%;&Ii#-V7N#0$=W8RWiAN^6Zui$#Qq}QLVR66*ImdrV9TXfcKb*+1^LB-U+lezI}>xB zr|q^?eYn;?aLNV4j^V&jv0+G&HtZ31av1&mN;Y%{w@g&6J%8--_Ca2K^t~l8_<*(@ zq`}9DM_HKb4pa4jez8P>6P4!bt=Po~LbIUcV&1-uWcN2e$n`$r4BPRnhV2wi_& z0}@i$o^|Zd0-qcE&x=dJfpbSqzYh-X0AB&g3S_80;3bHxo^Kr^RI;eT27yAV7)M7s zuyF#B1c4kZz_}hz5$Cy?>owBbdl&TmS6uHACpT6$G{9xpuXD$YGkfWZxu5Q1ub{K& zhYXJM-|idCG~5NdN3Y4mNg)aR_=n=F4V8jNE3^ZcyP&+o)rZ`Czno`D#`YQ2*&9pJ zZ!Q|yFep_aSDX)syg*lA5lHO5ZS!T<=J;wug=PC6-OoHFoqyFJTsnuEd;bs_|nnA0&_*&9(QK0HC1!@Ve134cg`e<`&J3MB29i1Jk76fRZ^m>O$k&jPi)-c zYeK}0dKk-IdQ4FU-;9*-EJPCW+DHy#;}fl7laVnlGI@djOue>^x%6_(a_y(HZTo8Q zl-mYj#52{6GUX#j*A&aua~Qd-8Xf`f0R4s62KK9_Kzjy|^UCv+U$v);&VqRq8H8ZO zQOo?dYX-4G!{tKAYgW-zuhv5Vksz){c>ll!kf|O7c30rNt^oxL+C}6olwIU}Eo*VG zbyxmUFFt~lRXSwBEoZzJjK&9@xqXt4S9vy`qi&?Qo(;e0AuUqUWrS&uN38mMZXrk=@ERa|b~S9WMfpD7oBC3Xcj zm*R^MqtM|75kC)2#NQ^hP5Y?=dk)J-@_93H?Sx53uw?-kc?O349WYbbr%hLn0mjX< z@c+OcAc?_Z4rd%#hLAi$*uTKt04^aAI!{c$I-TTb;QnUsc_ojY#l>Nv9>SYhW(Zkn z#~iZPcq7(bdWNV+v>Z$cw(}^Gt+##9h)62-iP$`Y`IKuP_r81WK|;x>umEO^T}6zq zP^j2Q8OXX1Xh%?u)$WlT8?E?}%t@y5>grP%x#mR`cN zD!s=q#3!hyQIY+-y~>GE7$DR-tHuy1uBagkH%d*g;aua@<$)ftp&wfgW8Wt}$_`+x z8B)U?!f)6zq%I9BjKv%w@=AJYDwAXim*RW!Z==?J&b@nZ#(;DNaTgz-sDJI%wz(ZK zna;Lwc+k1%F;0X2#NszV+j+3`gS6CHl?1~*WRTl|F9JdL4krqbn}GXAwCyXt3Kb83 z`nPJyZPSmIeD*Aom5tvFriq)wcgC5_S1^%iPmiazh3>jsyFOhljhgaH5S1)Di_7+l zmM!$S6(E4(xh3=1%esxq99WU{gEUt1_h%RP(#E9L^O z0b~k*Z>fCm{qw2ekahs?0Jw`?&~l&-S^;SVN4&midk8%DFgHNPFpLh-;LWB3Wz!KP zP99EgtZZ!HIkPam->^@F=LSyWI!mnCiugdN(1+`!Q}?k|q8rZ{0o>|5c{Dv;b)c;= zET3Ft-`^amWtnEz>pI|~JxHsj@%R?#m6Zbj|CRuLc9^gf6I#p9A3JE0q$1PT*k-)> zOq1l2|A<|tVsAK*i$x>VPu4tEh7DGqmPJ-O!L-9bp3xFK=Sc(ync#b3?!ix;EF9c3-f8Sfj6sYvSqDvrHY-aGm#W~e)Ekr1+k#){6{1MaOWzC^CL#{OAk6a^o3j6`Ccx8BF8~l zEZ`697oQe2oFJ=@(}IarB7|cDJm_bkqn}O({^p=$5uQ*nv^CmOhsCv19U6D_uip#O zdHWe~Tf`Qi(-d!_b5rZ;m84Udj^`K(9vLOaCi2WWp@RhX?jSe(fPF#A!GB!4un+>z z1No~!Hbd!9p2F%p<1V3S+182MnypbMor=ioyB1OLOQF5*FCGQ?TFK`8UrniWG%4n>4=g5RRwUMu1hIe zEm5p*2r+F8t%SIi)xb5(vtb!?aobQ%eRy$&FC|7ax0x3*pq&HDf=Uvo**Dfsjl7Tz zzrGqyJc^S4h8kfOe);zX(e&NT{~gWe5*gMD@B$1>Rez!nnDiYpPfU>u)@p$4|JIk9 z8cV@z;R!bfEdqot9gF=JQk4k33FyXQ8!uA%h=sfI+fwo=A+G|NSvCGA!4|DW;9)&A z{n~c$QHJ~Nk;o61FLsG^0s#=0`AJ+^Z(*kn44)kyN^f8^wdD9lA6rz*WO8)aKj zmQnre9^tovMi_u=Aclgw3X`Z0>_K_OsJPP-QP;(t1wGd?6|J2*9avX4PliRj46X?F zx`qn7JokNpZ`H5Dn}P6%2~e-E$*nH!=ETIfOMX=@A!HoI)w@WqVuWv=yLoYXLar+y zzY8Vu3{ma%zBirw*Pxe9@HB+)-+-`hM@nTN5<&;zu|JN_i=nYg zyD`m~pn`-WzOxwk9?*e*yhq{yfjjJ@tEv1fO!9g&yD17di0^1O4L7*|NO zzh`V;sEEZs&i}l|537|LHb0>zWuX4iv>2MaFH@ydmf-Q~Sys#`%Dd!m96FvbWaKEp zn(_1V-qSHilLm+qGQOyjnlp@)Q9w9#I@Ix*^}s8 zI+Ipy{*E%?RWY5oI#i)awCFV}*oZROi(Fzi=!Of17N1yXS+MlE3TeC$TG7AuKjqe~ 
zi(m8K6Oyoj8VC2`to&n@(_*8U&>XWO#bE0D>-|F#LXpOxCWwFCs{x%uNUSeZ!=y;2 z1=+LMGgOuevFg@_G>WHsw3yXCtOvI#+4L|>=rFdqMm_y!ogc)zQA_qm$F2gI(EW(R~{4Qre@k(C}qM-um1GslQw?Hiphp~mBobn4Sp!JcVE}1lU z_&t{y0yTraZGTxmWN<;6@X_Sf$J+|AkdH2IcHaDcfRpl8I)Sl1YqsxWu@sG(q>6C2 zZ<~b^v=@hJnv-Hs`Srb010!z>Dl}2$GGp@}#n-e7sbRFEQSCQ(W-d%Qkyu$pRpBCx!k-Aq9zKHQ5x&$~_>?KN9 zhKdMp`MibE8>7fE+3k{`?Ti+9D((GVlP73hzqpoEQ4n_J=R0CZMd^rhf}`C0Jcgz< zL?cR$Jz-bSglvi4BmiLq{aS>tjDQvj&du#>5PeH$X@_P+@LEv0K!fN`=LJ0dUvpU|u7EG0dOJehl3S!N%QdJ4t$+|ayud%A} zxl_amJbw`d~&>V{6?aP{y2lhE43d>y4wkz(sF)peXUaQ_l{-EHRVCu~=!_OO)r7#<&5x)M}y>uPeikB*xfI6j)7 z(&Hv01lS>^aB#Fil1U-2KO~Q&bo7(O5^%$akhvX}8$c6|zY^ayX!^|9yRzXHbqV4m zCU#Tocwl>nrD)fk+jAKvaaH0TYL`d}n!qlCE(-C!UDEJ^^Go`*;^yabl95frzH-E1 zA`NAMCZ3S8edhHAKiS~r>PTFtZuO0fO0S33TB;IaZ|30ZrF`oyuVWv~-R}DG2ZFr# z0RgwPBwr4CN&6&!3kjm4QWXo*Az*{)hk@uWKq*1$z<}T!Cl3it|CSX|2e&^&HU$S5 z@=1)-KlcBjbK9|YqO?qxC4iRj&raqOq9;U8>R)p30c;rt=j0!kB~pP30+`b*P?4CgKQn$w)w`TXY-}={;St`TL%M#EZWHgMTH5hl%12lz*9N^x9m$_;Na4hK9L9*i5 zqD@!Xg$6_?tdmlQMeL-hgg}QH>TfUlg5>%+dD8-!k-(9<*=}i~yCO&J)e)gui;JfV zyXBkFaF&L=)r-3!b5nRy9}B{Q`*2i%=L85l#bplOfXrNb&!>Q@gK9UoLZs zQ6p0XPOvrmvN?mU9hOm-FjuTxY3`TrvivGc9k9C|@BD8rpT*V|Y6iIO4P)!Z7E?6s z8Op6*V;f&sPsyYhW$h9=lfq6855RJhMQo1_gn-aCl+>P1OB#<0BWc{lSly=2yJb%6 zk+uqQ9CPz2#_%9n z#|?#O&T_CS|4`8#4z`JVCmLV*Kcst!A14@-WV)-@NMg|?f9$0=o@AU@-yG{~sc#FI zlM}v#C~RFu2Ls(2{bc_U3Mwcm!ZMy6-GdA=Hw<_%KJV9xQ)5Qd-nkcx&g|Vq|JMTG zZ(4M*B}$T=31)1Zvm$8wW$dg4KTk#OOka^XTU+gi4^%bnbxcIOH>rMfO_8!&u7dCI zgCJA!iFEiG=zo>m5?OMS%(4f?Jn0=2x;5N;5A$Q{NVfgo)38jpm@(;Qc6)l?dVj&c zoc@+z)RZE*Tv*?@seiLA>SM>grBfcol#w#(vzz2*>d*H88Gl(EA` z*qwC5&tGrvd9fH*lXq`#1-n`j8Hs6(c1ykC%ZI||tvhL9o>NPX3itt=+hMDj76Cia z4D3Ce3!F-8x=m8naiY<^$-=l3hG8P@(^m0WXF_~}u-|v*mDE!GflLK_hML8t<0h!6 zd3cZ~TCFRlGfQ?Ud_#6-F3H>Gv9T@T4AHsK%etGU=R}NKim$9B3ZVEYfw$bZoH&F# z5Y&KQd9C4`5zcTz`MnG}@ljckb6ulr^~GF}QPQ7Dp^~yxVzE_BD2_D{(AJ@Nn~+Q^ zeUa{$pRV_>im^oihYPUc%X}Kd!8Apuu~b%@W2@Xao9CQM-=zfA#&H#!N=DBzI&0=s zlpR0q^998cFk)-$?w$61)H7dMq+P(?(W76d-D>z6PmoR%q6dbDVe!_m?OSeD%%fiqWnR5w~&KGYP%1hn#)qXC`BJtFIQ;7O879-M#mQ`rqeSd$sLo}tMiWjGlT#~K-$(HDy2kYc!b(y0 z_qOu~ygHfQ@S;su4A26|*rhXRRiZ`6#)dBN`4c&nHq(b!zVHOe`gkb^;bIKKVAY4& z5rk9a{$?Uf!r9@RNkiLj#OBLX(wxg6jqLJn$BC}$yr{m)e^HmPw>CV;CPb;&!Ga%z z>Dqy9{6zq^~uF_sLquO>2<$+{z&AGW=;F%qxGXqBYGTXMuUwFKf z6?Yx@eCV*gq|>pq4s|kcyhN07?Td7I(ub0m%!o7Gg~yVQYfmsSDqDtWUMq1P62n&6 zavp)qiI=$RY12gnAKT8r5Obf30YEGrA_fHZddt(q8dl{u!|*>>Ldk+lXa{F6spsqx zE(^%4ZWGDcH1V;ZvB0W3Iy&0Eq=;JX2T5hTnJVY(_9$U~Wr2V>Mo-tMit(;Cj3Sem zr(mU)O?%iiW4-i*T}OJjf5n4TL_S-OTWCOm?0bQ$ORhSGnyRi>P&oO~)jN;ZauJiC zfhE->)I{L7gx(V0>wfM(mE6FwBbWr7D)tVOKS(bC9D~Che0GpFz#jy;Q=0+KvOite zdQ|LaRi^hjTZtc{=jKzXKYjl+S0ORgH(XZ#djBgeX6ZC3GsZMcVLDBk* z>Xi>a7ESoeqm&*j3N_XMRhzOAmvas;y(yzB!3xU0+S8SZ!N84Tm{E{Q*C8(q_NFwt(xK zHH>DOfoE#8D{xLp+o_jg^k!%PLh`O&3A4`^5$UD}^)8an2@n)e51^haZR`)%VH&#Y zbEnY#q3`ro3AfMO@B2*T2=Ur6;X06KT%6UXsJmzkR!{xMK=^`i2A*Vi>F|IhAEc z8Z;^mc%D}_tX{+*>niS` zppD$g&eg~w*-yu38y$!Z=*{55(3!HuvXm}uxyy0W8r!~MwPdYAn2C0{B=Lj%-LJ$$ zfC2z(0V2EslQ0LUFs*7EIb|p8I6GP!S_*LRPtpKKBP28;3HjSP3b);=`*;I0B>sjj z;oT^onpX}7aJlt<3AbrW86`Zzo*|Z$k6=Gk?pJ(fCE+AoW9HxI*hFpE z6RJGp?r8v~>S|A%Y`ddbG^?g{9{UH7cTmLc=yRfXg2_(gmF@?wMAeeCj|e4<{Dd z%V|HMEQPKQvv!C+^%5)jvXt<^0?CEM1q5v1KxoGK9`LJ67}JTK4_{8a_%!?G;%T){ z8Rcb&D++@ACV(V`^XGNlXILxVJF-spqhoVDfcCxg zxcs?e&s(rRQH)i{oSP0a-B*BvPw}ee_M@mHF-QjJpd7w%0CTu1Eb$6qHM$s_KuC~uteP!9H)>*iU(q{ngU7)a%=NBDC^ zI;<8@7?Lw?XR+1irD8)AXZqB~+VIs5^<@~Yub19m7JCJW*2wSS;niTY*yNo)1%YTs zQ?Dyy{a*T*iza^=8{Ke%0VHo^kuoU#mke#S9*oc?bUfOh*M7dz+HBjW}Fv|~mY ztvAOF{s?Sr8>{169{3W!tS#YIf(%=9k;{)$o84wuS* zE!Y+MyBq%##Q!Po8;(uHLvnLaw(FJsraX_0y+Z=K4nTs)RB(>^Vb~ 
zCcjcXdR4k!TF(<6HwZh`Q9e%OLamSGXKY$^9d$YNpP#F+|N2R`a*Njdxx>kt7es0t zSj)Tx+LT_uR1s<$*TrvTlhpq{s_-Ke&houw_bwG*ce!9ZYdW=>o_lW0d!(0H=8XXD zPe7IV-xyo?G!Hy(^PGtvIK4NWsAlf|y#I~*hOP`aL6R;dZt@pHyKaTI@ zIi2~3z=V^Aje6&f9-2@|*eIu!Bxbbskzt8(C^7trg(^l6n7E+2T*U6j8 zpknw>F@lNzp|Uu0i93DsQ=WL9uXw_BWv8NoGD_Q}`nUGVyqLmT4&~>*R|^M1Ix5?^ z6f%$_D{`ahO;WcutrRneMo-EvPAS=tNdtib);}M!uK#d*+Ga)hRd#Q@WB)jzskbci z!B-^U6Zr-A_eu&6dZrhZll`N8Pr6lG=Vb7nRXkaITl=}0K{zs&!k4*mICC3!?6co` z63gS>!8Gos=O^=i%)nOh$|J8)>FGFaiR=*gz$UgJQdatf)JFrP18LAmd8r0;`nc6j zs+2?dw>-F8fqX{#YN4I`as6Rvg1{M?45;+IdE;Sm4BWaoS`IX&&`w=@U&-rJn+{tv zT8CDe#jU2ETidF^PFNIf0YzH%mNu05tkfHwkCB)>q#djnG3vU|s_0Uf_c_(?D{ z^ zzY(Y%f!(31xq|@wG*Y=83=NN(BONo}VR$ImL~iscE_t^8Lw7x`Pf{x$k+;^Z>|eaD zr&V4lov^_#xF0R}a%tOJU_-$FtemXrjGmB=I13UDw1F5KkSa1L>1~3xv&h7VA1}*) z^cX`6T@W1tL$lKH@$tP;^L2ErTkxOUq!ylow9XAA>|p{GiM+(FfNd@(XdgVSxZA$T z`^9ip-gjoRY_amU)Bv)b`jO;lJFA z3slY``nqi7J4UEg`n3+;pJlh zpa=N+-h|!2+k9S)x>EgpSV;MTm6xqddVEQtL}l~J!U(gR@Yrm~#JI zvGGH)OrZ7c?eH3Oi8|MAukEIjwkv0cpZNg^b@&ZK1X5IG1?u+fwG_BG?JtFzpW95h z=ht7aWXcQORXnuC*A;G_=6>q1+Xqcfg)2KiF1R376I8$QAsy9qL+(%G0omn5>UX~Z zr?k6nF=ZIa2)`gTu1L2w($kpkRq!4$4D_0*`*^CG!_G3SI5c@)O6Gy0y`8BmW;L{n z@TsOO?BAc5MFHVnkj*N{g!E>NMInI{JEX5598OicO1X8aD0-EqM_CMd=OUPJ zl2&^&9~^qeriROXw?{^pW{y{zJyiJ9Q%=6Y>T}a%KUIi|`DOBYtU7EvzL-r7M16m^ z0Rw@f6JR9_Ui5XbRorq{>(D4E(iq3JTm2#YipZ%E$>#XaDmBc8CL!w32vzhV@{DIFTND+z!HOGGBC_e`j z7s?8E$|fPgq+tFroZO)U53*Dbfc6Xl$Kl~u(9=Bp&;O)+JEFFrES>!Az@Kji5DPK* zU6RC%zd`6bZaZP}Gg6ZT(X4<8zk}8-sI$JdFU*9A|A3;aV7iYpdmTDvl3myt0w38A zK=b|57fAiCd#$y4e5Z@^dGuk3EVFQ=4&X0Q%H)es6g7lihGyGBq_zxzx+q}#L6 z*H6W3(*gf>HY#p+H2cKIo>#_5Utszzkc7BHQodh7?eERw3sk3zQQN9^rEHQK{iKl=6yc}5KaWVH%N$|3hdq=ubXRJiltQ}^^HDR(nZfmPS zLhFlQ&?q`<3u`->d_ z03H1tf<(4ASE0DG;(u13T^cMIfZ$68I!GH}q(gW$$d;PVf~NU1NPSH2e3m7opdp=j zSJvs_mk9gzelNQ3!nrRDL5KuX@6tPbFN%Osh1}uLgJH7Esz((a?SC2-7Z7i24m~{6XltJPsAfY;-5q z|ym6ocYKf3WP{H#rpTHpw@oA{dSQ)*65&^?e@yvpt>(m9u2#=ntL9+m6KGU81~_-(9!r z57!{W8N{UDYyYhO!GOHwNwCIZ)r{s6`jpS`$-vCR5^D|D#Ye~C1a@==64TJ23-EJ(xdS II*yV555oKvumAu6 literal 0 HcmV?d00001 From 7c695d01cb9918e15c6b23c43fec1c1f33b0c62f Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 23:31:46 +1000 Subject: [PATCH 17/20] references --- recognition/vq-vae_s47036219/README.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/recognition/vq-vae_s47036219/README.md b/recognition/vq-vae_s47036219/README.md index 2d8ae6cc8..85c0636cc 100644 --- a/recognition/vq-vae_s47036219/README.md +++ b/recognition/vq-vae_s47036219/README.md @@ -23,7 +23,7 @@ It is highly reccomended to run only the 'predict.py' file by calling 'python pr If all goes well, matplotlib outputs 4 images: the original and reconstructed brain with the highest ssim, and then the lowest ssim. # Data: -This project uses the ADNI dataset (in the form from on blackboard), where the training set is used to train the model, and the test folder is partitioned into a validation set and test set. +This project uses the ADNI dataset (in the structure as seen on blackboard), where the training set is used to train the model, and the test folder is partitioned into a validation set and test set. 
 # Dependencies:
 
@@ -34,4 +34,10 @@ This project uses the ADNI dataset (in the form from on blackboard), where the t
 | matplotlib | 3.8.0 |
 
 # Output:
-![Output Image](./output.png)
\ No newline at end of file
+![Output Image](./output.png)
+
+# References:
+The following sources inspired and were referenced in order to complete this project:
+* Neural Discrete Representation Learning, Aaron van den Oord, Oriol Vinyals, Koray Kavukcuoglu, 2017. https://arxiv.org/abs/1711.00937
+* ADNI Brain Dataset, thanks to https://adni.loni.usc.edu/
+* Misha Laskin, https://github.com/MishaLaskin/vqvae/tree/master
\ No newline at end of file

From 2260b31c9b5865ae446fded014c982779b1329fb Mon Sep 17 00:00:00 2001
From: ConnorArmstrong
Date: Tue, 24 Oct 2023 23:43:18 +1000
Subject: [PATCH 18/20] read me overhaul

---
 recognition/vq-vae_s47036219/README.md       |  40 ++++++++++++++----
 .../vq-vae_s47036219/vqvae_structure.jpg     | Bin 0 -> 70849 bytes
 2 files changed, 32 insertions(+), 8 deletions(-)
 create mode 100644 recognition/vq-vae_s47036219/vqvae_structure.jpg

diff --git a/recognition/vq-vae_s47036219/README.md b/recognition/vq-vae_s47036219/README.md
index 85c0636cc..bb8932db6 100644
--- a/recognition/vq-vae_s47036219/README.md
+++ b/recognition/vq-vae_s47036219/README.md
@@ -9,11 +9,33 @@
 The goal of this task was to implement a Vector Quantized Variational Autoencoder (henceforth referred to as a VQ-VAE). The VQ-VAE is an extension of a typical variational autoencoder that handles discrete latent representation learning - which is where the model learns to represent data in a form where the latent variables take on distinct discrete values, rather than a continuous range. This is done by passing the encoder's output through a vector quantisation layer, mapping the continuous encodings to the closest vector in the embedding space. This makes the VQ-VAE very effective at managing discrete structured data and image reconstruction/generation.
 
-## VQ-VAE and the ADNI Dataset:
+## VQ-VAE Architecture
+![VQ-VAE Structure](./vqvae_structure.jpg)
+
+As shown above, the VQ-VAE is composed of a few important components:
+
+- **Encoder**:
+  The encoder takes in an input, represented by `x`, and compresses it into a continuous latent space, resulting in `Z_e(x)`.
+
+- **Latent Predictor p(z)**:
+  This is not necessarily an actual module, as it isn't explicitly present in most VQ-VAE architectures. However, it is useful to think of the latent space as having some underlying probability distribution `p(z)` which the model tries to capture or mimic.
+
+- **Nearest Neighbors & Codebook**:
+  One of the most important features of the VQ-VAE is its discrete codebook, in which each entry is a vector. The continuous output from the encoder (`Z_e(x)`) is mapped to the nearest vector in this codebook, represented by the table at the bottom of the figure, where each row is a unique codebook vector. Mapping `Z_e(x)` to its nearest codebook vector yields `Z_q(x)`, a quantized version of the encoder's output (a sketch of this lookup follows below).
+
+- **Decoder**:
+  The decoder takes the quantized latent representation `Z_q(x)` and reconstructs the original input, producing `x'`. Ideally, `x'` should be a close approximation of the original input `x`.
+
+The use of a discrete codebook in the latent space (instead of a continuous one) allows the VQ-VAE to capture more complex data distributions with fewer latent variables.
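To make the nearest-neighbor step concrete, here is a minimal PyTorch sketch of the codebook lookup, assuming a `[batch, dim, H, W]` encoder output; the class name, codebook sizes, and the straight-through gradient trick are illustrative rather than the repository's exact module:

```python
import torch
import torch.nn as nn

class VectorQuantizerSketch(nn.Module):
    """Map each continuous encoder vector Z_e(x) to its nearest codebook entry."""

    def __init__(self, codebook_size=512, code_dim=512):
        super().__init__()
        self.codebook = nn.Embedding(codebook_size, code_dim)
        self.codebook.weight.data.uniform_(-1 / codebook_size, 1 / codebook_size)

    def forward(self, z_e):
        # z_e: [batch, dim, H, W] -> one row per spatial position
        b, d, h, w = z_e.shape
        flat = z_e.permute(0, 2, 3, 1).reshape(-1, d)            # [B*H*W, dim]

        # Squared Euclidean distance ||z - e||^2 to every codebook entry
        dist = (flat.pow(2).sum(1, keepdim=True)
                - 2 * flat @ self.codebook.weight.t()
                + self.codebook.weight.pow(2).sum(1))            # [B*H*W, K]

        indices = dist.argmin(dim=1)                             # nearest entry per position
        z_q = self.codebook(indices).reshape(b, h, w, d).permute(0, 3, 1, 2)

        # Straight-through estimator: copy gradients from z_q to z_e,
        # since argmin itself is not differentiable.
        z_q = z_e + (z_q - z_e).detach()
        return z_q, indices
```

The expanded distance formula avoids materialising a `[B*H*W, K, dim]` difference tensor, which is the usual memory-friendly way to write this lookup.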
+
+
+## VQ-VAE and the ADNI Dataset
 The ADNI (Alzheimer's Disease Neuroimaging Initiative) dataset is a collection of neuroimaging data, curated with the primary intent of studying Alzheimer's disease. In the context of the ADNI dataset, a VQ-VAE can be applied to condense complex brain scans into a more manageable, lower-dimensional, discrete latent space. By doing so, it can effectively capture meaningful patterns and structures inherent in the images.
 
-## Details on the implementation:
+## Details on the implementation
+This implementation is relatively standard for this model. Other extensions could be of great use here: combining the VQ-VAE with a GAN or another generative model is a powerful way to improve upon my implementation, but this is left for other students with more time.
 
 # Usage:
 **Please Note: Before running, please add the directories of the train and test files for the dataset in 'train.py'**
@@ -22,22 +44,24 @@ It is highly recommended to run only the 'predict.py' file by calling 'python pr
 If all goes well, matplotlib outputs 4 images: the original and reconstructed brain with the highest SSIM, and then the lowest SSIM.
 
-# Data:
+# Data
 This project uses the ADNI dataset (in the structure as seen on blackboard), where the training set is used to train the model, and the test folder is partitioned into a validation set and test set.
 
-# Dependencies:
+# Dependencies
 | Dependency  | Version      |
 |-------------|--------------|
 | torch       | 2.0.1+cu117  |
 | torchvision | 0.15.2+cu117 |
 | matplotlib  | 3.8.0        |
 
-# Output:
+# Output
+As stated earlier, these are the images with the highest and lowest SSIM scores:
 ![Output Image](./output.png)
 
-# References:
+# References
-The following sources inspired and were referenced in order to complete this project:
+The following sources inspired my implementation and were referenced in order to complete this project:
 * Neural Discrete Representation Learning, Aaron van den Oord, Oriol Vinyals, Koray Kavukcuoglu, 2017. https://arxiv.org/abs/1711.00937
 * ADNI Brain Dataset, thanks to https://adni.loni.usc.edu/
 * Misha Laskin, https://github.com/MishaLaskin/vqvae/tree/master
+* Aurko Roy et al., Theory and Experiments on Vector Quantized Autoencoders, https://www.arxiv-vanity.com/papers/1805.11063/
\ No newline at end of file

diff --git a/recognition/vq-vae_s47036219/vqvae_structure.jpg b/recognition/vq-vae_s47036219/vqvae_structure.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..56d6f189b2ff6e0f055225f5a32cdfdb6ade2e16
GIT binary patch
literal 70849
[binary image data omitted]
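For completeness, the highest/lowest-SSIM selection that the Output section describes can be sketched as below; this uses a simple per-image (global) SSIM rather than the usual windowed variant, and assumes a model whose forward pass returns the reconstruction; all names here are illustrative:

```python
import torch

def global_ssim(img1, img2, C1=0.01 ** 2, C2=0.03 ** 2):
    # Per-image SSIM from global means/variances over the spatial dims;
    # a simplification of windowed SSIM that is enough for ranking.
    mu1 = img1.mean(dim=[2, 3], keepdim=True)
    mu2 = img2.mean(dim=[2, 3], keepdim=True)
    var1 = (img1 - mu1).pow(2).mean(dim=[2, 3], keepdim=True)
    var2 = (img2 - mu2).pow(2).mean(dim=[2, 3], keepdim=True)
    cov = ((img1 - mu1) * (img2 - mu2)).mean(dim=[2, 3], keepdim=True)
    num = (2 * mu1 * mu2 + C1) * (2 * cov + C2)
    den = (mu1.pow(2) + mu2.pow(2) + C1) * (var1 + var2 + C2)
    return (num / den).flatten(1).mean(dim=1)  # one score per image

@torch.no_grad()
def best_and_worst(model, loader, device):
    # Track the single best and worst reconstruction over the whole loader.
    best, worst = None, None
    for images, _ in loader:
        images = images.to(device)
        recon = model(images)          # assumed to return the reconstruction
        scores = global_ssim(images, recon)
        hi, lo = scores.argmax(), scores.argmin()
        if best is None or scores[hi] > best[0]:
            best = (scores[hi].item(), images[hi].cpu(), recon[hi].cpu())
        if worst is None or scores[lo] < worst[0]:
            worst = (scores[lo].item(), images[lo].cpu(), recon[lo].cpu())
    return best, worst  # four images in total: two originals, two reconstructions
```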

vh-x9~0HlTNB3m)k`KvNBrd-o;nQc zYFu!$elho~An#b~>ueg?EXDy<;*jy@ys4RJhx9ko``LC;MFuS;GR1ksI;(0ZZYEeJ z*F77-_8nrulrSdzDLAoYlY2;`lk$3THJHg^Ta4$l2_SX)(J-Gp?@j@vK83`>c6>!z z1$S?Hh_sG`OygpnD;>RofAZb^7;Qo5sEWW1@D+7*BE zrwSlMGsZJ!7MlPla8$8vzw(99`9wiY|WD+cHqHFoiM7<$@etyIOXq?GTxQxUO z{bcSh#vy(fvat->vPeQkK{HX6+!HIl$ZjM2n1K#)pqX#RiF4&%jPhWPHo}JN${)%% zX0P?K+=g!b=+DY|ev>rd-H(|hkf2%iZkoC{YR;1{S3$hMb z9mH(^BCVQmnX;c3Bh*`(k$yV+Y7>5{-_^Y9vQJUl%PvMMM!m99E?DPVP*9o0x%-HB zx*FwvMb19o9AHx`j#)b1)*-aQ@l1TgjKSBRrvM`$E}6z_49AGEX^tA_m6wfK25uB znW`+ejdwa0sogFzm6mpWV-dwGzL0x&TyJ4O!>Z>-C8u_zuRGLpZ?C$)zwsLk_W}Hz z^Fu@lJFy+#s%fv^-wl0AjJGdLX~UFt7nl9 zOmdV@tCyqxIRE>vb{DIuuF1|S*42h7hUS-w@6!1yGUTn2pKtd6P2AxLqt5G%&fLf_ z6KgwmNvzahJc_$wKBg{I!~AGfwmOvosbRrM(y3Ci-BscZ^yUO#9x z+uP*3+Tc#PeL)rei9jt=lyDshEhzT2|3)q!3Dvmp8zQ@_UT;&@(5ZlZzLXZj< zUCYdDKvVRE0FrJpj$Pfr&XvKoe+H1*q`iZ%dB1Gz=N`~GE!VYq(=LECfxh)h7nG+j zwO-mxzyFWLPXr_XWoF}I09TsCw|&M5#ZJ|6?Lk}H|9f-pCjYl79Pja@05Y;a7s{D{ z@>@H8UdUogfZs%smjs&SVlza~$uvZ(3=my@BF}s0XG?qot^(A^GG`B8vkr)}?O*XeTy;V8 z(6VjyDdYQxPmWm^+l7)7t8YY%)cYE#c*_*0^jR05{?vk^A_jvqyp_UT>pWH(V=B%} zA)z{Ja%ME#RLxvXv@vM=@7e<6FLbm5(81KFl46a|GbQM;409Hg8v8y+{0`a*z;cYy zzt-}yRapD85X{L=`RAD>rbH95ajiu|OhK{F_6eLUbY9eq%Dd4{w)SX`~+9*(ZrtuN@ijb+&u`a^{-9mZK`R zvPV+g8Zlr$U+zOtRJd+MkdAbAQ&lQmnS(NRFp2C~USg{z7mV&avEk~1`(UJpIKW|Y z-a`*GKcK>=CD^wKpIXnLMOcLXZxl(41W@xHb7aC;MYT&?1}TpYYdy(~?CNDflhQkt zq!Z{CwPT@sl3xx}10Bvh2t}VTvsBA8c2sxt?SMYD?YvbHLGW=X5xF=~He!ztSI=dR zFur8kN#Q-D&jfXr+k>*iwDNv#Xwn$&efp+H-^k>hlJC$r!mIWieatA7K;%l}yRr4~ z4heB|U;}fJ(NDJ#2DIC-QdRa{t-F|(%dR4$pv6+J=|Hd5vCYf&3bTEW) z05(^euQ1-MTmocke|F=GDsZl%<$0N*0a6{jCYr=kxWp*?1#qXN&tZvi7oG%#r<;fS5f_(H>AB11gFzYdj0ffo>%+byOADLdurR1x z2#!p50U7Ei*Xb$Sh?Vsow;Qz$15|}O%9}0AxEN+?vau&Mv1YGC@~iY4K5V7Elg+TQ zY2-jvYpQX0mb3j0E!=MIi7!JsXRrKt8%JO0+yKpvr}&M$!gA|_ z_Fosqn_QO%H&cs*uXr)p!{sM=zD^Dchg$=@CtsMmPA)}ka$w%*vpg_BfdmyPA+^nh z+|TCniuf{Uix%FCQ|SAkQ`#!Vg#hqslrVK7mM#&OtN^{;0=EdU#NP%X6`bt@5+JSgE6|$!M-jR@nA#0VJ6~ zf%f2+AMy!ROWO^Vp~a|k?TuQ0e!A%6x5#{rx*F+hJfJEImwMcD_vd?4=Md&-@k{_x zG~71S%W2RqEh87vex|n3aCtndzKuFMk9SRx(aoMM%xdbJ8C|d)J`RrXN*HPg{D@wp zcmd=f_IC*}RH7+3#+R2G@m-W7XoXLFLmx1VV`t^YUg713z*B;_Zj;7mkSr-nUa2<7UZZq} zI1a!3y+ymdafs0msxcuR=;wRmrq`~4vNy$8mD$4=aRw&_xyJ!8QjJ@;k>)A`)f{^i zzK0FpXN;h?Tr^gz^3C4`NGFK59?hc?n4q=hsHzeBp7FCq=V=qOr)a5 zdygY5@#N<(3Ive$U}}xf^amXq;URj9;mDBb#4+YZN><78C>lm+LY^z@!UhLoZQJMG z=4iWIHBCbP{w7m37XN)DYQ&cPM{x_;ztH0BeEppz)yDKR_PECH8WE=Rbf+}?=e0=Z zz6E5fb463;yQ;jZ1?H=#H7k>#E)zf=VRsy5nGol2v2emBSBBmVtAqB~90agV?>_0C zVkwk(NM*bJnqG<3Y!4!7jjnfz&-Wc z^5Gtq$~C1Ef$53cE!V6N4o}h$J>;O1#E76-t!1;{(za(Tp4(*}5v|@eK52Yww`_n0 zW%ADI7c9~^7_;obmEl+|6*et4;ij7g%JH3J(LRNv-UD-CuGO!l<-nEV0?ojennHd@WpWh!>{84Jn|9bI1&Uit&K1J&ORtyArmIEZ== zoH?2zRpgGoI8ntAK&Zzf&oB8OKNUXtBC74y=`GTM@uH|g{NtiWl}~L;NuhS7 z5|W1bgVTm-9sC>;@7C3H-SiMvJE>9umb0(RHwdXaM>05 z9w5A=0p~?)dZ99r)A?MuGwo(;sQ}`O)g0^etW7!Ms4>P)ACys8IfHGkqER2)F3;TGt6smt*I`A|*+GE~@jWkA6kKP&$8I1v`g~A^?7-fE z$FR`H!NFAcM%iZqEv*iwiPUQ<(H&Y1W8<`|LS_sF5RdWD3t$exew^Tbm)~zT`g_Y* zs2C@wGP5qKUs-w!LlJ(`K6jF{Pv!7%$;X9$g;s>_p+!|yH47_KeZK}q`MhJLUH!Y| z-a#h<<;w8!2GIr7*#$n2+j|7Q{ZD2H{xzk56^#Cuxj0FfVYra73dag93cTT%06%S6GU;H@(iLP9wzU;|Cb@!a8tdRZ^+mUk7XXbqe?JS&qS6Xu2TYb2r z%t@}~2y71!{3|my8a9W=F`ZH+P;TvwCo-ud@o(~Gj0?=KJ?5X*#rL<1pEDa??>}j> zEgJztYlZ`t`66mm4S2jPm@dZ1{}m|IZX?Ah88C%A zv>|8|E1(0+J)4NQW7#n<5wt3DZfGw6=DYnU@g}lJHjY_Odd;w#b^xQI_l%9$S?|Wa zuZ9fhUmquE^jCte^Nq=(KkvFcBp_@{63Q)FRE*)TW9y<_2OUndkhD^Y4;d2r)y|ur z@?5x{Cn}ZXrDkszm5m?#PFFCPl_hHr&rTW*J;E_9wWrqDpo6K}TzXxMH%DqyF2;y2au_imM^xSeNGN`AWnDdj~P2(Hl z7VIfaJ2#C8@%!&i$ck)y9xad0Jc2%g{8a8tzMOr1zS7aEXWEcp?;o5y|0TzF(l%+V 
zYpilLH`C;y7BkngZ#9oDlfJN!IAwZf_`yx=6YLzAl;3H5fpZ*4XREW;I^kpq`dXW& zlZ|-l-ixdnnn_L-&@?xb`gpb8G9Y%_rfo!lZe=><%qid`ma?;==niB4r?iT) z2O}y6_J563NqC?oV<5LM?n`^)f(VMxjjJdzHY_bHKV`f$Qn*xMt=187K_LTr-2C>k zVPCVSb>0R^I$q+*yC;y3wK8+J#QQ8&p|!Ox5UCE$&&MgO$k6<<-ED$J8)FmN-BDUego3BmSEwT9q#7|t!xqv;Y@mwta#+REHOHstGI;8${BWlx= zL*ts7x`$81m|N#PG)OYlho+*u2V!E*)~nkW8QpMPh*n2$*gbT}Nq3um;h}|kX8?*_ z(}Zk-nT5E8FUOte(^{ctbC+OJ4eaKeLYwgNjNu$J)M%SwPZGh(o}CYjmZL*i>q_cl zA(jl-RaRK}%WtzvZ%3Pt%A`|g_S0>cv>QHQX&s2F8PrpIw{k7TjriL>1y7sAo|HtD zCze|~s+{saO7ks$OOQ`F6J7qJUHsCptNEbwtZT8ST~&o?#+|~mtqsKcbW25f?KT)gBKi=su95yVgrXZK)jr|n@7IO}CLn^5mA=;O z+SQ_p)Nwdyq_y>@J_5}%Sd24Fw)Gr3IQ{b5@e@$VRuiDcoK@4waMHBA{r?c+adDKu{vTf;IcBRwZ8=6N@%DqE6J$4Sb;Pwzjcgt>$J}O z5Z0lkcY}v>t(B#19q(t6;t*pAw)owv+oosj!~D*Fn5agK-_NL)wa!)RdA?$S6?;gE z7zo<5b%XAn1qbO6U(}MiG8AQK!!Y9bp#s>JV;dSx1DDwb!zSlluP}A%ku#=N)<3Bm z(F2sH-&;>2u={GhtG@HQ+%`dylrf5V`;1N^-bVJLjbaxAHgwgBp!na9S7)c zf+oxj$l@4t&3U=^REIIUxVLyYTze#QktW-L6`NVoYgAc#ygv0^Co-a=5&t+;{A_!B z_s{n2#n=r6-8y30v`wU;5$~$UrsvsQy)(DSnP8UkSYSKtbUNG7r>*PCsP#x<>6qR9 z+&k5U3(X~`%5Q@PYJAyPvh%|~DsqcX2rvUSzhyd{JR*5mpQLZnLOr^+})5V3mvqPWfC_Sq}=$Y&uzNd3Rw(XB;xcOYN z#QRFBrnb%7$H;>Jc2x)VzMb6sUm3Gmt~LR`H`L!u*|X zW9r$jiTPS~QRVHK^6C#ALSE9ok}@0S$I$6SEB~!XjLm0N>H?neq8c-uF9p=FKyPBR zQ$hieFFwN8MC&o(sXA^FPvG(eKm;`z-mWBqqs2bifbR7rH@pd^CM&pP>O3DYwtsmU z8NAC`L&!Y10_ziE{3_w2%Af76>Y1(yxyWI>lZJSlL%~UV_SlS?#m#5Wa|uViUu9(( zN8a{myM7;kva7c}D3SfgS6{pN@Y*I<0faGa_Ej9t-`mghJ=q*SM~NoY4>5|ELhJ|t zSB-33OM!V&p2Fnnu@a!ckLx-p$8|0N?)KbZYR947=ivK*_6KXzT^{mGd}7GV3BLHH z2yv5X^VcrbqX{=J#PS%11|NIGqN~*`O2s7Nl1?**=-2I{@YYg(Kf9%s<=l(YMhvU& z@usXZMG3!D%M*TCxL_i)I(CQ7zL{?FiWR#m0@|86?YLTrNA*^)PPjN)j3bG5WkS0p zo_HJtBFmP-TrF0x`|B0MJIFH8v|eMgLKMz^()dCsf~LS}39^j5Y{jyEQ^6Sf-h6#B z7;YXqW3SQPcUaWczQjhe#-dgJP_Dy+a?|TczQOH98Ll}&bx8}F?rt;vhiU3+PU*fu zH6oXaOInIY{caTJXNOo`*^Lcn_^w~u`N$9p93KK8vo{x(H>&XuF&TBQne$59Lg##wt;`FftjmxbaJE8O{88zqVx?-EWUniKVo|j z?^OXjTW`TciuPP5FxkYwTd@;9Jg-~l*WyCl!*|P>xLWv%2B+P8{WaZJ)XD-~yI^H% zt-rdk%ByY}^tYTrYIyYeuM!c6(1;nO^fJojlBLb+hOkJ5;?Cr zrLuTA%)WRVQ&YGiBO~9PSV5jexVA(;FY*dT?s3o<=^Vz#73kV!x|Fw@>xOXf2vhSJ zi?cU}(*nbMuUH?%pTFcgl33@qvRsd>bE~$wQXT*I=&Aqr%r^etgV}`VLCIsue#Luj zBEcWWy%t^_<{bMbfQWARVcCs|?81@IExyJQxn=MvH69~_f>CFiih%E5*DkjZ#s<^p zEF~m?oGw>l%qq9CM!q5Q7Bwf*#yopVTlW3Ho6lSmBaOB>GFN**>x-`XxuW>W8-rt!i8qr^Y%Lt8SNQ%w9On;ABR;<$7C?Lg zrW4sFyb3TIAb@~IMMG>VMjZ5*!O$;~7tsmZk5NMV0$xm#J)lXWL{k((st>Mo$-|yB z-)0&?t-<8vH3-jDL*#8`F`}X(RfL zvJ>lt;>ylpcP`0d_P?Iwo>3XO>9a|ae4o(cRu?nfrF0DE@y^dQ^sQym*`X@Ba-Qeu z@mprn@N@Y}$*yC(D&^?52*VKi>KHu&@8%7~SX;9-Bzw)NYE~8@)L8qBV_R2Ak;_b-m74^4U2eQeRVES`jqFK#rI zi`EV}X;$xe<@}A~Sr_VuLrYD)eZE@Zn|6(=*)dAVKBJY4iiw11x`^ok zd_>c`sdtvWh&szv?j-XJ=^lIvKNriD&SJarQZNcP(AK;vt{5j6p!dM{a4%G$vf7jm z)iOzK66ER2%*B*eCA70!O`QB!s(Vj^<2Nfz74-j{;e4m zz}TmxVElez-SJ-NslJk%#$JXYW2ZlV>#kN8Kw2`6YW-H>s^vOp_$^o5H$s$fIuvQ% zo>XRf4q;(6m>8IGVHTmmX(TThtwi*IuH)|yKmh=sg16!JV|Kb?+fHKCedM8W+;Fcd zZWyl+?ZxrJ^tPU1#U_HOqH6z6u08K1Mw#rBuRmsNPuC2)tV@a0s`ooXP;^)_^h=Fy zHmNo9E^Twa0iR^*e4{#`n(^w#5YyHIuWh1zCr zCN28P(S*2X%+ET*$1%y_HcrK$AuVAkj2Iq1whjkQ@Sld$*QP;~G@awtrQr@-rlm6E zm_6)prV1Z6tSbipyh*J?8?s_b3eVFPufAoaG?gL5vHl)?KOLCg3;jx-6}%0bKABkg z=i*xi)lJq+#C#pO!)(eSaMYzSEwC}&`g#O;{x)25pQ2)kXmGj8^t4U3{?7&HZzExm z3mgsjuYrG$L05!v4|?;jaU8)c%Q)y-z67y010*Dwt85^EJZnG*Af878_{HNUJz{)D zEC}sBgkAq)Gm`x?x&0q1V-gZf{x^{f(Wn)65O9M9rN_PxAS-vdIu(VpF0=2|1Jzzk zyaRC}R7?|QP6URj1FG$<7u?Ovh%Zj*mX7K8N!i2<5Dwopyz{1+jd7@L($~GIBKkI0 zPb<*BLDJdbc=1R==cUt?D27J2`eaLEXyAyhw|uA?{=4J+q7&uWtX|})>FMAZWbwxz zCjdp}8eg^;R`1vP;=wJJ8($h!W5OF&z8&^0gf}eJMguu>aYv`+vb8M!LqB1RTR 
zM$HLo$9TmpxVjeVz+e`_bpYSkI9zRC8*qodY?%D#SO|w<{AKor*osl*xd-^0x}^m^ zS})Ti>Oaq%)bx9>M*z{ApkDOxzB&)d+)4CZsw9MrkJdRa=QkjWh?Lv*bv6-3Q>#21 z13-ldrw*;Mn3Zkt-Fzt@F=|aLf5*t4XD<`Zu~1`c1l6Tvc8muhm8h3renDMIzF~Q$-<$iUpkBuu4X>d7nB~9TfC-b zP7IrJR8>#U_`0TML?jlP-#DI`X6{oz(o;S$|6k4J|76idKM6Pmk4K-M8~W_O(yerQ za@|H0W!a7+Omd}cI$PCPHu0fuieer4^31Q%poTliOAECMfKP6;fMAeo!|PTG*9h;% zLfxHh8ynt?uKK;w`u_bWF~(eqq@Jtx^?{#7da~jDp1H;IgBGWwl;esQ&i7fFo=LFk zvno#WBOw2zNj+C;zMbQr?2zW{_tyM)T9aSPLCdvw%uTK_4opM}AZD8!X)Mj2yH5c5 zx=r%oPxfKIA+oSke-OBrg;U$OXTuY62IF~-u)j+1dO;vW9&{axedEkmfxRE%2bBH& z6S3oOVC&*=kQ9f1siu=q1*K64c9_TSf&&=>h!AY6wTJ^(6F>?|-35^7w*g?)o)d_< z{-lfLO56XfK@$6m(2So4<+=e_#PsGai((3d+BD z8|FCyBxe~$_<`G&_o4u9F@IIm{SPJoGbaDTUH>iT2@XVmwJ91~QitUD9bv7fnHW+b zs;1pTgZGLwSCX&7BqpGBD?YEZ9${(6>QVqTGlpM)khnNJFpjh9VHE z89F|YQs5aJ5RM4cZ65vrCTqXeMPC~_TW%d#ZjIvJp&FQ5NJYJozUeJ; z7)?DD@Wufk42mv?r3womv7x*y28neP6sT)|q18l4TouagpT;D>0*pYvgwfg}i-x9o zshlt5!`aRMpky@KdYC^-A%wTpJf<874G{Z`n!d(N?bMcW^LaS47PVnzb7MGU z?SR|V$Eu;yS>IO0$y2|)6-Itp3V)XLXUZor>|O!HkMs!FfbD=Mk+W7sSetVJJpzal zN~j||%t;E-da9|FsUO>V42Zori3j|=SrYc2bgGfg!(5}v6~oI7_|Q_H>~6eFs#p0J z7esdSM@h294!$prwcnY0Xq2x6Vo>&F2_WLK98D143}TWxjIkXbNYp@VdnYg?fb8{V z!?`*zt_GX{!q-5?5f$biqyS)UfLTYPm2*_NN1;qG4C;l&gV*hOC1AER0Rqb= zdWHWUfFtptW?H`^Ux@|(D?+I`+-wWx;g1i0huxL{)I{En z5O`WWE~DPc^d9cP;fvWlwokrJ*h$Y^MCFLC3h!%czPa(1s%Aix-T2ct567$wBQ9=2 zZQHLW{gh|rda7*5XPc{O)2RVNplO_L*5Az5a2L2y9KU*r2mL+qEa zz8_@b;(MxF#M@6%$9Z161|;q2Fi08So^D>%!W1MgZw)&0>Lur2c9P zOV#qR+03IEt_I-9xq#|se+fnC!VbfiZJy7RN50QHJ;V~>tosaL!}Fmy*v9gU#j!c= zAyRaF=O~uK4z=65Vi)$qZ#z#B-<1le3rE{*TN62Zxj`}7?-aN&fWtXG`{gC4JhZHh zrO$p{mqLx%MGR{^`0G%kOZ|uI)wQ9Qh%lUOzh?Sg2dKmIII6@R6IK046b-1 zd%3l-;LkZNPuh-c{5A5h&2}|!Ed=dt@5*QIdXIT&FIo!_VN&* z8ZvT4A|C5Yi6M#j-+gw}Qk$&6KM0*F1QUe^PrEQYI@nLZlCs&JaOKwtMmK(O%gT(h z6v=$z?4WL6R%3lXn?IjH(%4+v%#}XrT*z=rcC|q|;7^jcFU)GPob%yV|3Jp+Bf7b9Z_1b@4U`u=; z0cwkkOBlJETwUG^>|Id(Q6xjx4^5Y~o#7 z7Sqsh$y&1v3=Lwn?L&5Zm|ao)@ZqIIDmobS!sL06TIH98udqmra*pOVlVe|&piv9J z3X{Cn9d2i~NqfWe&!cIO4lPTm?kIRO{9VQFS9KA`->q#E{H=}?{id|t82F8Xub@ZO zW)G@|L~alB=T%;r$=NU1eqq3tbC^Hgs?1!N+(hHzHc5MRu+g4i{LXn1_^P<_;(ERM zj}sNi2((UxE+x&0vr5wcVN9Nyu)Vb0;QhFi-t3{C3hR?h&)G5qqg2m9Oa$|~c2XmI z9`FOd(mbzXttbH4oNU!Z(b;yuMQTnGZ)75#^DD4Oi~jS74#^+5aqmObE=%LWtD`P+ zZw_*fkZ2jsE1@7*nsl6)mF|`Vgr#8_gH>5mC9$2>{Pg)`7&o79%W(;Z*6VW*Rd)T7 ztiXQAOTz4TgKZ~Y?gG_TY`0B{Az5wdM8Vve7-kT+yB>*1H_&TlR zNs3DL2v`EbmV+%&*ce`ltm*hEI_$?l=vpcklqx>%XZZ_89>k}oH0KReug2{$n>c?D zGwuW`b7|;6{}0ak!Ve@W*}e4jmb-euR^@HH^J1^4j^@%J&m%^02$ZJUopk6SYyft; zhNj;8k%_=b@sD7J*sBt=F|+U~eX8(#MW>Mi`#i%04!ve z7vuOR5b^Mh#VKm*T4j4mE28qjmmfpqr|eK34zrJ|0-Oge6zrUH+fcNL8DkS5M*&Udirr+j|2xpweKUz?{#?$sG>cK>a@3){a3{7M{(s0AbL*p7!Z$iCf=8!YI215Clog z54N8)SA-=z&kT^=u94uHGS^m#EL=xT>Ratlr~+o_AlmNj^rm?%Ew3QcQZA`>^|@A= z_h$mjzDSoZ*Tv%wME3eEr-g!B76qj_ z_aQH(Rdb020y3bOev-6_0;-T6z~!M9-(%}1ZIAhh%!d+co93K#uP0h%fuiJ6 z>FsvVar)tGGs$SBP=7ApM>>bRSz;$!9BJ6d@Eru3-({UJ&2Hh5&^mpsXL$)GI{|i| zvHuuf1oN5vdD4F@a#2YX=-}w0^@c;1_Df`e(m~ALA^9o|=n$0a)aH)+IT2GReW)D+k?WxXy`D-lA^=$NZLH6&eg`HR6TVvFK zT-tMx)rH?Jj(s@y(Bem%LmFt(1mPWdV~e(1pxFezM>>Kno3zTpb;0Dgvd>U&L0EAj zFCP>}l&SWS@!0*|VyflJed)f4S&kbDZ;RI<|tScDQYqLAZ z1;Jt}+qu&BT^iLlUcXJSZn<`Fw z`Ep!806~A}6>i{=qAdpGI6Ny>UHI{y(zNi)+05z`1%!Y&hi^mQO9Us2DGmUr)qeoWR?KWD~uarc^-ywb$$5(C&RGLp{} z0=szVr37X9KStOyR%$9&n7MY%4@nbFZdRW2lM{MA-t0-Uf_50*j_baoiLC|~Zh4e& zG01DnO!HCjuTc3FEYpDms5&J_1edsiD0WD5z^kcu2|qhvdz&Ll(0(khst&&UceE-q zk{e9Bs^#)~Nqyt(FIPn~;)y4|7A_Rq2lVQ1Vd3$47Gpj=`QQRD1>A9UQ%TPJ2p97+ z9b}RxIIiMY8)$FcVk)yK!288Gw_m|or^h`#`f3`mN(F@&0>p8NGOHO)tc7J6@+#T$ 
zX-3PPeJfS>Hx>wL+(~VIWaV*~fCDR(nbPH99lQLU=2g4@UUA`AK@dQgO|`D`-o;tATItcCfRyQ^?3bRNi}UNw(l z67(IdKW-5J7jthG)>PJY3uD55fCCEpdh3Wkxob{U6hb0 z2$43gBA_5drI#ksOB*0Gr4%B)1R*3TrH37buqBOWS?7H3bKZ0D{}<<6d^h^Q6Oz4m z)>?DSF~=NpLC#Nj<8FWK`$4+>n2aXREt+LW_VV&13+bE# zQ?YkGBwNe0Sw}JioI}VRN+h3TGKSE_~Bme4y-QQomcT8s=u=*k89{PFS@9DqAcSJv72na`7FgU(^ zej2200H^%OGs&N!-AO;B!hobNh&=Nr(!Q1U)!{Q^72uA-T{(iFM)}pd|CNv+e*A|@ z>i-2A_)j8@PkX?qOrKxj6VEf0n%9d|;FkvG`Ymt*sKsj>Q{M9F$X{kQFZ{1wV8-qY z>Lq@4IpV^BOaVYOPZrRXTG6Pv`Y+{^M>6vtvlg1U$S}31AkzfLs77@_;V$B%x}lT{I+r>M&C%U4v$406V(OYwvHaEo zkW}9a5Sy~cCyU#jI$07TuTxV^is;yhw!ku)*O$fj@xwUyfb#u8#8%QMW_|Nsb7fJ3 z2!Lps(-2-0*+K+uyS$|qITW`3W+EH0Vb=N2;3V~0sjLfJ89LW92X*}7AEMpb0ED7U z7yr08$qJlEGr0+X(tWThQqGo;m(yp8`rJqZ7{~!D`zk3|5G+oT92&Fi07*eK*$^rjYpF5GSR6sZjDL(yO$0~9a3&Nq0zW9CR zxH(cyXac(?RHmM@5;bK~C3e)Spy%TjA%RQoNPK&#auq8+@Eui4JnOxjQMgVHKr z5qOhR?c`X}L5SXlP!Xv@DC&(W`gT~k%f-X+^+?sY7q6GuouG?V5E(84Vt`S+L)|h zim%djm+?DUieRmR{@lBuEcBWt$u(7j9i_j&PqL1vC@#=joJu6JuO_soLGO9*Bttc4 zjdjO%ZwzPbi}aRQ`R^NG4tHFE0(vx|01 zamd%~RvRnKFl(B94Cc2XIJ#ysS&$Q3Nc!gM2YSA~`-j?z;=un+hi4gdzm>d@Ft45_|x$ zAnS+M77=|h8%gq#cgQVOGOe=-g@ouyYzHPT>|D;?{R3#NOP>sawc6VcfMDi;Q<$~- z?y9?3MdPi01fN66c+2p*-_nQu&hw~B&rQ8!&nQ81J+Z>Az|CzFj*IJ{j>+;iJR;L; z;5ebbWU!KObuOGO#4^cSh)xi#Z8Us&s#UgJ)&iL-Yl80H7>o1l9(MR4Wk?%GTD~;P z_~g^>guihsPbD(rIMFb0=7dA4#aCJnd%O>#i{ym@HOQkt+yDf4qw<1aN_%cmN(YPD zhFBjkVf04F$~>G0Ji+Se@$}2PDql1VeK|P4n0NVOO?l2)jGPWi7>Zl+llV`Bn`Mj+ zhwmRFmCoXx|EDJJf30@uPFgKi^ayt! zBOn7F!FSDNYoz2pu4@eoZA9Bmw#0j8sP2u4;5y)I+DoMGQTkNjpF%Tl>*1_0Hw7LzF z6*cV`Blm#w1M?zpj##t9yM$olG+0$?Cx>(!L^J=GUgyJObYVgN61I)1 zB*MdeT$M&msG;SLnegnR{42mr%FF`KDb>9=yga@kpuUnS8!=;&o#(%97Q)K?b?DQWtzt)0yL~U-Z0Ek$y;}?S#rt}ndArZy z>^uISiGNPFKK|F~=Q6&)toN?jX<-PVQlhlfg}cv?A$ww2KcoZz$i{}l_c(N5%G|t% zsLxlPKgpr}LT(G^;-kniXpH#_-#f&7fEi1HYZb-u)r9ZAoO4E7B1+p*4xWr8B>Z zeuJr;0QKujZOxnb=$yQnk#Liu{(_o{HwV|PVNN0UgR*2Tem-2N^meik;4Bu36W(G) z(kysz4LV3X$5(H@J8_WO7WfhshqII~jx6*o<=Sq*rT&m|2z4`?>T!R*3sVUyh%N$E zae3qAGFVyQCN7k02c4&sSTi$FMw6zH)ZzRw_aQ^7-2ObzoSNzg{FY+#-|O>>3O+ui zB7Ga9K-$}a^t~YcMOviLCCG%cpjW*DocIaD2PJR8K>${7QT#jvcaJSc^b@DNB*cH? 
z7&EEc{L*ndFusiB8c@GGMk|8x?HN?SINfI{6%sKyb7|pst^k#%cFba^uAHTH_27*> zDWNe3wV(!R^2})R%k&tsGI0-2jGeSh3`ND#RBkLUTpg-rP&7;+k=jb+`mT%{ao(Yv z1?SDY48M+rq}0kP<(H#RE{|)yk^ZvU#fEh}5;!PDQX9E?sbwh{tqRu(e<23KOOZlV zVo(ZmyRT|j)VvvvAK;k6WN#v22yR^oxfaEl7|IVR6|;RHCn1Kb7sR1N%C}zhxFrFR0h+eOw*fU6>H0x(61gS>@yNz9J)8Vzx-sxT z_3#8~M=I8eo`ZSz|EI@<2Lym|d}ISrBmm?~WI#NjpAi3RE?rFmoO?fk-SRJVPBiB4 zyl(Ph@DHgAhb`yF3EE5l#$?FNaOy9;(;#ki)SX~3(w^3fXWK%m3yc%J;P(qDx_2NAgY zD2`|o@TN5-y~6;3+l{4pjf%{L+KJtKL~_V@P`By5`qvK> z{Ns1QJ#gRyAmH*RbaFoOFgxB^|T=IC}~<1kgUC{dD4D_e=|OI4U%%j(Cz z09bLbx0DdTafvw_=UwFAtYPdbX^?yNFTehPQz}RZ-`h!76#3XoEyPQc#Y#6iv|AqGSa%-S3Z-k&EG33P%$M7r*4`T~c`mM2IH)Ir-u=wwr{ z1JspBR2Ci}^=arsw78IfBE3*5f{@9iZ6~3N}7w9ZaE{X{jkE9d)gi!SN&=%(r&4njLrs8s-QlKSz>>t!>Y82-) zys3$6fEF8nUwM1C!yIi#^F~6g(;#s@@7%RZ_{ER}caz0o_Qgs_QqHx35Ol^Qh@@4> zcB&$|^$ce64QVG_&c}HS0aX8<4RGZ3ZahWRGO_U-vz4(~q*+4hP502{Kl#v^bHs0A zS0Bn~v(`xUsCld3mkzC!fkMkrsoBQE00V(o%{cj>^XnwdfkU6Nz9FdpIw zUhxa&r}(rsWQB4&M%}}|)GF!L@v%sP=a9K}$Q>=%W@SubYMx{&t#s_-~boj=t@Pr9=jU6skt zoz*ykmVvgV6xqClUG+T~75bsTTFXzVzPUhgB`ramcPypEAbZ`H=AGbzyGZS512G6% zzIfJWqAg#$O|o8`V)TS)EVLphcadexf+rbCY-uyA(A&ALuD%|JU|af(GiiV9+Cp`M zN}$&3SfO2=#o(UU?{vc~XV#zVeSQOP%OJI6EYKG)TwQ$P*HO`7F}0*r3+C_}oFII{ za&HtXs63Rb)+EsOQdQFu;6(&i^5xGC6K$Y~He3~m0DOLcfRN5hk0qWA#77p-(WFB$ zaU_+#W&G|gpMp)I(**l3y&KO;8y1sxCGUL3|UgPRL zTz&KTkt;K6uUuyZCb60R>-b>GhoY`T){Vm$6*6g%?UItD8?6IVc&LrwL#nmsGr4R9 zkvSYH#1L}0jC<&P65yV02Kv&Eaxr&_n%o1#J+CgiiST@*=D0v*ibQ3aX9o}Mer=Ba!+j?(hv z#V9+_^e{m7x+5h7Sn>o|lEAQ#i5%7DASp&kj-0cpLUU3dL$UEZLxyn;HQJ`HYc(YD z;urH=;vQ`nXvE}#6*l&Xwhh)jJLbnl#(Nxp`g@;9lViH6u{qn_fp>Z!RBD@#Lu-j~f}qWJpVtX85h^pruw+ zbjFop4tAaILyJJc-?a^V^@Y-swGxx-Y3tU=r;xoP3(vyIt*s2mqJw|8m2I9dykB@qdfHf|d}K+02Yi1tUgTX&dh0NK4BUGlQCq0Omb}G6?~HBK0(8i} zxTwRTla#jC>hb`3obVf^!6Q@Q3&rG{xp%h z)pbL|;XQTdE;nC~yFQ`m_4bs^+$m%>5^_R(H^Rt*{QoPsB|XN^!g2XZfH}NOo?F0( z^));Ij@o;LE$O&Tk-T&|xxYD468!Hp`oGfL{}flO=V8YRBvz<4duYvCo0Qv&m{ zD|lfwP6%Lv-Kn)?fDay}>;I6d>lL5sKvsrM(i%bB_@6A+|1rGb{}wYLWPu#xw9t@v zl&?M&Ezy+pA%6k>O10SJ{y_7#31hf+60wDl+^HtKM8DzP*k`n~{gQuJ-^3_(<9Kcg z>*Xg_j{=k)|6ACwp>EHCGf9^_7IX%lrPK)gkGNtm&5}j=4$mU1w*37uCyG7fOYL^L zxVR+dXVizg2A0*LL?^{LWFJOC#&}n3vjRAzjrgYRVHwxfhKBD!tH6%H%&&CJS5SK_ z%*CX-GnE4(Wi#b`ZR4P!&}!-Ldx2N$nk%EMoZX(-zwIlv4ZfJ?yrYSJ-1?xnjHeih z#MSpc9cbsw*`%fIO}ec07tgLldtX`->qtsPpe~EG!f2SUe)#%-ssPvZ_)i!@VbOHh ztu13J!OZ9-oYN)mKP=e-O_Y|++zDgwWR_(S3xQz>S>M(43$y57GcEeN1ZkxjDS=^Y zp_Dc6$=Hv#Q>~BqJJ_cr2L9C_cy_V->3rF;OHBjb&*qt?&Kbu+d9i416?cw!Vv3<`cACeQ5gmXtxwIG$DWsxAhk)m-CRT%V{%79Xeg_ks z&b6e5khl4}Qtq2=m@wdP6|kgZ(mN!-)($kHCp3o_fA2RuU6_S+$S^ha%Q}U5g8XQL zgLSF%%o3s>yviStY-aLJ1u^0pj2~Okii=35AuSh%gqvaf`RF#5#a|zY#-LFrrEQh1 zxYEs7N8AN_0q(5AZ%vzmsX;AOXSw6e%(=3{F-reJ)m_9enTmuQlL9!54&+wSU*ruy zY|xv;uBI#`8MonNiQaHEgy3k$qZR#BmFE{YIE5T$fgABE{F&FQLax-T z*z#0=sJ@pA-#wptlg_&A3e3Rt1%>PQQws@vHzpgp%z*cS%rapT(?+LY6w$j#{TR7M zUBLZtaUfQanKi1)op_K*sY>M8(?~Uu%;dPGZGW|G_^II#cHC5Jy&=eliJqLE;ET{| zT-Es1iP?#@;jaS@6|bcdxXnqzv*3L2!1%CX?Ql?u1d9RuL!QiLR8d<|F*GCwk78lY z^QZ2IjFY+2v4%?s-;4@LXSOWVM2UCd=F^IaXZbOW0-Q?YWmK$TpzLX=`tjPvg1At9 z2l950?Lt-Y=*z#n5?C4ax1~UB$2sGyjUd##IpQsKPTGmsjX6jnK^LhCb zwnYHIPR&>I5HfQf2dLcZ#gjNW_x+VTTx_;pU*lo=jO*DrqaMnGM)T0(0C^UCaV$A;}lR!)?VN7N`Ubia#d7Mdi zVj%ma@0RHcp8<;W655kzsr()(k~#q^(1R2Hd8j)8yOP4LGM=Xg`*CPv-vqz@1YbVv zWJU!x{?k4`Zlv7+M>+N?OaYMKrMZ3un;^}bx?h?P1y$#{dwl$8TS9Kb94m;}l%Pac zY?1!F3Tj)_E;8hR7`2gXiw7bJ6ZmLnvmzYBUutDkPCTc{;`Q)e{E4J8LWJPf;4(D{ zC$llyqJTYn=hIGt8&{esJpQsOYO3AgJM#K~`^v78-RrAhM&W6`sYFKkC6kYpY?A_f zB=w_^YuwTs1E{BY#&vcq8^Yp7GutuBe_KYknE>yvEd*uG6oqtT2Dj6VOT8NkSGk*< zG{(N}Z?xo`G-vkG*~h5(+)wPWyQBm{3a_jE4P)|U7^6ZOz?CFSMf!XIKVlLilBWv@ 
z7-a??&!SFtfl{;3ne@R!V@ON#0lgj$=PW6RE(w0$$Q7_YL+{kI5C&Y0| z(Oqr8j}aR~L_v#P=b!{48lojsPNL-I(A!@(Ld&d8nVdLK3J!f+eJM2Q!g@iML^c?@ zD&*iq^2!pK(tMHH$UJESaf3LOafAHPyaS_X7U&6$oQEHBuu%^(BqPHs)LnBRx5e*KQ4T0qycat%7!B+^{YeSfaXdX-rHkPLb3VScG^WQEK9&lhYF>7I| z&^JK{go|xf7Tto`9QqEC6?C#H2iq@Fg9_Rmrd=kESyf+l7_+i<=9g8MUY{`JE4^AL zP9p@jH>>lLqjL$xZ6=ZgE?pLmY8!Lulvdd>qxHcJU_wK}v!7c$mM(NnkUN{=DK=ZSu+tqrB;#>=sGG)gVPX zdj-41O^w0O5*N4N2!*@BaXiNTHKkh6wMp97hdxG(gt3vFNVWEG+2AtPD5{kak3*E^ zr(AsKWUz#P-oSV?WRcFYc=AeDMTyIcypvaJS#8WH?r?rJ+;x<{EJas!8lg1dWJ^rn|4b5<|@CxiD=n+>=f-mB(FRi?QiZ&yR338jnuSj~c z8TfYUPQ-naf_Cg~Sl$f|EQGVCI49HFklUq)MMpW^a!1?3G8oYznWdWUuSXU`OSj05 zww#<}Vm6QtiTuEOULm*2lLImL*PI*pctN!|zImtQPev7*k~1;)2bPCVl1TAaTTokI zX+a@;84+*hQrHy$x@gE9$QY9Lh*6~4r4f9BzlIsq8@oVXdQm$_57JpLNEZ;r56M2L z(XwtXPoxa{n;pe0?BJ`lV^l~+{@Gx7W>WL7VB1@^BURkYm1;`}UF|gaMhoteg2!)y zw*njN*6vr9oSsc8_`nc%HQQu?;6w|?01X0PD~gENI5O`d+#vZ2x6ud_i+PQ|&+yk> z8Y>W_)dMfbghsbXHydh$=3y25S?ZKdOp@#HQ(j;&@RzEERzZ%& zA+LuU>?^AWs%c7=J|%Za(ElYtx&9%)w;j2TcnC;;6VbpBtPO)O)ABOPYg&yb26Nrj z+B|kwzG1vc?(dW>#p9Y@59ohstWVPoIQON_vhK~JW zs}IF?7~=DE-s*Vv*u~&~yB%eAfc%Q(4ajxA;YNZZ}fTTTQ5j`H7nUM zxHLatUp~-?&Hp%#z5N_FG4s_j($D7=GvH?M^_kGE`1Lvcwm_U85RyI7)a?B(>)cuS ztP^dDj;SZCcP9EO-fcrx?kM$!bQ$7n)F; z^cGXq+ghsDiK}t}Wws!)670sAn(qLUBaBi`A(chpAXqj>F}p~+21||H9pu#7ilM8M z{>NyFji-xoR5{qip|Ym3ZfJwa`}BxE)(O{nX?+@Y4#u&%ydw2W8`Ep-uZSyjZEE9G zxx=Qda&PLBC$93f{;ZAey}0xH;h>MtH5~bLo=+@kJ?w(zlj9rp`OzJ$upF)|PL7!` zQh@jH@w}<{=Z%d6)$Vqt3+4c3ZX&Ajs0x0~$YePUt}J~g+wYL?u{%lcQ}f3Cx6i7+ ze~J%V2NGA2S@dMf9ONk3B86E}#nrwA4bB)J7YQv%ihKmKi!r%mN*$#Ngz$qQM+aIZCk6~LzYwU*HRX0>6s;;f*32Y(S3j5!Kt&RF!@L5&N;taRMfLBDq|VG7 zTr1>WYUozHBY6J{foGBbU1WtZrYihCCIHU@^~fIK83Lk>rXbn@jHG`^y#@_T1y22D zhxs(lVjaZb?$4BCEC8&xp>ZH9l(BiPue2RVqj01tR z863dL05!LsBdSA{9TJTp$qrH%MAQZPm-#U+mUV>Hb-4<5^3;ys*Z&jq1AEp~?U;g$ z&Pes3C4(z7k$&K*L$_auowh_6E{wiHwXvlF*U}t}7}? z^eZe|K)eKNLWs`nBm7V{0DGf9wYQbAmE=jG?tw-N=~#TD=Rcz(f{fH)QR>SDq^PSCf}u#yw&V*J}e99HR>d0D{e-j z4T@=3e@JyU@5$@{6n8TI+bp+3k2)?oefqvfr{C^T3b`TibMIfk z^s47wsftzSV$ZX=V}E%^C3|h%s2r1|y?Pe5@MOeFS+JcXx;VA)39le}N^ng7N`jeeW_JhC7g%J1d`)pC0u&?=?danY>f zVA`I{`K+k5^N*U> zDYt5BoYC%mjLF=ePZ25F@l*mpi;U9Irlgvf@M5)vV^a>Jlh7YUD}LNB<;s6Q z&as9GPC=iN?T8BpTn(<2+k^+8JuZE@oMnylC`j)opj{jcryc~Tn8@M+b{!)uoNacpc>#j%BTi4sRE z&x=Xdva$$%=NQx%WBBQIU1>z~#hmLC&5J9Q*LYFMo6?5^c$2gC_owDLc8`I1k8`@- zvx}(icPY0Xdn=aMe;4z$xuC!n!6n0b2+@vS4t^mOlI1M5^{NwW+Q~Ka_{IZ#fdqbxO~-md(^Bl|`cx%uG30HG)?MX$zwdaQGV0bU)+6J)T_s_0F#c&l{Hp zDqlIk;obe~py%xST<_gx-dp~4p=eX_e)_x5$|~A9wgJioU%#*DdTBkowp{+?PDp_a z&u7cygC>s;c3!%?Kc{!sVB+LRlZu6(PSv=WT@S<#jf_ABOS^-uQjLYRm*$HgA8tVl z`7UNh<7Ft*8IA+(WogwA-4*`MSL6KMe~EYsVgr+5rep&@?5TH-uj-ecO-wGbUEReN zVmvU?S#s{sjOv`_8sJ`W3Oh48T#D>UFLf4Z9klj;tX-nzRF^;I5_HJn(QMj-r8SAE zyh$&rt*@BE9LhRyvE3yV4E=EI&;4We&?D<3t4`L`G&|wtuMk;*Fp3~V^sXB#S6GaPtg%*a7ALr~J60-+rqn2IBs?~OE zYd{!0(>DJ`UZ zhm|C~q4<0b{MJ(O7)>v0S5ZjN!Qw~mEXM>g*RqZOcycOzz`-t8GwxXq`Oe|OX&tMw zc)Q2_OYek8H!BcNfSe}kgykw@@&&oF?uS$vNplczSADKFWZaGG%@5#5&u9>?7uLK& zj*x8G0Rb|6~10w=qwN-YkvCuMlEiZ z4F}gohMhEy!JhmayS|??D!!q{eki#pFDZgkvTCxQ#_52%9yFfXKMbl@n@bn(qJj1( z>UT2mq?wFkR2na!gb_qr+!#;rgm2r`q07FTEtf8c64#RU%(8a^HkNf}goBSEVmDIb z8Z|js1o0P$N^MljA)*u711A&o&AGqzPr&Ktg-8MVVeaV6(cfB-`LuOl)ngz)v&}On z7XYUNz~30Pg`@@7Ki*A~XjNDHt<(mr*s++Phn{5pEEM!U0aO7he}! 
z^+6Q+Vj8U~era~{CKo4X@#^EOD=mh;f=7ss)eK2fz^%l6XmkRyoR*BG*ibpQGmT-d z2g@8TAZ#l@t>#~Vzl(L%pn2uakZVzQt9o>8Dr*L+#*f;-r}+XlcA!eAqghi~{<@+Z zK2b>!XKx%>^s!?;Q6o8JQtB-rDR(JC=3TXh@UiqHMHRsJPwfsDfd=_Ta6JePqWEb){@in5)gor9d=#=~6NSPsXz z;ko|@%bN-&$tgwPF&`vPW+;al-|PsLYZ|mM+nMv-<&e4Awa01u9=a>6Uyk}|fVN(_ zJzEW0N+g=n3FaMZ+W=VubXgWdX2x8qESzFfK4NCd#crERJ35mw98LGFf2QmjRQ_^K zHE`%Xn6h^#rn&VqBe)V4pwfhH^}7BHxqPQ zGnCRfWEp}Bv;JgBn913D9L6@^v@}&T<>id?1*T8_jH`QUX;M;&{n*%P1)T@acT<^5 z_sAQXcLuO`m^neklOUqSdg#Cm?lC^KgSL~12BTR}o4`6WyE1_LWE|hTH$Y=|-*S(6 z=Y%HcySSKD-MjOX|DC6q4vfd+%1v+lPMHOq7vE)gMk@2PMTTdSgldv*7PjkNi7i(P z2FcH6*wc%@$IK@9rqNJuycal-V)u+_@@KN#R8VaRT`o zf|UZWCqrm0>84e#c8v$OmK)W^gi;f6n~59wNXHL1a^D2J4H-Wo?5I~N4i3!yy^emSx8`&GuByW$j$Dd=uqv0_^$*#^-$ouGd7I zC>bx!jd^kXtkieLWcvJ3p^~J>Lw8sdE(D?*awWN?R~3$*a61{$L6bpm5t+b|TwS5e z#0j5)eZywDobiK_ll%R0l5dAR(N3^^#36kUcP2qGEvx9t_^9jNnA&?% z>+YV+>16yu+Dx>9uhTY?_KMVCC*D%@Lm+FuLY8R^fWEb%55U(md??Mm;%FdAUFp#P z)!%Ny%M0?%Y_$)idWM0Ba{~)~zgG-|bnCk5UUIq`wZUS`b9IY< z>J5L>L`pe=MA%4ph}7k=5l-j#f)YeDtc?{nkS8-UIwrsgy?Hgh(dM%s8A=5n%W0BP+u@x{@bLnP+-LDi(0pJA@8J6g z{!D0R%b`)QT0482=;n)Vp`qks`LP!qhLWGLE4nbP)=G%JJ*;})dDZr2zJFK;%CPCI zn){je=gLYwAD+TwFC_x;5BuXR`C|%~J@57s969aHN8ncBVPEn((yy>GN4XUVhapaS;ob3q@wCM`2B&M^~#vfsc>tx=YkAg1cM0oT1^$5(3Vm z=}pt!^#T`=&jU&xbQGva)`L&LFs`DaFd)?4Aj1|UVBaE60Y%+d@(#1Z0Aj2PodReO zmB4NRN&%uShpymD8}m?u)k_Ew&XVrosZ zpfl7pG+s1}ACm_zZ33V|jyINUwg+Bu;0iK&gjdPS=nvB0kWhpKaa{boSrM?};Atf> zHlvXeFrp($2` zENvqcGsBS3x89GU%uM;FS?`Dx4A(eHo# zqZ+_$Vod&BOaOY_$yC}-{h$}|UszK-%={4kCf>}?P`=d|aB-U~4Ubyvt-95rcjN88 zppxW7r^iKm>JBqn$UKVVaU^cju}YS{sZnfa)yUb`LwdahB!Hn>K^1ut6d9Bo5hbug zuK)upLu8M-%Ez!5H@t;R(NG38c-Ty50#XDGy8xCT7sUK;QE$Mv678-P(cJHXj7Fui)I*?cy)CFxG?V~L4 z$tVqd(`Z@@gaMV%T>IK$%5ylgv-$W)rgOsS4Y%DGQIksJ(le+ipcYR+FXjan7NY zqBh3@yT>;^8@t*m=$vI)A9hZ(7c4R9V=00P*afttD~L0}b9>IN>;kJH=B?1)hFgl9 zM0B!8*bONgU~NNR(C8QEcaS0khGy6&w!5rRE7MT2 ziOja7(6hHL4)0IMb0|6J=%ngMX)w7-90+qx&+Gumc9XcC;kUF~6!z0qZ9WgO1hq%{ z=m-}d*DN;+CP)DegqD=C%6uw00iN@Qmm^#Rg(MA!w~-M1-G`<#Hsu(VWqR_KR0EVW zc8!#2c==w9aQjo=XUr)R)IgMXS#@RHy7}eNxP|UC&3Q<&Xv?TL(kQ2g61>MKS}G>K z*n0jC*i27XPk#%mFZ*EV!82c-0|j)?^Ye#||jIyg5|B!@nd zKqFQ`hbeh`6V2kGa5f;abdMvCjE;Gi2g16$V<`bk|8aVhTU$QHW zTj>YT(=dIbiZ zo;O3k62taa-`IHI>fv4IgESQUFSfPjxyVzp4pw{V^f!7R3{}|wd?}l$9sYsWeWjsej6L(4p4F4`ij%9Y-rvlzJ93e&mv`b;fkAmvUDk;6 z!gay)j(4{{X~;`Icz^g!hEe%)nkFKq)sy3zXQnrnq-VD`)PEo<$x_AIaORtBtH-RJ zJ*9;KVcBC;bNECFIC2P>Kq@PMQxGqaMlpftL&gXhkKAm4U#2M$icj;GVkJBMXxp^l z+d$uA`>6LcpEz+4rUV3VmMG0y@wTY5e2e@h&iJjczIh{==I+#kMP}}oJD6$qUy>J$ zG~E1}Sbg6=tyJD9Bc!&~UEfr7*n=(OexY6G-7~GT`Fow3s?VYQ1_U8)S-!50`JCf< z2P`z_mwsl37Gykaw3tcSD$Zd9_I}LapxSyr3OgpWAv=%YM>%7M0r(E4HhBHH1I>T*ela-MN4+UqL>7N@e zcHa3%;%DsG=>wyv@TY(E!*9XQt$F7afoHB;qj^_DXOU;bbr|WSl&da}pp4ONiN|Tp!EjyTWsgqJu zGTZ;m>uj1v#-m%Q=UxpIop{ssC_km)8I~2OYU}Kdv`%-gt*RQ?H*n+HaA3&yYuTX8 z9C0brzP3F3LPd|C+-QcW-!-R4=bY4AuJ5gP$rj%reyY2j_il`O^fUe}FH+5V#uV?f znsIUGPl-q?4YKc5o&iz@AX@Cm7%zxNRW+>_Oo7OJEg*fkc`JlTLTeJzB2WnMg<6um z@!Q$VmoSR6c#linMM$}n&9tho?K3;c8JE2{BC;*B$p?a&JJ*Z3?g5tl`0qCvi$j=& zUatjKr6>VHh5UqvIPU{f+>FrL<<-|3Lb;m3!e^J zZbEfr;K5l>F(x7V-WL~88*Rn{H9f%W^^*-VwH{!&Ev2(O zpLu-aKh{)z`~Zo}_#{3CR~18TRYN6WDr!#MS*;$^r&V<;yx&?h=);yAHkn*ESDbn)tU8f zG#Fh_>c9HP6^y;>(|hD@={|yAUQjz9#k-f2N$ix#7K9i=AyPP+DRdV`9`o=9jOxNqBg%&qUXKTVU zzT_0n8lOUBJ)?U^zpVmS7!xCs~oyiL9k|(qoakB?`b*Uk#E@0p1sN)s%JKZEK_A&S#}3X7Yu)bAcL}6F9@Ui>*^9l5S7(SBhM6?kAn#yiN`1s1F%2 zZqm3kz1-JeTB_Gu+q=-Gy$}P@1yA4;9NfoB$rg2p(rye2GM(d&mctBR+Nf?PLq?>@ zJ&Bd$`pcir!) 
zEZgGIoOi?>PzpDXCQ_@5fUKuD>FsK`#9nY7y@8`4F=NEbOu4}8ea-DKtIZB3|Zgb`(bt*zT->o*(-<*1b+9S5sF?j6M03{=}bE4u2(ecjB;GD%Q(lt1gVFUPvXyGL5l zkzFs&YGThBylJlt6e(VVy&j|6W8)3fRU43y}_f@iSki4~iYyRK# zchr*Qr3aX5ZIL1plvHa*+8#p2UC4^wc}u>$z>W{HrWg%vaV{Ff3{JscIor_NN3BJ?IDsqOrh)mw8hlLi)&PCqs6KG0RKhnq~!}93VTdQ@6qqZHx^| zpiB4!VX7UCfFcW+C>i2~TJg)0#Y~PJ4a%3ux~qX!l_6BMuG0tANE@rz{@Dpxx6zfN z*c=ymy)&Jj!>V~Mm+LNmE>%2m;+;m9b+Z40g;v>Gpoae?k?q#4ha9?Z($%e_+S(31 zpHR(xf$-i^EH6^D$WyVcsSE12)$rsXKQaU{;5hR@iFPssaeZic!m8lq00wN@OfGi5 zN$3v9K;>L39wSboR0(%*9r?Cqx1pRiWFYkdOvxu0b3Mw7eN{i_$0)dVblsmX!7muD zRMfq(-S+uGPSVKh_P%{M-H2h_#oTawN@ zv~gZ>%0s3tSx!-F-^r70s$;(v7x%rV@e|G7cuBUB>8}pVf z4TsM0DzCgMHB&OkR=QtK94h{hrTac#rS$5Ojw9@)n)2C6TYFdVt!CVPqP}n|=^e5{ zIbMPh9Te9B!7QzYy`awZBOMpn^4TqTDy?EMRcJ%Bg>aoqF_`T{%Lp!RBO%PM1T&Fa}h{6QhPn6MK*y9>!R*5{BZH)^=Q>uoD%xDC1l>Z zdPln5CIF#EbmgbV8_{;pnkeOLOYxfQUfMmonvq83pg`N3J@x#dOS9zv!Q8vYL)Gtn zgyFoMI|uV$3Q+In7nV6)_c3PE(HMFcaf&mCBhA zV@4&1$(qVonVHq|b=~{kzkA=$-uw65d;gx->-SH+oM+Zr-_P-V2vFn+R;3+}66|1z zuC>28_!zBhqzb4)NhDh zP6mV;H33+Nh7uM48uyhGFMODq55AA3=UMP*Yg+T$s;N0SoBN$Q?|Zn4-J@`r>T|BO zWudAbdXf@OQ5iE^S%_Cl>SDT2m-guz1-TMW`Ww|+z8naxt(uk?(u4;ksi%RYu?UC0 z6@c{>A(}4lFoaO_(TEVXb0>Y3J5A^y$@ueIj3_cR^f~b7B-7_H@fNdah4dxDK!z|l zQQ|!P13pNYABS^a`2Uo;hfpBm#C}CfkrDN6x4BjYM;b3@McOf)z8tTNtmmeMWS1S9 zSJW&$a=tAo>08parAY*7>QcADd{zVP8oXKR(G#%=eC=3a?ux>q+(%%fyHrX>%3FFr z&I0xw6%!{x{4oSC1*q{&fWJ0jE}p;WxM&($!BlBRi@2;SxeMDo4Ez=PY!d!cN>Sps z=mVsHV!;<*j3_Mppzk4D$jPeD^n{0703r3YqXfSAf%xH&Gr#t>thR?t`$;Z z5(2iDOYVXaT^n6d3A54V0Q%D4+8S#}Od=?VwRi*>N@T4-DPFuu1-sfTGLjcaSWMZ` zXRzJi550|*UrAMUXeRB(jDhviecK5YXIK?d^I5`K$uk4!opU3$d@eDJ z9$%BubczWi3b2a^XGA)>`c2AA^;C8Qy;axkWLL4+KWyJfMqz2LH@B~AFfdzn8(+_G z$sKzgk$uuz>A90qWI7)!Ab_Fa5_GBfI8p#}M#mEXOi!Jx$u=_K#8KtQTjx2)1-?x| zHtX*t5ET5J=bR#9>q}-Et+%|~S>?QJjqFnWyN_fak-Z(6{h+AklN>vP#YT^(R5ely z1Mupsn?B4AQl*?+WNp2&(D7)ORlOe1e`oNPQ)AAVvOb4p1FraShUL`MhWgAmmRCHh z*%edqhc5T>v8BdRPpt3VYzcY0O=gS+2o(I(JkZJ=fWWMXRizO>YHh(yPZLxh7HE|F zj(RwIRDl+S`yHN3q1cVy+4#JF=l5Zap%a0R>-4wYyLGt0ZCP%u)x}bQL8-LE7{o8g z1KK&21TXr#u9+7mDXb#uP=qtzvd|ATtLiT)HkgTT6YUb?;R0Iax-i`p9=kt{H_7up zn>lD?oe|cg|88*S+{mXere*qCDbc0>;!h+q3N*KU#aRqC{ZAs-mKn6(NN98q!}^mF zs59a+7@(Ztq#~@E>r?6bHZU^ac|hq+{>S{+*79#kdMjSRpnN|#vlc!^Tl>`1Vsn}P z3P92=c;~-oPlRWNkN?Ao%&=l*6KZzBkh!n7w@=_2cA&4XfAGzrA=jaUH&!ac78_19 zC2sJ0bl-8&FklsnTg70X65~J^&0=DakHB~6x+E_P(8z(J$qh(J93?o4IJaaLGnXLy zxNQtogyk$aqZ-DzTDLkfu#}uA+Ft446-x0LW32jYGU#Tpb7XzFrB>Cim`=>l3|R6J zuM(fiUZrjjD=CJAdWam5C2c5gp( zs>6D>YjL@fyiTpun-a_zs9zWrKc&7`T)sRmz7Yb6ls;kSZ#d2${3~}3(~L#-YH}G( zGYKF*TVt*>*C6Hq&tU|b1y!&y>P4hWUBM>9HoESxK(=KtJ&Vgp99i2JR+O#ZIHKG= zYkhitpXu?6=k;~B)Q-gU4Q|)d^NCx0Sutb%`eTW9NqIl+@z)M@3Z2DFiS0w~j-GA4 zj)kXnI@w+GOg)8?h$p$ncIRkZNVq+@x7pKLt7NFtQ zHeIdm53J6^uO?4mR`1CyI9Q(58N$A`ompycb(ndBL%KAlZWb?hB+vJE-{%JBL9IZ| zPn!R-^?XWMtg7Q<0@Z5R##Lu{yG!o3_OW*HY8JRG%R#+O$3Bsb3uDEr&hY8HnE|!2 z_5{2c+Q*TlZb0a6aDM{R*dNkRac3g)c+BJ=o-r!#>nw1-GHY#Nk;NTJljp=$($)OX z4YxYvbZt*YDbKCv-u3IUHYvUPzVL{N@pdt#>O#14r#6EuVGx%$g$mX)NqhU^+-!eo zWG~O)=^Rwr&h?4?c*?f8FvOHBasQ{T=|!opa}~U*#O*%UdJ$ z-#gnbJbkS%mcPu~-V%qEO)supz4MT-&dHq3B`Xa)R;30N`&M$rV+K7Rh7{aWqL$%AKfjsm1Ht^P}>O7VKXTsYmjkxh&PwWr~^{lnhyPx zeNR*GpZo3oNUyP#LV=Crq~-QkCvIhcS#PIEf2UR z?)^c-+GL9m;Z-CO#B?lD1k0ey0sM{Ph%DjGtXW|Gmf`r?JRB{DAfchFu{xfq!bpK6 zrINePwn1ZE22rqw;mw0m(A5ZKaA}2bdHAZHItzSxp5LXS$M0MxYKk&Tw%HYJ)3(pH zFtRcIx^HLgOWml@$s=p3tj$%jKJKNsb2C2e-R8CZ>|R=|_qJy}$*u{jV&c~1dy+@q zc6_)k>3e?;?S$4awV?(V<~AyLN*AFD)R(Nvj0VOv>{3D?;To$F0%@sLmYuJb4j(0M@gKU)+63HFtyU$&XlGcMZk(@@dIQ`bdjQtRExAxa1qy|E74AR<#;n; zNCjZHI3hjqDgmx_80So+o%mcmvIQZGy)kVl^O&M42nu3mB^Z&Wd5yqeRWq+ByryU5 
zORHt#@p{(AkMU`zUS(Sz3{yX9bJuvLz0SuswA)hGa4N_+T`zg1NG^@{%a1r2`5O%% zzwds3Rt=jFr))Q0U=p`A^>N#Z+l2?+yB%kyhjW8{4JdAT)K+Ze41Y#s3N?_;jK(+v z{jE?rtqGuu3dX`;*e3pzk`l`c+>Y14Z!!|5nacja>virbMN2!OFA@j*y_wZ{Wc`D% z5v|f%5p0++(PF4%GJQV%abU;;O9Pk49cf+(_A%EAkFAMLzcH{iO&BzkR#_e~9by0a z;&g#pW94@5?cRRB{_?CCuUD#ZEH{R+qavV37(A=F?p1MNR}y!9Z|=H5T}zD%CDb;~ zQ)h7R70CLdb71wniNR0v5!oZ%9x(Ol7+oIio@emyk3s{q(c#o7)c}CFicg^$S_!&1 zY`i`kmsnt4JVMs6UFzvEN!1hvi-LgwjW>f@ezny$^1eBW>g~Nflk4m&OLq z$3$V^wrU8fuMtKW9M~Rvy@tky)_*3h6kCf3R5q%J1PL(C0QXe_I2q3=T3mvvE4c_@ zhLR1fY^UjpMDnFGom_!dX*MvK8}wlh$P-`Y_$AIZ>YmoYJ3Zi99v-sr(aD=1Dq%T% z+Vw6WY;WY&T-Ba&*L8JW`4PMBsvo0v$;DRkC27fN#pj1yUEL1eSgR})SghQ&xm9(?juI&)xobD$dp*(a6t}*slBD2I-RUNUdBZ@q+Fm2EMz69ea~879 zJ*1bXMp%osfvHc0khPX|nlqRpVrN=h`x3?)^z3c~YmxRA+Q1yo=PiA$Q-Ql+37VP* zI>hBzJ7#?y&4vZDVn%xn-#9c?&6DC@5Q<#^Mhh6TOi_0NqLr!OTSK?l4A>aZxdkos z_&L|O>>_tWjn{tFOjn@R(63rbJFoXW+Xe!rLC5++T`J}EKFejBe6>;x@j1V^udI0^ zXTK_8HC~0HkJOKPkhoAYEgpbV?!)a(ed#sO4ZJ$=N2bM#14oIgPPurxPAWUOa;{OV z3IeFu-XLAv^f^*V5aavQ^y?|hsh7g zA&saq{5H+&9umR^0+u`Pb%zGY?(a0Hjr7pB=&VRK8606B@A^LF7O5tuL)H!2n6S)l zl}zxTxU{b{Vd+n)+qX~Vbhd_U3&IA%FVkCyA=1W)ol)9M)iKHL@ zlzP0*dqC#{t@BS@+u!OS{LM!{zBRJG-sTKMqOWDp6?Is~ZObyx>V&-18qbIp(8^ zFbCKxtVcO?*o^?F#NG&9t1r!ajEHQBf1xuYOCGoMU~WU41x9R6#gURC%048QEBEYU z)pLNdjIOgH`4k=y2DTCSR!e$9N}zhYgZVn6BJ*Zn|X1)8leb z#W3M$#$mTBnA4I<$lsJS=#8$(7oA4mU;&rh7TICZ+CD%O?f?krZ(OXjco{e}p=4*? zGr0OXb1TIU37{+2xq;J^Dj+n>U@3b9Z2_06GD`uqHioI5)pQH-oU-f?TQP(r>sOzZhtfka-WOJVEG*%EU{0wI_VZjE!0{kla z^O3OCV6E%zx0wg*r4wBqAAcBpQ*iJCR z=6kcLcmd{$Se6_*$OMRJ~iXimJu+GXkI1 zI5TVB#4Kt3#E5i1P{Arnj`%5c2>x{Yk@SxS@M1v}iFPLgc#3$H_6t@ewuvx^5f(i~ zrnsB|kuXKjIz3bZ6qoL!+r5?3K%V(+mx z#O4}&H4nEcfV#Qo`%n0K9Kx1cyim8|z1C0?Y*8*D#8Mmtq$8b`^|B{`McwJLJ~NzbLSBMWARskhYk<=}*Cbn> z4i+PJ=m;+L(b(1%>b6#}uO+`Yz$6Zk;pdG+5m3WkmOo+;%J48()(n%Y(dCAquwM*A zt$uYxFCHJ;>>Bj}TjAV@Q>BoRGfWHi8lh@tnkX_v z`v64=x9HFIVY&hAz6PuLEV#Bg(>aD|D2@WmSVpsJMl7gQeg_MlY>n2YK`6csAoXRP z>tM+z#=WUbkJ56I@7C-Kk3J8UBpKmC3Oo->$pZNeScnC(tdcF|LMbpM{X>#X`0?R{ zG}u#kf#~D{by*R9#fXp72Ee!G9G0TUa*4DB)knajXt2WTfPW7WxHGILOJ>A70lU!R z1yqg^@4z7bxPh)Aks`W^e%*))=C9YrP-R&$E|A8@@d=fHFab4!hjN*9{JT0f8S?JNcc>8R+zOidwU&>US zkkTjsslieb7X}T2&{)+v4Jw5tA0<~H@Ag3%9*I4(f?^=KS3z0}Xsl^eJV5Q;3&qBt zl~7fuJb!`r*cDTOm9B#+S=jX~7*fX+I1T;+Y^{nqXV` z`fTPZSUbjmjMJU2Gxi$uW6W9&+kZRJGuwZtd-|M|+1%I+UkzNXclDCz=wdKy2p20` zjWVWVMBWHSs0+`qv8&MD`KW%Ne>X1N0#qU3Kz7ci4BHBzRdJO>-F*siHU~P1Kc!at zy%{4YxK5&bK<@wEzJfS#xL}gH#r&SduhL`S$81Q0zCumdNUSU)oCX6ypj*L!wa3>Pyj21zK*&?jDZ=gB$|dMl2}S+I04BZ3cPPT$$%h#7*6HdvWY7YxfiQk zDW-H!xH9MQ!Y=IjF%H;tHTAvNSLxR7Lg) zLbwF69svO>^X^7g5SYL}E#kYqY0RffI`N!kv#4>Q)FaY}Qae z7s!|3#++M8*9jFf)4c*2P)=+Yup2eNtGLE#oo6K7e~Ju^)>@68`NVCRMgaqlvDS;o;rU8`{aKdK(C5| zfb=EwD=CGr60bnfMYfC1qkmx6fmKzm@F$4D(Wk&%+7O-s98X=C$dgSGAC}wzvB^+! 
zQ+yaeAic=oX71?~A1H#(kRhNvGHfd%*gO+0r9BK#&F)Y0@d+{8kFDb%r|91^{|X`h-}2l4lcSOe!So+nSj(X4h1Wis z#+v^|A5eI{0EL496wX*?a<4vm%zdHPnw42gebWzm{=dAYwbK8$4M_gd&N0Hm1HFR` zKl0zyXXjT0={EQFbPf%BqpD^D?pePhiob6@%m0;B>2WX|8!UU&4R`*vl6A0tO9eAufL*|<}2^$ zwdR_&zq#D`UFOF|1jObgU<-527mQ?)3nEpFN{}RLCte9R?ZdM&k7Dfy6v<}G@1z^^ zqy+xhw>8@RzY4PB*|aP6c~|yig%c9=$Mx^+$4rgToM1jinS;?{XBOMj;a$tTJeFyy`GlfwS=*rABtfJ@pn>f))7or{q zIQOq|c{k8t2%dlgiZxS*^IcPeUjo8mq}JQY$e^h?jo^^wJ+Jg#L%BQBwA07#FFUB` zaQ9x`hAg!s)or7hS`=d>)mu^kPqm`U1;VVSXV4DVIiUi#_T3Ls zGDBFefX>S&->j_WpG)%H%)D`I_dvntZZV6|gkCB-QYqW!n(Vq8b*&h;mUJZ%ZIPhM zwE<`Uq}cw=$^Fm6vhYc)Ds`Cf)snjEKlPGypxJRknfk>sXt)q8Jm~WmWqZ#1if=z( zQ1qKzx=oVBSS%U+>r~_ao*9D-T>v7b&gdIspoImrhS9ZMn}Et7d6DUZMIIlj}H#hw@4*?o88^MY*+;kR!)k4&?$HLb4(3&d-U zglgSk#-VDYLB?o&qM4Pn9N`*MjXZZ4DH_{$_txqyFvNDFDU-9e+)PW~Hgt@mzx@>Z zm(4X^S6ghkY^U;pnu`h46idBq9OBM>ocXaLb-dLpbofZc4$Fv^tklo*QFC)aL)3RQ zMmN@JvoQlKAr~o}K_84Ak|a~{lzknHe5%lV#*5%k2PL)#4}0)Lo>Y*5;pwe4Y6%H}ZF37c3qas7jb^p``Tx9u4ZDg~|l zW?q#{0}-}iT=!H?utnxrlKY^5r41Btdy@*OAHKSYl>vf-Z^Sb-y~M5ZBwv(2X0SKw zc-*fYILF;9%~7q06U6u9^zoO4vsSN1i(1whLf)}Wj#2w^3xG&0f~hE*`qAwEIS7vd zg*mWjfx0(KVyH_jw1>%JOVNI`SAVc5)V~DcVkER*Ytm$QA5hL+e7GSQw|>uC$qDYf z74mz+E#mtmuOBt(B9w27XRPZ7!mW1klBmZfRe&?+eGK4$Uj6Zz50>DA-7#4=-?#1O71|)p&)YDn!74%ZhD+}`O7azu6+SAX$eg{ zH-~gCyS6Uz-2SEjcFeUv>pNqr3#v{+Mo6|&fq2avH$9KQ`;2z6s-2s^{*)5HhJ?ZD zR@RbxHH0SZr;=N((^CbP^~mX6I!DiaJlOFvSU4@HcHAun&VBNhK)rBBF3}ha#fWaM z?n)uWeT?RmCftrNb*(x3k3`*ioOdDU97b<1RJKLiN9+0tXNcFZinD6|%8^lSV{el$ ziw;fMxo>*q@vu@b@>7bZS6uyw9WfI1WLQVcJJD1b+LFCj=isVn{<=KY;##q|((c=& zQ_0D1CyVTJXN+Ij3P_9}QaNy2J$3oKnuSfrHKU(Lxt*$?GW+jq%0%j&-u8Xa` z?+47~sKc`@xe7N)rBhWkQBCtllJOyuytn3>RP3=db^jxO^RF+kA6No8 zblC!rJyL)buN|f62+TMuMkUoKD7IRlYB7H66Xdy<#;wOHEykPs#BM4HDmKq(wgO#N zQzWN)V%{#n-(D$Y;_UWyP9X6+Gp>cRR?p&MiSmY+B-gDsK|veNTm;9qH)Ap6NPF5T z7`-7-2$Hc)ZOQb!T6Ac&;XZ}drl7}e6RPnUik|+!33|(_qcba{BGmuM`&OFS$v2d9 zOH>m9nRK0JXR2$;>!NkC+e=LgP9&hms@kAr&I7gjbh-E9xd-*b=gOt++Adqk9cceZ zIQxNV?^(>IqszYuLJqWvL1!GDg!W~+2mX}On3rwFE=OO{*HLyU;K%P=juM$6L|!sy zoeREKVBL0a@w!i(3wlpmc;3PpbXzlLoR!AFI{Z=8aMu*HU(4W*i)p8N$gEdw^ zrKB^EoRqx|V1EPKljNcSB+Xp77hwfr=aWraIORaIBPQbe6-eR!7;zc33sN#L{*Zcg z{GE7vSC?k0(DLfCT8m!_ncEtz+U^kEpu|0w4eQKaJ)}Qv9((kX|7M5MueEwaZyPAe zg^K^8)Z8v!OKbcU)U$RXHOdksk)&cpzrw28H;g`wh(TSUMk7HtScR(OO=RHS4&e~n z%D~VcAw_#UoKJnS6FFvnOX?PH@ctp`KMpLMLR_}OKk|*4H;ZD^bS~A6IaNxc=r8>~ z#$OQG0E?y<1h|6mD|U#^{o0B$r;ft1jU-yt8HMt}!=jzy0OVuo`=KXlJd2K?*{#Qy ze;>O1GP)`SAH&JPKLx-nch=hW9o5pl6=@T7%9^jzE^ntu>UXSW-AYRrw;%Fa(0Mw< zI+Kk-k6~$I57>Y)|4np^qJVr8?QZq6;D~%$TS2c&l;;Q8{St0%FtxO^-e=cu{K97L z$I4UU8LUJH#Y|wgvg9r-#k$_{+)SMP*wsY*7@tk7;an=H6z`RQXmxfb>9h=Zrec6X z*Kv;}dkVO%%`|Cb)IDAVY=V!3No?O@#CR9uX}8Ul$;U1tzRRIFOOyx`c5gn(lWZXn-Q>^uZu_$X_^-j_->xJ2|GylgVGIRxEu7BDeFM z&YUh;yoVzN#KX4oc%yrFWsx=0Nx!TUs;C7PuD@hbz>X_qwPJmNy{s`+M7f1L+cEBP z5=T=%0NSVLCY*U6kalTf-qdbkE)ZHHd`L%|plW(kJ;+#JfNI%an{})zF_(tqwn>(!O!#`|me6CtS>gR!;Q;V3VUO@I2{y z*em9v>QN(c-6ud$xQ zll1hDu~#ZLH|h5u-%Bk*w{8YCj+-p@9lc|x@umffs;b7$p5~K`u$QwUx^@I=uxI3MR^Kf6i$R#f*JF(FF~>9>ROqy?CQ#jzcT9ATx_*NVqQ^N*<-a~2?~ zEfynlj6yn?e*o3qL1m@j;8508Xc?Fb(qmP%`l#QxLK+k`gg;iK6LQP0f}!C*_xT{y z)Eoy(PP~Lyxpv(i>Cd*52<@v!nUM=cm(kKIf9b~00KI?`T{L1l9Jx_9k(1P z#u7$zJeT))^&1f0(S_bwO&vjB#omDCiC-!xNTHfP4@bsZT=Dh1iSs|D>Lg#v22$tZ z^{kAJC7r&NV5xh{$*NPARoc3;z_N<>Lw!;kfupyTmc&N+F zGUhy;lPARP}%6p1KwWp^~IV}D=pT*_d>27Pxbt#z0`m2=f%=UJSegn-Vn=+ zpmwmrlx_ZlHA5Suw%AVX$Fh3inwrqgN5ogZ#pHr{%r3?K3wJZ$#V#@}v@$RUlzkr6 zW@SlkKc(oqOX4l(j>Wp#bysG7{iOg%7LaD-*Nn(Xs+qFn$u2@|fjCT(9HvtrERY>e zLfE6#d`&pUqFX!Itc`^@WmfEcuXxd&2B+hUU9pPQ%IZSWrzB{OQ+qEd3!qYgEE}t`P>YE7UYJAM3drHY>)rSr?xPg2 
zKvgay>{=$HC?gY4z5++?p}jOjEU8xGq{x>gT;T<|no~(j&Qy7JRT{I7zKV(d(d$Wa zSOE=nkETQ~1AKSfV9aPO*tTPhu;5sE69=nb;5Ssaj%f1~ieNIuu=eOrsgK2jq@)?{ zHA>eXkN)5nU-;eMpRbE#%RlvH;M)qTc)B*lU}{}fSvI>Z_r(CZCDerJ76(L z&9eWd%VNiWOUOg<4Pd>lTfh$&Z55wK^hJhbuzXAc8)JemZpfC4Nh+z69;02Qu6z!M z-(dbCwwAOZt zr==ry$Hi;zfk$#fE?T7>%=38e5MI~aZ$*q+TVI^GJ2#Ct$B`(UA6}m`%$FFl~n`2OSQTu_W2F2oR!F@1M(&1vJ z-;4pZ2)*Z$X$YYZzS1w=h4S|P*VO{7}x8?W= zBkWq-hvb7l*08t!AQrB7TBKTRl8TBaL5Z$)`y-Y;)147ru=?yQj^h5H)**JsrSBK= z^3aQ{K##BAojC9@+jp*74f%Fmt)Mhuv`+Nv;DecNJ$@dT2Pyc19!WIMt*qpj2Pc4xh@fxB75* z(CVej<;h52Htn50SP$;ISj}0%@@5S{+`hcB%WsMh{ZXUF8BAcv`|+(`W?Z|?a6yG5 zBej&TBf}%sVLIqnm+ZHb?>`3B4@HGR{j-x%ZW}H%s~%pn#qLnzh6^o)H(OP=IUYJJ zZMS;=$&=D2_e)7h-829E-v^ric)j|!-zfvj@)l;=vb)gN$v>s`L8G^@s+)gGF$w;1 z%LxD`2HfKQc{}~TrLn*Kfb@6{0XYoQdCfGTZ!<0qi@|I8GhO0EgjV3?!AUBnJls-2 zJR?xP^QhtDEz9lY{yQxv!ICuUG1RaFG@jM^wqaZ_tJC2U8rwv7G%n8qWNL7}Xz%1# z_dl#GFAcI@SUYf;1;PRBZp#S0^ob9Wu~+V2BJ6Kd*B`?tGhT7%!;h2C?ZK{k9@vrV zy05V7TL19gw)q+1NkKO%QFBvcHEFp4%e+oFR_zU`J_4uc%Q#19fI-VA*P=~+Nu!a=Q--4zG(l_4%mHPMX&Vk^(w_~8jp{WOEiesc$K*X|5?1N-c zRdS4QU0nySLh^**g}Z@jA+%BKpw(!23JR1oUx5CY6-2y3RL5^7$2i%l27spuzjac% z55E|>z}p@w!mOn1?yr>C+Kc3qtFC)~99%+FThZfdd&}htI z$u5c<=w}}Hgbs;Zj8i_nnr}y!R~nY|tWmJ{(;^R*t4ekLqd$iK6&Oon2irX)6?i=Q zHe&(gF+?RS7VAiUC!DcOrfT%+GF704U5d@lIXe)Mz)d&|ntNsm=^OnJEVGdRJ0jfm0#pV&Npj%>@Rxc@6Qn0DHY$=OE!ph3O2U zy+JjnZFM$xS?d@{&_#N;Y^QL6Z6~ zc|1BOH>4)Q0icCW@HFGxmj^B-rF^{-a6PgxOMuqI?kyd*a&`7iQ(J;Rzv__={GVO` z8KH_mwwZ%ifo=XZmNL^sykGKwzy&V=gnb3EE)pq_<7LDFPaI$1%@a+|5^6{iQG)gT zsVAV(wSsNAXujl*KxT|Y!yfM$gVi5CmC(WqCTeo-KZec@y+0w#`Tlo9+yBugWdWM| z-;RF(xC~HGTY=Fj+P-iGOu;rdxKr$_I9Xj-zn;XRd{#akP z>(ME0wAz6Uc1Loh?f3TDIj&rCV(G5^*yA0tFR%&==3!M93+7>pwtt$3t!Unlg}`b8 zdk4gh)*E#)pb)susx-qY;S2%~2y}$IVJMcWI1t!4Tp(aF3Id@Dc~&AtcGS&{J~GAB ztL+1G+7TDA;RCI=PYbOI$)2b9!SdO)u2NA;cy9;|#5F+cPhGqR(0GiJDTm=1Zhm^U zK%3nUh7*3)56C@Y-Ex6?h3k8_7e>{4R4a4JSG?-lBJ79X4qdw~ZCIKMi!3Dxp>HgB zjcwX&IokocWh!s+DtF@Gdk@4+tY2xn>fK;_zQ?XAHBV^yZpOI%JoQ>_h@fsGWpGLFk6b z8WRrLVXV5P4W}%Y$~s#96e;17u;iE{NPW;4gND~+#<=vF9n8V>-0vCjFzI$KtAOLL z8vvsiU-u`y3b`uRDo$M?9IV8sAr_W`MPvti!O|=rp0Sxu33=GS|H~pGK-(Bc=1L^f)UFvD1>d|!~j7<&a=LY5BLjC z*u^aFAwTDtMdYHEtU6sT9Yg+wLMXePY~=x(Ia^*|HZRI`PPEjNfgC1j(LH)t<@cYf z+`1A0@ht>0Y}M8zRMz$$#S?xo*Z2u9 zF0M)7csv0dZQEnrVsEb{lljf1X<)>-*UUe6peDAwYj(=?RLIJsucg1g0kbSvb{gbN zZ6y%dz}%=DL}xHSECpFII^f@p5r2b2{XcDV{-y#HgR>W1?2Z6Waw~Xt{Awr(gV0*~ zSXB$*e{j5EL_^>U;3-QGXAglR@WyL@hY~SL4Ky)I0{0^NjAy|7%yq+I*Ke&(_^t38z(_MLAlur&=MwQm$lFB>bE7glF4mMm-2k`)l zt^nQCksy&2#A^fY@96E1pu_^z&*)>;_p_WQvgpooVIrunY!w#-MiOj`SV*EN*vJ!sD|$FjD2KlWx6G_=hhZ^AFyxbO8_O)od$! 
zDoG}c(wkFF*MYnH=bL)@%&Q7q{>52Z;R*`N{uupqMHT_o%aUU^k6DUTfuwJ?!h-J7 z5*^6{7n^08J2kX@xwI7t8-X_iYpS{U{a7|?C;Kdk+;SwTPfDeT1(7P{SG_Wuz zR&4!D!V<3s1S;G$0M)Y+U|TSY{561c8nCi#U79S36{rA_V&+m{A44R;Vppev$KXZ* zRu@Zny_s*-Sblaa%Uy;w@|fz>BJ)cEUOD*$`<>={TjO-Y68C`ocYiW7Z1`dMgDH*OOK~et1>~sfBM-J$9*~XO zXd-FJ-m-r~^>lmSm>*>|PmQ6hojmChB1opmp}A-x{F2is_V9^+QVqzQ+U2zD;B$MW zJ61cY@b<6PTS=Wi7H4lE9e42_b|pnRzNw#}bT6xsvl`W;*umI%AdO4HP<)XL9^c#j z87$kxNYNz16;3LIWoCxOlbH{1a*MEXdRc-+LrooG0}+nR-&Gl<)(H87lD5M7*!Tgd zCM5f@_y`=|LR6$U32ah}r-(}^h62p!+_lgNF%GzAPc??;s8{#JM}Hs6KK*T!9$S(N z`zGz=i^F(*AQx317qnil;QKnl^>y~AKAXI~tGl!EH_Q_=l0g43tenfu)_3fnZVpr= z7fuFRsnHfwb^|Cu1>fYT_LFLV3h=`;We*O=$La#hMo+c&)$aaP-3?15O}pz%!oIzb z9z!vyErSr>OJIJmA4ojMG~-oj5yMf85(37>ee<65`-q9PEl;IMV!|7;s^~h$#}8l7 z%1QHD^0vCz@Jo5PQc0bnr&?en`LxtG+TbzFT6g4aklG8v8vJGs#b2Nh&!r;{LRV~+La!tFQeFqaJkLmW5pv2N1NFgXZAh&7YW3((lngWGZ* zQv-%=m>85miR_y#(QE`u7jG^t@seu)4W=>KlzZv<)^aWpE5-y_S@(uA)TdH`#iS-u z-k2V!oD5wa^$TUKMp|E-M`Bz%D7%W<9wV35ht&ekK-43dK7w!5u_&hUR!2O-=o-doSH_A-PWjvL$Zb(svrCxCeXJ(?qLYXO%UkUfIPIH;?hxE zXF3KU+f}CZ!yZmBH+I%J6zsVwiWF~w@kQP|48}d{jf!JMKcI(auxn=P=EFV>)%qmb zA9b4Crg!|T-^Ng>E*MzlGSKDvqw1SI6c_T_U9`Es?;^+gk--SKH__K9yO7kP8{=^! zDk-`UPrIdk?NrCRLbpR+-J5z+t*h^p>gn=N=Y08M`NCudiNuK4w4t|L7P9Oa>RY<_ z>o&k~0*SoA8c43E1DhejAgk3Gf=@v2qWc^0nv&}v*Wnf6xujqS%<`^ukU=%%9F3YS z{74vW9pZaLu#!oz)^rreUi(hhKvWzBdy>5`<8$x(0}JPDVNsBO5O1G`TL%V=iT>_2 zTNVtB77^iNQ$?5gXTdv4r@qLse!aT^mn!n5wqNS{5u$knJ=t8{^v9Qtse$UL2eyP9 zdUs3P`J|5&Y`dKLjs+hE>hzbsHEO7VpH4$x_T{XtgRSS-`Ec)ySV_^g-#}Nt(P*9$ z;hqV_hn7SNZ}v>r>4y6kG^J2&YrzG7%JA1cd9EYywzOwH}3Vq@@JT2;cYGgD$l)eXPXmk$C>UqAqKKA!0LtZVw= zbLXDfN3-(I_Rs5k4CS4^f~l4r^Z7F=AqfB<8f?Hg1HTPrqXXa?-=;Qvg&IwH>PF2b z&hH4pxU2KHjN%Cm=-rt*v>AB{#_55laD!T+BZ);()rXj`qFA=izusRV-43yv2&2Dk zfLdM>yuRQ!(G!SYA=V1khl|E<+QsR#A5vc-UV<@vl+P7h-LHxVw5RA>P{U^Y7C(6| zV;x|^0`hgt2~c;o&c^0K+2UQUZW4Bk#|zG zMnPQJ);}$sM}}(4tPkHBckN|V4m(}Og%J~+Yhqvb4^|aKMHyrm**{O` zHl`(Q-Y5$mg1`TGVE-<;>_3V<>Q2f|WK2LnS0oCoMhlu)pyXVKUr$~jLBPIZP2y^* zGSbGTuQFTC!%Y*Bt)S+kv1{6eDv5R4j{{I_FRRS#KwdzNa)E^Y=zUboscu90-BYjX z%brJX{5Na8zkxUXOA-8^)_PoY6O!u*XT;_8n(gAMG1R8MO}ZXQd-(Z6fL!jW<&{B4 zMp~@hnoPc(nxwzeSl7Y-l;tc~`cxyW2*maDkSuj=M`mE4+!Y1qQChP<6GwJ->es7K z+b4ShB)KGt)o8N)K9!~mgpCvj24P1d541P+@J^*rMj(<9AOMfJ&;I-aU_HQM7fhAZCC<@r$LVcg7hjuw{Ui9_vb<2km;8>Qu&zU`Z;qLuIl!b zli!Mdjf@PJdW(b!E&}IWhgR3Ba;|B;Tr)6Mu;h_sk)cKty?bWP)lC{|-RHtORbs=u zz3#Q~baW&4_kOo_IG8hI(Ls9XVXtN-(YL8fAm5g0C-}AxdfKL_zRk_G<$A~*C4<2f z>xQywd<=7w8|7#h|2uJmd*CX6U|JmeUPBdb7jK4EO~=u6Tk^Sn-5zCDzW&8^2O^$W zR!Gy~u0fhui~iMC5mf?IGX~;)9TqhF1;RG<9BQ$!T}Cc~F6g zo#$*nEW6HiHjT&$kXSg&`3rp%R0}FePFUz`n|uAC+;3KoqsPV@^r{vvY5$k_(=)O> z{$lK~pm1H+i*FMx+d?cZgM%sJ@GtCq(twL5kxb6nd1>Y56}1f)C7>*0?aIAA{asLZZ3Z8|vb3MKw7&YTC~i*LtmupUMGS8xVUcz#mZ*%@ ze+A5L_<{LpE{4nPN;mbl^@`h)C!bW{HGq4`&B4FY*>=4cm`prQn4Qb3zth-L`-`ndEUta}Cq7ufmfDTzPqwT28$U)8cZAq>(C|^xK0P_gDB3(uvz# zA@gTzf-g0%0(9o{7=U&ReUa1xASCq%2bO5L8bBx)SegVO0l6gvU<7iTF9((B1qYoe zG&Ub@?S79VfnWJLGu5w);I7V#k4qRYa2d+nW;mVj@!D7$@BN5#b0w)wL?^U+8TD1E zh^J7G&+3qy;^FdaHnk<;Dm{phAu-a-33!*)gyeUGxTQ`wN=*hSi(PB$a}=-7c@+osZgD3C|*PIx7uy68GS1szK2rfY;X%d zvQUB4Hq<9gYY?u~?<>2PWYn3`FL6iS6r*mR)mxl)d*pRhLDmb#QUy5{B6idFQ|`49 z35ns6!@G(WY6DZ1nZub1&y6yb*71?YZXROM6fIYoVEo|zF-~lB+Nsx8o>k|+jvaM2 zz>2@f7xd>X(b@IBagI97yPHiFX`wP83wLtZ(_3`{w^=%HGLWr~q@w&pxuR?%T z-bAAH7or2RN7evCpZ~%==3m-Jz$S+O0OWw*TgzkOY(Tv|!Uxg}A2d9TIJbiKJ8(R- zqemMj_lW*TLa%6v&jMK5T;g>0*iA;TJRcZ*(%K6R8&HGPb>SI7-Ia}pWhx7LK;n+a zVrLVK7TRO=meerQTW0?8SbUU@<3V>HjU@n{r&flbb=8!tRE>~j$ z0%qg#RW=m5fzI%bWDROtP^CQ!9*{Q}!8GeX@diAJbtcR(88p;)k>To_^sinDBnv?7 
zU|cyi)pi+V+4CHG)@qH|*!thl<3HpBfR+8n{DBRO?HVU-IKl)0mShXidVC2-QH`)O zpI-{^b5)tr-E@Y&Ab0(QvE8;B>gzm~*6g&n{qf42AVu&xhs^ok+RX~?3(^zg$Ewka zHh0(5v>ATPT;}v$t&e;>Ksp}(RZ|h>tTQCtJSNVp8Yb61Ii3d;kS7)GzewZVf9-$k zYy14@)6coM!&wzlAkd_m9gbl9VJ=~k~4 zbS6nLVNMrE<_X2mek{`q`mw#|n4ZDd6l&jiglXffwKxURc}xO2HQO8K5g~vU=X!k- z3f;GaqPzf*oEig1W2&#tKl;XZ0znVDRz{x@Tg)%JyP* zgvFFQ>pU#`VUxw%*Wx$qh~i`MM>d86UH-MKP@mv_=dsv=H4hd)eQou_I6JfC7RD*{S;U`o>=_FIm0fbE&x|@YYAR77Gu+@=H(V5NFk=tWI0*~jfw<&^%(@uW((+*hE&I6R+ z&hMO!RJ(yc4vt>mL(_b4);xt<*7Dzl^x|NFwXT~wk>flx!w@j;0N}`b9J_LhEeyV& z7f5s(kDw)kD}N31X=7PG@^34R5p6O_HiO@WmuWyvL1hhg*Wme=A03KZh=bx%6t?O4}-!J%!d- zgbZBbL9I{Mxb9;*I3XpD10b#sa}d_N(yap+?^f(?I51~mxOO)>ltcu^jP!4+Sy`

bP$)ouVKl-<hkg=*A|+(AK&i;jQM?){C^R35lgoUxyL$UVh%|>_54Vje9ixJ$IVc0o#a# z{e{agZv`|Tqo@xd_2(uNI+GmsHz186*RTd_l>vpGuG(88TG7oo(6F7A{^lrl*!6n= zm>Ii_-gi>DI*JtLm(SUYh*X56fX4Nrk*QYQMvNr$bCAR1cm>&NG(M~tHQ3gGb)(73 z$%35f;!$@XkX#Mld2}ArU|Neulk_ZJT&vF=Fk&szBpN)n=(1v6SdWF$+tzlwHbF85g(d3G)aI2?+3dV!2o545C z%((s>E|y{$I)Jav2TGUblTlFJ*m|FWEw*MFA6lU;0t=;sXWDgMy%UuN^3GJ=GHlp} zCgd$|sJn&Kr!f2~wX4yneAzQfXrHU0oycYXohy?t{#ATkv1hH}O@AIW`L|B9b|aK{%9t$uEF`29&G}JqUq z0AJ`Z$hUu6oOCn$_csz8Ox_JXqwno`%gdJ=NIn!b;D7Ga{NLxf{6i%5S4(vx%Fomq zu(coc5%tRG$wDi2vBsP3z0KAX(eZfJUZ14;BXG8swSX4?)6^#QdhB@QE*pT%AISA5 z4S6_vZsu+*qaFyN4BgSMA7gQ-n6%lQCB&O!#lpN!A& zMZP68f)UYGR$VTQ2wN;7+OL@|wAnQ~rUHA@LnBy?T0O3|s40CAr8jB+k1*AC-}-A)5QTE9_AT z2$sA`J1ozjCFHZlvS+t(IusQI!@-*S!&1LS%iO?sjq&U;nJr$k8GEY;W1}qjIR@p_2P&gBw(u-SI)@35C57_bz1XqX~k%rJ+uSK%7Zk|nqm87PuG{58+ zG}I7T@TjV&;o?K~!*hwwnlG4LW{gj<7_`5$HP1)rNKUgg!Zx7zb|wL(XxE^9Qb8uP zubXIi1IyUel&dh8EP+#VH%9bO+K72a#BYfMIpZZH!+T%V7tO(r9xZ1Or;C8eO$8Zc zWi?XeM$#l(6D;YW61Z%SN<4cf0M7?*4OhU%$E~<2^$N;3X|HHZOF0-F>GEza`)Ava zZ{O?612(_z{^atGPYnvB2k7~&)LMrYKJX^nf&y-%F>0-F5H^_!b?yWcGYS@Zof;ZJ zU094zn-iVoo;i2nVmfIba&#(nLJ1zX(MP1>#8LIBbhyo`;0;%ysUR)I^Jcns9L7e& zXhzgDrXEswNm;J5qcDhQ7+VgTgkkvh5J+IEn5VT*ym4&(Q|+N%7ZRKjeg?#MebYs*54e%12W0gZnS|pFmgz4L%a+KEEmQ8?sq-R*PpZll zPMtqcXi`i5cpXbVO=}~VxO#4a7De!u^lkAP1w(W%+&@Y^`B!n-55UFw%YmkE3B z-HRezk}!}D>7qTGY@iY&g?|`a@a2K9@n2ye}8Yl^{i@(r}b`d5p&^|6_aEWhjd^E zT>@ko`LqpKU(BiD=n<0(VM-k zep6mR+0lsd*Xy6*!bc!v4+mQTrBrDh6Tqvr_`(Ov-w-Wjdl2OonrdiBZd+|0X0~v| z|5vMmx@t5>Hzaf)xFT;_0Xncy#bPem@(0qB$9E3*O~&zYBaH3fuGs?KBni7Gc=z%m z0}w&y0xT;jp2l2)uDe`5;;zmtXFh>zeo|XiWmbqu2d_sA{4ds}IV4&_II;h2J9FaX zdganCgYS;<>o{7|Q(LS1Z&pIGVz@f;sbpDxOLWC(9{92kH}mY!xIT{HQ4GUJ*0hKb z!C}yQ85Ekl(kRgEK$|?lGfFBLcvwzbf4m`4Yo)HYEz)zi07Z6U%b{~xZ-+~a)!?nI zo4!18?|P%RB6<7`MRHT+4rD&jLi%l|Eyy-s3v$&H<8)&hR06SRUR% zxCEBLK59C3F4JBMVJ*nRHUjAHlOg%XSO!wx9<~HKFC&}6hv1e^__CjvHvRCe#!$H@ zDGxYfNoQQseP+%3k>lYJp~H+%eN3k|c|wW(sKL*Xi;qP(5OM>0;Tjy`q69r!mm zVt<`5{w;zw`~APqJoxh`n*FjWjktHu)*jijAg#EI?C$2bFS%W>qSy?q!-QU;Os7 z>2fk4U$leIXr=)U@!qJ10G`8JHR`?~UzQ-%!$gWUj*x%J>r;fpSXGlDM*XN=}W0s+&#J1%2Ytm7lMfF6_ClQlup zcfsrc9>gm;TCEA#s?=ouR}^Xzs}@S}a*<%vHmK$ca7Gh^@GPdC)&kq0=oFjn!w#Zr z$Y~Ai;UCaL9YX4S5F@6;;~AeLaoGZT6){*ID^fA^4j!xuk`x)RFyJR_s$pQ1X8vuT z@kEUegE0N$g!2=XS^!LJm$z&J3?rt^ajBWZbTRyu6c0>|C6~d?G^lC^(_3wf!e$3K zg=K!teISEjbd3=Ns^5!RQ$#)5G8JZ@quJI=+#gvSwnb*npv-LY(~XnT3#?kz_#oma zQ;l_Um;(Oofoz`Vu^xs8np1(UbQVXe&jBlS2aGRd&c}eA&tCBFGAA;3;Zq0;O8o+x zAycvdKZ~B*%-hIA57;i@x<77`-^)BC?%v|Hrwbf+OlxfBpDmhhV&Zk3-YsF!bOk?} zkVU;UM!hY)z*|9c^D@I|rSvm_;1;KY(3nJ38~+>kiG|w}%jj+bon>{eDmHq~h$SAi zR7fw4eb-FIpJ4MNX$O!e29wK*J1PVv5xcRZw|>2GBeg?|C!4%|L{YJ!cA7xav0Q&J z15bEH32W?$9jfSdQM*SCX;NB95~iN9=F)E+X zKkaNuW=nwGIcQNV(8W7|5TNLD1JYYlU=?7i#>-NGxMa0FvFn=ns%6rsst-*sqL8%-j^ zxoD~^H=`E11R0KL_bQq^ywRPgq>?=<-c&iAsh}lC10(l@!E~83{{%?OkeUNI7NoRn1$~N2?%J1inFEAe zQy0KSG6!)MMlX;sHfIQ|zRIZ&vrh@#K||&j0QtmRrQcowl^a$85y<>0Hce|9cTx)|WQa4lhcyN1Z@qt1gO$U}Sm`Wgd)?fQDet(A&@4XO#8 zRBMlIMyT76=&Yk?U9qCmNNe5a<<8jFPb8rp9hQVIxu|6((HHgKAsUbM)b9|4L}jxF zk6T#XH`oex_&Fn3ADW=3Z)I6AB@j38;(J^K=st9vJPWK^-g$v#+-RopVx2cq_|cRE zf#+cvKV9dbWegN!O{g87K%Zc&MyZ*^)ulrWsLKlsX#MzTK(!(RZ#*RNJ9w}d_caf+ zAW0#NQ`|28(HViD6%ftIH_cPtJyl7|bOFF+qv0<*6n@*EKSUZd-Lcae8pBi_m5=budP}JKS`#e%2JrSKZYTjwgu;&Jw{KQiCOl(DX{BOVYPK_f zhK!)doy^6$Rw_48NJy7_3L<=Bbhr?#Ovy@~Ad8KJA*nN&n-}3ZUeE)Sx&z@SfoWer zKf_OXwu@c^oLdN{m<8VAcz@xN{-eGpp7%6+ID>W*+39?dZ|Wi)$_10b3%^9qJHYJK zEuH?65rdG0z;6Cz1P>TZGwpQup}7nrL?EMPYRwdPiTk~Z70>pz6gnWToGh2;oyi93 zuQ*kh#T&|y@!|db&Rd{;TEkL|?}LypxUkZ^R`qCOlugb8cMFyh`gnwC89nEi#0m8) 
zyg~y~libe{+Nfl8^hXizhOgW7vm@jJMJTMXF#2AUQeB<_bN{5*_it2&f2kVb55SoC Q^X~MwrP_!44SGNLAI+6Mi~s-t literal 0 HcmV?d00001 From d9d8fdf9534d923325d65d98f1d1769cf277f388 Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 23:46:35 +1000 Subject: [PATCH 19/20] read me update --- recognition/vq-vae_s47036219/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/recognition/vq-vae_s47036219/README.md b/recognition/vq-vae_s47036219/README.md index bb8932db6..1257da2a6 100644 --- a/recognition/vq-vae_s47036219/README.md +++ b/recognition/vq-vae_s47036219/README.md @@ -35,6 +35,9 @@ The ADNI (Alzheimer’s Disease Neuroimaging Initiative) dataset is a collection ## Details on the implementation + +The goal of this project was to: "Ceate a generative model of the ADNI brain dataset using a VQVA that has a “reasonably clear image” and a Structured Similarity (SSIM) of over 0.6" + This implementation was relatively standard for this model. There exist other extensions that could be of a great use in this case, using a gan or other generative models in combination creates a powerful method to improve upon my implementation - but this is left forr other students with more time. # Usage: From 526bcf702fc44fb8bfd3002f37043ddbcb255b7e Mon Sep 17 00:00:00 2001 From: ConnorArmstrong Date: Tue, 24 Oct 2023 23:50:16 +1000 Subject: [PATCH 20/20] typo fix in readme --- recognition/vq-vae_s47036219/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recognition/vq-vae_s47036219/README.md b/recognition/vq-vae_s47036219/README.md index 1257da2a6..cfd8b9897 100644 --- a/recognition/vq-vae_s47036219/README.md +++ b/recognition/vq-vae_s47036219/README.md @@ -6,7 +6,7 @@ # Project: ## The Vector Quantized Variational Autoencoder -The goal of this task was to implement a Vector Quantized Variational Autoencoder (henceforth referred to as a VQ-VAE). The VQ-VAE is an extension of a typical variational autoencoder that handles discrete latent representation learning - which is where the model learns to represent data in a form where the latent variables take on distinct discrete values, rather than a continuous range. This is done by the model passing the encoders output through a vector quantisation layer, mapping the continuous encodings to the closest vector in the embedding spacve. This makes the VQ-VAE very effective at managing discrete structured data and image reconstruction/generation. +The goal of this task was to implement a Vector Quantized Variational Autoencoder (henceforth referred to as a VQ-VAE). The VQ-VAE is an extension of a typical variational autoencoder that handles discrete latent representation learning - which is where the model learns to represent data in a form where the latent variables take on distinct discrete values, rather than a continuous range. This is done by the model passing the encoders output through a vector quantisation layer, mapping the continuous encodings to the closest vector in the embedding space. This makes the VQ-VAE very effective at managing discrete structured data and image reconstruction/generation. ## VQ-VAE Architecture