diff --git a/recognition/README.md b/recognition/README.md new file mode 100644 index 000000000..5c646231c --- /dev/null +++ b/recognition/README.md @@ -0,0 +1,10 @@ +# Recognition Tasks +Various recognition tasks solved in deep learning frameworks. + +Tasks may include: +* Image segmentation +* Object detection +* Graph node classification +* Image super resolution +* Disease classification +* Generative modelling with StyleGAN and Stable Diffusion diff --git a/recognition/vision-transformer-4696689/README.md b/recognition/vision-transformer-4696689/README.md new file mode 100644 index 000000000..e836a8439 --- /dev/null +++ b/recognition/vision-transformer-4696689/README.md @@ -0,0 +1,79 @@ +# ADNI brain data classification with Vision Transformer + +## Summary + +The goal of this project is to classify Alzheimer's disease (normal control, NC, or AD) in the ADNI brain data using a Vision Transformer. Each sample corresponds to a patient and consists of 20 greyscale slices of 240x256 pixels, and is classified as either NC or AD. Experiments were also done with data augmentation. + +## How to use + +There are four files: dataset.py, modules.py, train.py and predict.py. The only files that need to be run are train.py and predict.py. train.py is responsible for training (and testing) the model, with the option of saving the model as well as the loss and validation accuracy of each epoch, for use in predict.py. predict.py can load this data and retest the model on any of the dataloaders (train, validation, test) or graph the loss/accuracy curves with matplotlib. + +Key point: dataset.py contains local directory paths for the images. Make sure these point to your copy of the data. + +Key point: the model-saving section of train.py is commented out. Uncomment it to use this functionality. + +Key point: the test section of predict.py is commented out. Uncomment it to use this functionality. + +Key point: since the dataset is so large, training may need to be done on 4x P100 GPUs (Rangpur). + +## Architecture + +The default Vision Transformer is extended with a pre-convolutional module, of which there are two designs. The convolutional layers produce fewer, smaller patches, which speeds the model up; they are also intended to introduce inductive bias into the model. 3D patches are used, offering a large speed boost. Data augmentation is done by flipping images, yielding 4x as much data, which is said to be very important for transformer models. + +![Basic Transformer Model](extra/ViT.png) + +The standard Vision Transformer works by feeding embeddings of image patches, along with a positional encoding, into a transformer model. Only the encoder is used, and cross-entropy loss is used for the classification. Switching the order of normalisation (applying LayerNorm before attention) allows for better gradient propagation and training stability. If using this patch-based model, it is important to use 3D patches for both speed and performance. The later design instead uses a CNN to reduce the image into channels (of similar size to patches), which are then fed in. This further improves speed without impacting performance. + +## Training + +Training is done for 100 epochs, which was found experimentally to be long enough. The AdamW optimiser is used with a learning rate of 3e-4; this was decreased from 1e-3 (which did not train well) and increased from 1e-4. The data is split into train, validation and test sets. Most of the data is in the training set, and the validation and test sets are of equal size.
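train.py itself is not shown in this excerpt. As a rough illustration only, and not the project's actual script, the sketch below wires the provided dataset.py and modules.py into the training setup described above (AdamW at lr 3e-4, cross-entropy loss, 100 epochs). The token shape (192 tokens of length 120 after the convolutional front end) and the transformer hyperparameters (heads=4, embed=360, fflscale=2, nblocks=4) are taken from extra/parameters.txt and should be treated as assumptions rather than the exact final configuration.

```python
# A minimal sketch only, not the actual train.py: it assumes the dataset.py and
# modules.py shown in this diff, the AdamW settings described above, and the
# token shape / transformer hyperparameters recorded in extra/parameters.txt.
# Device handling is omitted; the full run used GPUs as noted in the README.
import torch
import torch.nn as nn

from dataset import trainloader, valloader  # note: dataset.py loads all images at import time
from modules import VisionTransformer

# After the 3D conv front end, each volume becomes 192 tokens of length 120.
model = VisionTransformer(classes=2, inputsize=(16, 192, 120),
                          heads=4, embed=360, fflscale=2, nblocks=4)
lossfunc = nn.CrossEntropyLoss()
optimiser = torch.optim.AdamW(model.parameters(), lr=3e-4)

for epoch in range(100):
    model.train()
    epoch_loss, nbatches = 0.0, 0
    for x, y in trainloader(batchsize=16):
        optimiser.zero_grad(set_to_none=True)
        loss = lossfunc(model(x), y)
        loss.backward()
        optimiser.step()
        epoch_loss += loss.item()
        nbatches += 1

    # track validation accuracy each epoch, as train.py is described as doing
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for x, y in valloader():
            correct += (model(x).argmax(dim=1) == y).sum().item()
            total += y.shape[0]
    print(f"epoch {epoch}: loss {epoch_loss / nbatches:.5f}, val acc {100 * correct / total:.2f}%")
```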
+Hyperparameter tuning was done manually. Learning rate schedulers (e.g. cyclic, warm-up) were found to be ineffective. A learning rate of 1e-3 did not permit training, while 1e-4 was too slow and did not perform as well as the final 3e-4. The 20 slices belonging to each patient are kept together in a single sample, which gives a patient-level split. + +## Results + +Overall, the test accuracy was 68.0%, which is a modest result. The test accuracy was the same as the validation accuracy, which became stable during training at about the same time the loss rapidly decreased and also became stable. This could indicate that the model has adapted very well to the training set and is not generalising, which was the key motivation for data augmentation. However, it could also indicate that the learning rate was too small and the model was stuck in a local optimum; this was the key motivation for increasing the learning rate from 1e-4 to 3e-4. + +![Training accuracy per epoch](extra/train.png) +![Validation accuracy per epoch](extra/acc.png) +![Training loss per epoch](extra/loss.png) + +## References + +Dosovitskiy, A. (2021) An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale, Papers with Code. Available at: https://paperswithcode.com/paper/an-image-is-worth-16x16-words-transformers-1 (Accessed: 18 November 2023). \ No newline at end of file diff --git a/recognition/vision-transformer-4696689/dataset.py b/recognition/vision-transformer-4696689/dataset.py new file mode 100644 index 000000000..d2a3ea4e5 --- /dev/null +++ b/recognition/vision-transformer-4696689/dataset.py @@ -0,0 +1,126 @@ +""" +Imports Here +""" +"""numpy and torch""" +import numpy as np +import torch + +"""PIL""" +from PIL import Image + +"""torchvision and utils""" +import torchvision.transforms as transforms +from torch.utils.data import DataLoader, Dataset + +"""os""" +import os + +""" +Loading data from local file +""" +"""Assumes images have pixel values in range [0,255]""" +def getImages(trainDIRs, testDIRs): + """Get image to tensor""" + transform = transforms.Compose([ + transforms.PILToTensor() + ]) + hflip = transforms.Compose([ + transforms.RandomHorizontalFlip(p=1.0), + transforms.PILToTensor() + ]) + vflip = transforms.Compose([ + transforms.RandomVerticalFlip(p=1.0), + transforms.PILToTensor() + ]) + dflip = transforms.Compose([ + transforms.RandomHorizontalFlip(p=1.0), + transforms.RandomVerticalFlip(p=1.0), + transforms.PILToTensor() + ]) + tlist = [transform, hflip, vflip, dflip] + """Loading data into arrays""" + xtrain, ytrain, xtest, ytest = [], [], [], [] + """training data""" + size = [0, 0] + for i, DIR in enumerate(trainDIRs): + for t in tlist: + px = [] + j = 0 + for filename in sorted(os.listdir(DIR)): + f = os.path.join(DIR, filename) + img = Image.open(f) + tensor = t(img).float() + tensor.require_grad = True + px.append(tensor/255) + j = (j+1) % 20 + if j == 0: + xtrain.append(torch.stack(px)) + px = [] + size[i] += 1 + xtrain = torch.stack(xtrain) + ytrain = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0)) + + """testing data""" + size = [0, 0] + for i, DIR in enumerate(testDIRs): + for t in tlist: + px = [] + j = 0 + for filename in sorted(os.listdir(DIR)): + f = os.path.join(DIR, filename) + img = Image.open(f) + tensor = t(img).float() + tensor.require_grad = True + px.append(tensor/255) + j = (j+1) % 20 + if j == 0: + xtest.append(torch.stack(px)) + px = [] + size[i] += 1 + xtest = torch.stack(xtest) + 
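        # shuffle the assembled test volumes and split them 50/50: the first half becomes the validation set, the second half the held-out test set; the labels are permuted with the same indices so they stay aligned with the images + 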
idx = torch.randperm(xtest.size(0)) + xtest = xtest[idx, :] + splitsize = int(xtest.shape[0]/2) + xval, xtest = xtest.split(splitsize, dim=0) + ytest = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0)) + ytest = ytest[idx] + yval, ytest = ytest.split(splitsize, dim=0) + return xtrain, ytrain, xtest, ytest, xval, yval +""" +Dataloader +""" +class DatasetWrapper(Dataset): + def __init__(self, X, y=None): + self.X, self.y = X, y + + def __len__(self): + return len(self.X) + + def __getitem__(self, idx): + if self.y is None: + return self.X[idx] + else: + return self.X[idx], self.y[idx] + +trainDIRs = ['AD_NC/train/AD/', 'AD_NC/train/NC'] +testDIRs = ['AD_NC/test/AD/', 'AD_NC/test/NC'] +xtrain, ytrain, xtest, ytest, xval, yval = getImages(trainDIRs, testDIRs) +ytrain, ytest = ytrain.type(torch.LongTensor), ytest.type(torch.LongTensor) +xtrain = xtrain.permute(0, 2, 1, 3, 4) +xtest = xtest.permute(0, 2, 1, 3, 4) +xval = xval.permute(0, 2, 1, 3, 4) + +def trainloader(batchsize=16): + return DataLoader(DatasetWrapper(xtrain, ytrain), batch_size=batchsize, shuffle=True, pin_memory=True) + +def valloader(): + return DataLoader(DatasetWrapper(xval, yval), batch_size=1, shuffle=True, pin_memory=True) + +def testloader(): + return DataLoader(DatasetWrapper(xtest, ytest), batch_size=1, shuffle=True, pin_memory=True) + +def trainshape(): + return xtrain.shape + +def testshape(): + return xtest.shape \ No newline at end of file diff --git a/recognition/vision-transformer-4696689/extra/ViT.png b/recognition/vision-transformer-4696689/extra/ViT.png new file mode 100644 index 000000000..4ddbcd305 Binary files /dev/null and b/recognition/vision-transformer-4696689/extra/ViT.png differ diff --git a/recognition/vision-transformer-4696689/extra/acc.png b/recognition/vision-transformer-4696689/extra/acc.png new file mode 100644 index 000000000..0375c2626 Binary files /dev/null and b/recognition/vision-transformer-4696689/extra/acc.png differ diff --git a/recognition/vision-transformer-4696689/extra/conv-block.py b/recognition/vision-transformer-4696689/extra/conv-block.py new file mode 100644 index 000000000..7e27374ee --- /dev/null +++ b/recognition/vision-transformer-4696689/extra/conv-block.py @@ -0,0 +1,37 @@ +""" +Conv v2 +""" +class ConvLayer2(nn.Module): + def __init__(self): + super().__init__() + #pool + self.pool = nn.MaxPool2d(kernel_size=3, stride=2) + self.relu = nn.ReLU() + #first layer + self.conv11_x = nn.Conv2d(20, 48, kernel_size=(11,11), stride=(4,4), padding=(0,0)) + self.conv11_y = nn.Conv2d(240, 48, kernel_size=(11,3), stride=(4,1), padding=(0,0)) + self.conv11_z = nn.Conv2d(256, 48, kernel_size=(3,11), stride=(1,4), padding=(0,0)) + #second layer + self.conv5_x = nn.Conv2d(48, 192, kernel_size=(5,5), stride=(2,2), padding=(0,0)) + self.conv5_y = nn.Conv2d(48, 192, kernel_size=(5,3), stride=(2,1), padding=(0,0)) + self.conv5_z = nn.Conv2d(48, 192, kernel_size=(3,5), stride=(1,2), padding=(0,0)) + #projection + self.l_x = nn.Linear(30, 32) + self.l_y = nn.Linear(12, 32) + self.l_z = nn.Linear(10, 32) + + def forward(self, imgs): + #input N, C, L, W, H + #first layer + x_x = self.relu(self.pool(self.conv11_x(imgs.flatten(1,2)))) + x_y = self.relu(self.pool(self.conv11_y(imgs.permute(0,1,3,4,2).flatten(1,2)))) + x_z = self.relu(self.pool(self.conv11_z(imgs.permute(0,1,4,2,3).flatten(1,2)))) + #second layer + x_x = self.relu(self.pool(self.conv5_x(x_x))) + x_y = self.relu(self.pool(self.conv5_y(x_y))) + x_z = self.relu(self.pool(self.conv5_z(x_z))) + #projection + 
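        # each view's spatial map is flattened and projected to length 32; the three views are then concatenated along the feature dimension, giving 192 tokens of length 96 per volume + 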
x_x = self.l_x(x_x.flatten(2,3)) + x_y = self.l_y(x_y.flatten(2,3)) + x_z = self.l_z(x_z.flatten(2,3)) + return torch.cat([x_x, x_y, x_z], dim=2) \ No newline at end of file diff --git a/recognition/vision-transformer-4696689/extra/loss.png b/recognition/vision-transformer-4696689/extra/loss.png new file mode 100644 index 000000000..f1d8fbdc0 Binary files /dev/null and b/recognition/vision-transformer-4696689/extra/loss.png differ diff --git a/recognition/vision-transformer-4696689/extra/parameters.txt b/recognition/vision-transformer-4696689/extra/parameters.txt new file mode 100644 index 000000000..2f7b1c0cf --- /dev/null +++ b/recognition/vision-transformer-4696689/extra/parameters.txt @@ -0,0 +1,20 @@ +AdamW lr=1e-4, 175 epochs, 192, 120, heads=4, embed=360, fflscale=2, nblocks=4 +LOSS = [0.72875, 0.70531, 0.66767, 0.61233, 0.53435, 0.49842, 0.43119, 0.45669, 0.38625, 0.35263, 0.36537, 0.32514, 0.26318, 0.2506, 0.24311, 0.18782, 0.17435, 0.13011, 0.14882, 0.17382, 0.10999, 0.13796, 0.07506, 0.06944, 0.06198, 0.03524, 0.07395, 0.09999, 0.04692, 0.03988, 0.0566, 0.02929, 0.01366, 0.01277, 0.01246, 0.01824, 0.04371, 0.0791, 0.04064, 0.04082, 0.01846, 0.00784, 0.00725, 0.00714, 0.0071, 0.00703, 0.00697, 0.00684, 0.00686, 0.00677, 0.00665, 0.00629, 0.00595, 0.01606, 0.11788, 0.21843, 0.02893, 0.01473, 0.04044, 0.02642, 0.02621, 0.00663, 0.00604, 0.00071, 0.00035, 0.00026, 0.00022, 0.0002, 0.00018, 0.00016, 0.00015, 0.00014, 0.00013, 0.00012, 0.00011, 0.0001, 0.0001, 9e-05, 8e-05, 8e-05, 7e-05, 7e-05, 7e-05, 6e-05, 6e-05, 6e-05, 5e-05, 5e-05, 5e-05, 5e-05, 4e-05, 4e-05, 4e-05, 4e-05, 4e-05, 4e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] +ACC = [50.67, 51.11, 58.67, 63.11, 57.78, 62.67, 63.56, 66.22, 66.22, 67.11, 66.67, 65.78, 67.56, 65.33, 68.0, 68.44, 67.11, 64.89, 64.89, 67.56, 68.0, 69.33, 67.11, 67.56, 68.0, 67.56, 66.22, 71.11, 69.33, 67.11, 66.67, 69.78, 69.33, 69.78, 69.78, 68.0, 66.67, 68.89, 69.78, 69.78, 68.44, 67.56, 67.11, 67.56, 67.56, 67.56, 68.0, 68.0, 68.0, 68.0, 68.0, 67.56, 67.56, 68.0, 66.22, 70.67, 67.56, 66.67, 68.89, 65.33, 66.67, 70.22, 68.0, 69.78, 68.89, 68.0, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44] + +to plot: +import matplotlib.pyplot as plt +steps = range(175) +plt.plot(steps, LOSS) +plt.ylabel('LOSS') +plt.xlabel('epoch') +plt.show() +plt.plot(steps, ACC) +plt.ylabel('ACCURACY') +plt.xlabel('epoch') 
+plt.show() + +cuda +training time: 27699.315416812897 +test acc: tensor(0.6800) +TIME = [147.563, 146.343, 144.501, 147.546, 144.388, 143.652, 146.672, 144.336, 145.402, 146.032, 144.47, 144.527, 145.94, 145.326, 144.034, 145.458, 146.047, 143.858, 146.212, 144.663, 144.781, 146.169, 143.851, 146.982, 143.694, 145.329, 145.16, 146.066, 144.08, 145.364, 145.876, 143.906, 145.965, 144.99, 144.381, 147.893, 146.199, 144.357, 145.847, 144.55, 144.047, 145.702, 144.852, 143.926, 145.867, 144.55, 144.213, 146.131, 144.313, 144.568, 145.913, 144.292, 147.893, 147.291, 148.067, 148.66, 149.459, 148.164, 148.963, 149.543, 144.27, 145.208, 145.364, 143.899, 146.17, 143.49, 146.005, 144.319, 144.524, 145.954, 143.908, 145.923, 149.609, 148.143, 149.126, 147.25, 143.868, 145.934, 144.889, 144.385, 146.232, 144.071, 145.286, 145.871, 143.787, 145.719, 148.777, 147.816, 149.28, 148.8, 148.009, 149.313, 149.438, 147.923, 148.943, 149.355, 148.399, 148.242, 149.209, 149.388, 148.377, 148.594, 149.603, 148.353, 148.588, 149.617, 148.425, 148.436, 149.528, 148.536, 148.31, 149.578, 148.509, 148.387, 149.569, 148.542, 148.188, 149.53, 148.641, 148.101, 149.468, 148.894, 148.149, 148.935, 149.422, 148.588, 148.187, 149.229, 149.147, 149.19, 148.44, 148.16, 149.419, 148.88, 148.568, 148.514, 148.583, 148.594, 148.789, 148.996, 149.07, 149.142, 148.768, 148.309, 148.454, 148.685, 149.076, 149.272, 148.759, 148.253, 148.44, 149.121, 149.245, 148.525, 148.261, 148.695, 149.247, 149.253, 148.579, 148.307, 149.357, 147.468, 148.775, 147.945, 149.511, 148.644, 148.232, 149.552, 148.53, 148.147, 149.467, 148.824, 148.064, 149.387, 149.3] \ No newline at end of file diff --git a/recognition/vision-transformer-4696689/extra/train.png b/recognition/vision-transformer-4696689/extra/train.png new file mode 100644 index 000000000..44ae174f4 Binary files /dev/null and b/recognition/vision-transformer-4696689/extra/train.png differ diff --git a/recognition/vision-transformer-4696689/modules.py b/recognition/vision-transformer-4696689/modules.py new file mode 100644 index 000000000..07fabd881 --- /dev/null +++ b/recognition/vision-transformer-4696689/modules.py @@ -0,0 +1,106 @@ +""" +Imports Here +""" +import numpy as np +import torch +import torch.nn as nn + +class Attention(nn.Module): + def __init__(self, heads, embed): + super().__init__() + self.heads = heads + self.attn = nn.MultiheadAttention(embed, heads, batch_first=True) + self.Q = nn.Linear(embed, embed, bias=False) + self.K = nn.Linear(embed, embed, bias=False) + self.V = nn.Linear(embed, embed, bias=False) + + def forward(self, x): + Q = self.Q(x) + K = self.K(x) + V = self.V(x) + attnout, attnweights = self.attn(Q, K, V) + return attnout + +class TransBlock(nn.Module): + def __init__(self, heads, embed, fflsize): + super().__init__() + self.fnorm = nn.LayerNorm(embed) + self.snorm = nn.LayerNorm(embed) + self.attn = Attention(heads, embed) + self.ffl = nn.Sequential( + nn.Linear(embed, fflsize), + nn.GELU(), + nn.Linear(fflsize, embed) + ) + + def forward(self, x): + """ + Switching to pre-MHA LayerNorm is supposed to give better performance, + this is used in other models such as LLMs like GPT. Gradients are meant + to be stabilised. This is different to the original ViT paper. 
+ """ + x = x + self.attn(self.fnorm(x)) + x = x + self.ffl(self.snorm(x)) + return x +""" +Convolution pre +""" +class ConvLayer(nn.Module): + def __init__(self): + super().__init__() + self.pool = nn.MaxPool3d(kernel_size=3, stride=2) + self.relu = nn.ReLU() + self.conv11 = nn.Conv3d(1, 48, kernel_size=(3,11,11), stride=(1,4,4), padding=(1,0,0)) + self.conv5 = nn.Conv3d(48, 192, kernel_size=(3,5,5), stride=(1,2,2), padding=(1,0,0)) + + def forward(self, imgs): + x = self.conv11(imgs) + x = self.relu(self.pool(x)) + x = self.conv5(x) + x = self.relu(self.pool(x)) + return x +""" +Vision Transformer Class to create a vision transformer model +""" +class VisionTransformer(nn.Module): + def __init__(self, classes=2, inputsize=(1,1,1), heads=2, embed=64, fflscale=2, nblocks=1): + super().__init__() + (self.N, self.Np, self.P) = inputsize + """components""" + self.proj = nn.Linear(self.P, embed) + self.clstoken = nn.Parameter(torch.randn(1, 1, embed)) + self.posembed = self.embedding(self.Np+1, embed) + self.transformer = nn.Sequential( + *((TransBlock(heads, embed, int(fflscale*embed)),)*nblocks) + ) + self.classifier = nn.Sequential( + nn.LayerNorm(embed), + nn.Linear(embed, classes) + ) + """convolutional components""" + self.conv = ConvLayer() + + def embedding(self, npatches, embed, freq=10000): #10000 is described in ViT paper + posembed = torch.zeros(npatches, embed) + for i in range(npatches): + for j in range(embed): + if j % 2 == 0: + posembed[i][j] = np.sin(i/(freq**(j/embed))) + else: + posembed[i][j] = np.cos(i/(freq**((j-1)/embed))) + return posembed + + def forward(self, imgs): #assume size checking done by createPatches + """Convolutional layer""" + imgs = self.conv(imgs) + imgs = imgs.flatten(2,4) + """Linear Projection and Positional Embedding""" + tokens = self.proj(imgs) #perform linear projection + clstoken = self.clstoken.repeat(imgs.shape[0], 1, 1) + tokens = torch.cat([clstoken, tokens], dim=1) #concat the class token + x = tokens + self.posembed.repeat(imgs.shape[0], 1, 1) #add positional encoding + """Transformer""" + x = self.transformer(x) + """Classification""" + y = x[:,0] + return self.classifier(y) \ No newline at end of file diff --git a/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/dataloader_torch-checkpoint.ipynb b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/dataloader_torch-checkpoint.ipynb new file mode 100644 index 000000000..b4222436e --- /dev/null +++ b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/dataloader_torch-checkpoint.ipynb @@ -0,0 +1,146 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "id": "7f66ae1f", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "\"\"\"numpy and torch\"\"\"\n", + "import numpy as np\n", + "import torch\n", + "\n", + "\"\"\"PIL\"\"\"\n", + "from PIL import Image\n", + "\n", + "\"\"\"torchvision and utils\"\"\"\n", + "import torchvision.transforms as transforms\n", + "from torch.utils.data import DataLoader, Dataset\n", + "\n", + "\"\"\"os\"\"\"\n", + "import os\n", + "\n", + "\"\"\"\n", + "Loading data from local file\n", + "\"\"\"\n", + "\"\"\"Assumes images have pixel values in range [0,255]\"\"\"\n", + "def getImages(trainDIRs, testDIRS):\n", + " \"\"\"Get image to tensor\"\"\"\n", + " transform = transforms.Compose([\n", + " transforms.PILToTensor()\n", + " ])\n", + " \"\"\"Loading data into arrays\"\"\"\n", + " xtrain, xtrain, xtest, ytest = [], [], [], []\n", + " \"\"\"training data\"\"\"\n", + " 
size = [0, 0]\n", + " for i, DIR in enumerate(trainDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtrain.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtrain = torch.stack(xtrain)\n", + " ytrain = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + "\n", + " \n", + " \"\"\"testing data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(testDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtest.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtest = torch.stack(xtest)\n", + " idx = torch.randperm(xtest.size(0))\n", + " xtest = xtest[idx, :]\n", + " splitsize = int(xtest.shape[0]/2)\n", + " xval, xtest = xtest.split(splitsize, dim=0)\n", + " ytest = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " ytest = ytest[idx]\n", + " yval, ytest = ytest.split(splitsize, dim=0)\n", + " return xtrain, ytrain, xtest, ytest, xval, yval\n", + "\n", + "\"\"\"\n", + "Dataloader\n", + "\"\"\"\n", + "class DatasetWrapper(Dataset):\n", + " def __init__(self, X, y=None):\n", + " self.X, self.y = X, y\n", + "\n", + " def __len__(self):\n", + " return len(self.X)\n", + "\n", + " def __getitem__(self, idx):\n", + " if self.y is None:\n", + " return self.X[idx]\n", + " else:\n", + " return self.X[idx], self.y[idx]\n", + "\n", + "trainDIRs = ['AD_NC/train/AD/', 'AD_NC/train/NC']\n", + "testDIRs = ['AD_NC/test/AD/', 'AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest, xval, yval = getImages(trainDIRs, testDIRs)\n", + "ytrain, ytest = ytrain.type(torch.LongTensor), ytest.type(torch.LongTensor)\n", + "xtrain = xtrain.permute(0, 2, 1, 3, 4)\n", + "xtest = xtest.permute(0, 2, 1, 3, 4)\n", + "xval = xval.permute(0, 2, 1, 3, 4)\n", + "\n", + "def trainloader(batchsize=16):\n", + " return DataLoader(DatasetWrapper(xtrain, ytrain), batch_size=batchsize, shuffle=True, pin_memory=True)\n", + "\n", + "def valloader():\n", + " return DataLoader(DatasetWrapper(xval, yval), batch_size=1, shuffle=True, pin_memory=True)\n", + "\n", + "def testloader():\n", + " return DataLoader(DatasetWrapper(xtest, ytest), batch_size=1, shuffle=True, pin_memory=True)\n", + "\n", + "def trainshape():\n", + " return xtrain.shape\n", + "\n", + "def testshape():\n", + " return xtest.shape" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/dataset-checkpoint.ipynb b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/dataset-checkpoint.ipynb new file mode 100644 index 000000000..024c99e75 --- /dev/null +++ 
b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/dataset-checkpoint.ipynb @@ -0,0 +1,277 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 10, + "id": "338da719", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "\"\"\"numpy and torch\"\"\"\n", + "import numpy as np\n", + "import torch\n", + "\n", + "\"\"\"PIL\"\"\"\n", + "from PIL import Image\n", + "\n", + "\"\"\"torchvision and utils\"\"\"\n", + "import torchvision.transforms as transforms\n", + "from torch.utils.data import DataLoader, Dataset\n", + "\n", + "\"\"\"os\"\"\"\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "65011ff4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nLoading data from local file\\n'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Loading data from local file\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "206e485b", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"Assumes images have pixel values in range [0,255]\"\"\"\n", + "def getImages(trainDIRs, testDIRS):\n", + " \"\"\"Get image to tensor\"\"\"\n", + " transform = transforms.Compose([\n", + " transforms.PILToTensor()\n", + " ])\n", + " \"\"\"Loading data into arrays\"\"\"\n", + " xtrain, xtrain, xtest, ytest = [], [], [], []\n", + " \"\"\"training data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(trainDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtrain.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtrain = torch.stack(xtrain)\n", + " ytrain = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " \n", + " \"\"\"testing data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(testDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " if j == 0:\n", + " xtest.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtest = torch.stack(xtest)\n", + " ytest = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " return xtrain, ytrain, xtest, ytest" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "a3c45c1a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([1076, 20, 1, 240, 256])\n", + "torch.Size([9000, 1, 1, 240, 256])\n" + ] + } + ], + "source": [ + "trainDIRs = ['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest = getImages(trainDIRs, testDIRs)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "292100c2", + "metadata": {}, + "outputs": [], + "source": [ + "def createPatches(imgs, patchsize):\n", + " (N, M, C, W, H) = imgs.shape\n", + " (wsize, hsize) = patchsize\n", + " \"\"\"check for errors with sizing\"\"\"\n", + " if (W % wsize != 0) or (H % hsize != 0):\n", + " raise 
Exception(\"patchsize is not appropriate\")\n", + " if (C != C) or (H != H):\n", + " raise Exception(\"given sizes do not match\")\n", + " size = (N, M, C, W // wsize, wsize, H // hsize, hsize)\n", + " perm = (0, 1, 3, 5, 2, 4, 6) #bring col, row index of patch to front\n", + " flat = (2, 3) #flatten (col, row) index into col*row entry index for patches\n", + " imgs = imgs.reshape(size).permute(perm).flatten(*flat)\n", + " return imgs #in format Nimgs, Npatches, C, Wpatch, Hpatch\n", + " \n", + "def flattenPatches(imgs): #takes input (N, M, Npatches, C, W, H) returns (N, M*Npatches, C*W*H)\n", + " return imgs.flatten(3, 5).flatten(1, 2)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "e0897522", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nDataloader\\n'" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Dataloader\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "05c80732", + "metadata": {}, + "outputs": [], + "source": [ + "class DatasetWrapper(Dataset):\n", + " def __init__(self, X, y=None):\n", + " self.X, self.y = X, y\n", + "\n", + " def __len__(self):\n", + " return len(self.X)\n", + "\n", + " def __getitem__(self, idx):\n", + " if self.y is None:\n", + " return self.X[idx]\n", + " else:\n", + " return self.X[idx], self.y[idx]" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "ea41eef5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([1076, 20, 1, 240, 256])\n", + "torch.Size([9000, 1, 1, 240, 256])\n" + ] + } + ], + "source": [ + "trainDIRs = ['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest = getImages(trainDIRs, testDIRs)" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "1f077f43", + "metadata": {}, + "outputs": [], + "source": [ + "xtrain = flattenPatches(createPatches(xtrain, (16,16)))\n", + "xtest = flattenPatches(createPatches(xtest, (16,16)))" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "a02e05bd", + "metadata": {}, + "outputs": [], + "source": [ + "def trainloader(batchsize=16):\n", + " return DataLoader(DatasetWrapper(xtrain, ytrain), batchsize=batchsize, shuffle=True)\n", + "\n", + "def testloader():\n", + " return DataLoader(DatasetWrapper(xtest, ytest), batchsize=1, shuffle=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "18d6ca10", + "metadata": {}, + "outputs": [], + "source": [ + "def trainshape():\n", + " return xtrain.shape\n", + "\n", + "def testshape():\n", + " return xtest.shape" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/dataset3d-checkpoint.ipynb b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/dataset3d-checkpoint.ipynb new file mode 100644 index 000000000..1cf393de9 --- /dev/null +++ 
b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/dataset3d-checkpoint.ipynb @@ -0,0 +1,339 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "338da719", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'dlopen(/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so, 6): Library not loaded: @rpath/libpng16.16.dylib\n", + " Referenced from: /Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so\n", + " Reason: Incompatible library version: image.so requires version 56.0.0 or later, but libpng16.16.dylib provides version 54.0.0'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n", + " warn(\n" + ] + } + ], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "\"\"\"numpy and torch\"\"\"\n", + "import numpy as np\n", + "import torch\n", + "\n", + "\"\"\"PIL\"\"\"\n", + "from PIL import Image\n", + "\n", + "\"\"\"torchvision and utils\"\"\"\n", + "import torchvision.transforms as transforms\n", + "from torch.utils.data import DataLoader, Dataset\n", + "\n", + "\"\"\"os\"\"\"\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "65011ff4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nLoading data from local file\\n'" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Loading data from local file\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "206e485b", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"Assumes images have pixel values in range [0,255]\"\"\"\n", + "def getImages(trainDIRs, testDIRS):\n", + " \"\"\"Get image to tensor\"\"\"\n", + " transform = transforms.Compose([\n", + " transforms.PILToTensor()\n", + " ])\n", + " \"\"\"Loading data into arrays\"\"\"\n", + " xtrain, xtrain, xtest, ytest = [], [], [], []\n", + " \"\"\"training data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(trainDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtrain.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtrain = torch.stack(xtrain)\n", + " ytrain = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " \n", + " \"\"\"testing data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(testDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " if j == 0:\n", + " xtest.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtest = torch.stack(xtest)\n", + " ytest = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " return xtrain, ytrain, xtest, ytest" + ] + }, + { + 
"cell_type": "code", + "execution_count": 4, + "id": "a3c45c1a", + "metadata": {}, + "outputs": [], + "source": [ + "trainDIRs = ['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest = getImages(trainDIRs, testDIRs)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "292100c2", + "metadata": {}, + "outputs": [], + "source": [ + "def createPatches(imgs, patchsize):\n", + " (N, M, C, W, H) = imgs.shape\n", + " (wsize, hsize) = patchsize\n", + " \"\"\"check for errors with sizing\"\"\"\n", + " if (W % wsize != 0) or (H % hsize != 0):\n", + " raise Exception(\"patchsize is not appropriate\")\n", + " if (C != C) or (H != H):\n", + " raise Exception(\"given sizes do not match\")\n", + " size = (N, M, C, W // wsize, wsize, H // hsize, hsize)\n", + " perm = (0, 1, 3, 5, 2, 4, 6) #bring col, row index of patch to front\n", + " flat = (2, 3) #flatten (col, row) index into col*row entry index for patches\n", + " imgs = imgs.reshape(size).permute(perm).flatten(*flat)\n", + " return imgs #in format Nimgs, Npatches, C, Wpatch, Hpatch\n", + " \n", + "def flattenPatches(imgs): #takes input (N, M, Npatches, C, W, H) returns (N, M*Npatches, C*W*H)\n", + " return imgs.flatten(3, 5).flatten(1, 2)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "e0897522", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nDataloader\\n'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Dataloader\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "05c80732", + "metadata": {}, + "outputs": [], + "source": [ + "class DatasetWrapper(Dataset):\n", + " def __init__(self, X, y=None):\n", + " self.X, self.y = X, y\n", + "\n", + " def __len__(self):\n", + " return len(self.X)\n", + "\n", + " def __getitem__(self, idx):\n", + " if self.y is None:\n", + " return self.X[idx]\n", + " else:\n", + " return self.X[idx], self.y[idx]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "ea41eef5", + "metadata": {}, + "outputs": [], + "source": [ + "trainDIRs = ['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest = getImages(trainDIRs, testDIRs)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "1f077f43", + "metadata": {}, + "outputs": [], + "source": [ + "xtrain = flattenPatches(createPatches(xtrain, (16,16)))\n", + "xtest = flattenPatches(createPatches(xtest, (16,16)))" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "a02e05bd", + "metadata": {}, + "outputs": [], + "source": [ + "def trainloader(batchsize=16):\n", + " return DataLoader(DatasetWrapper(xtrain, ytrain), batchsize=batchsize, shuffle=True)\n", + "\n", + "def testloader():\n", + " return DataLoader(DatasetWrapper(xtest, ytest), batchsize=1, shuffle=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "18d6ca10", + "metadata": {}, + "outputs": [], + "source": [ + "def trainshape():\n", + " return xtrain.shape\n", + "\n", + "def testshape():\n", + " return xtest.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "8979dcd1", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn" + ] + }, + { + "cell_type": "code", + 
"execution_count": 17, + "id": "00d6d9fb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([1076, 4800, 256])\n" + ] + }, + { + "ename": "RuntimeError", + "evalue": "Given groups=1, weight of size [8, 1, 3, 3], expected input[1, 1076, 4800, 256] to have 1 channels, but got 1076 channels instead", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", + "Input \u001b[0;32mIn [17]\u001b[0m, in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconv(x)\u001b[38;5;241m.\u001b[39mshape)\n\u001b[1;32m 14\u001b[0m imgsize \u001b[38;5;241m=\u001b[39m xtrain\u001b[38;5;241m.\u001b[39mshape\n\u001b[0;32m---> 15\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mTest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimgsize\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mxtrain\u001b[49m\u001b[43m)\u001b[49m\n", + "Input \u001b[0;32mIn [17]\u001b[0m, in \u001b[0;36mTest.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, x):\n\u001b[1;32m 11\u001b[0m \u001b[38;5;28mprint\u001b[39m(x\u001b[38;5;241m.\u001b[39mshape)\n\u001b[0;32m---> 12\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mshape)\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", + "File 
\u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/container.py:217\u001b[0m, in \u001b[0;36mSequential.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 215\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m):\n\u001b[1;32m 216\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m module \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m:\n\u001b[0;32m--> 217\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mmodule\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 218\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28minput\u001b[39m\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/conv.py:463\u001b[0m, in \u001b[0;36mConv2d.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 462\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 463\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_conv_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/conv.py:459\u001b[0m, in 
\u001b[0;36mConv2d._conv_forward\u001b[0;34m(self, input, weight, bias)\u001b[0m\n\u001b[1;32m 455\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode \u001b[38;5;241m!=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mzeros\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[1;32m 456\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m F\u001b[38;5;241m.\u001b[39mconv2d(F\u001b[38;5;241m.\u001b[39mpad(\u001b[38;5;28minput\u001b[39m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reversed_padding_repeated_twice, mode\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode),\n\u001b[1;32m 457\u001b[0m weight, bias, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride,\n\u001b[1;32m 458\u001b[0m _pair(\u001b[38;5;241m0\u001b[39m), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdilation, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgroups)\n\u001b[0;32m--> 459\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv2d\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbias\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstride\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 460\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpadding\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdilation\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgroups\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[0;31mRuntimeError\u001b[0m: Given groups=1, weight of size [8, 1, 3, 3], expected input[1, 1076, 4800, 256] to have 1 channels, but got 1076 channels instead" + ] + } + ], + "source": [ + "class Test(nn.Module):\n", + " def __init__(self, imgsize):\n", + " super().__init__()\n", + " (self.N, self.Np, self.P) = imgsize\n", + " self.conv = nn.Sequential(\n", + " nn.Conv2d(1, 8, kernel_size=3, padding=1),\n", + " nn.ReLU(),\n", + " nn.MaxPool2d(2, 2)\n", + " )\n", + " def forward(self, x):\n", + " print(x.shape)\n", + " print(self.conv(x).shape)\n", + "\n", + "imgsize = xtrain.shape\n", + "model = Test(imgsize).forward(xtrain)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "690cd78c", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/datasetconv-checkpoint.ipynb b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/datasetconv-checkpoint.ipynb new file mode 100644 index 000000000..786bbfade --- /dev/null +++ b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/datasetconv-checkpoint.ipynb @@ -0,0 +1,291 @@ +{ + "cells": [ + { + "cell_type": "code", + 
"execution_count": 1, + "id": "338da719", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'dlopen(/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so, 6): Library not loaded: @rpath/libpng16.16.dylib\n", + " Referenced from: /Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so\n", + " Reason: Incompatible library version: image.so requires version 56.0.0 or later, but libpng16.16.dylib provides version 54.0.0'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n", + " warn(\n" + ] + } + ], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "\"\"\"numpy and torch\"\"\"\n", + "import numpy as np\n", + "import torch\n", + "\n", + "\"\"\"PIL\"\"\"\n", + "from PIL import Image\n", + "\n", + "\"\"\"torchvision and utils\"\"\"\n", + "import torchvision.transforms as transforms\n", + "from torch.utils.data import DataLoader, Dataset\n", + "\n", + "\"\"\"os\"\"\"\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "65011ff4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nLoading data from local file\\n'" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Loading data from local file\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "206e485b", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"Assumes images have pixel values in range [0,255]\"\"\"\n", + "def getImages(trainDIRs, testDIRS):\n", + " \"\"\"Get image to tensor\"\"\"\n", + " transform = transforms.Compose([\n", + " transforms.PILToTensor()\n", + " ])\n", + " \"\"\"Loading data into arrays\"\"\"\n", + " xtrain, xtrain, xtest, ytest = [], [], [], []\n", + " \"\"\"training data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(trainDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtrain.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtrain = torch.stack(xtrain)\n", + " ytrain = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " \n", + " \"\"\"testing data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(testDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtest.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtest = torch.stack(xtest)\n", + " ytest = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " return xtrain, ytrain, xtest, ytest" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a3c45c1a", + "metadata": {}, + "outputs": [], + "source": [ + "trainDIRs = 
['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest = getImages(trainDIRs, testDIRs)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "292100c2", + "metadata": {}, + "outputs": [], + "source": [ + "def createPatches(imgs, patchsize):\n", + " (N, M, C, W, H) = imgs.shape\n", + " (wsize, hsize) = patchsize\n", + " \"\"\"check for errors with sizing\"\"\"\n", + " if (W % wsize != 0) or (H % hsize != 0):\n", + " raise Exception(\"patchsize is not appropriate\")\n", + " if (C != C) or (H != H):\n", + " raise Exception(\"given sizes do not match\")\n", + " size = (N, M, C, W // wsize, wsize, H // hsize, hsize)\n", + " perm = (0, 1, 3, 5, 2, 4, 6) #bring col, row index of patch to front\n", + " flat = (2, 3) #flatten (col, row) index into col*row entry index for patches\n", + " imgs = imgs.reshape(size).permute(perm).flatten(*flat)\n", + " return imgs #in format Nimgs, Npatches, C, Wpatch, Hpatch\n", + " \n", + "def flattenPatches(imgs): #takes input (N, M, Npatches, C, W, H) returns (N, M*Npatches, C*W*H)\n", + " return imgs.flatten(3, 5).flatten(1, 2)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "e0897522", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nDataloader\\n'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Dataloader\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "05c80732", + "metadata": {}, + "outputs": [], + "source": [ + "class DatasetWrapper(Dataset):\n", + " def __init__(self, X, y=None):\n", + " self.X, self.y = X, y\n", + "\n", + " def __len__(self):\n", + " return len(self.X)\n", + "\n", + " def __getitem__(self, idx):\n", + " if self.y is None:\n", + " return self.X[idx]\n", + " else:\n", + " return self.X[idx], self.y[idx]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "ea41eef5", + "metadata": {}, + "outputs": [], + "source": [ + "trainDIRs = ['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest = getImages(trainDIRs, testDIRs)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "1f077f43", + "metadata": {}, + "outputs": [], + "source": [ + "xtrain = flattenPatches(createPatches(xtrain, (16,16)))\n", + "xtest = flattenPatches(createPatches(xtest, (16,16)))" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "a02e05bd", + "metadata": {}, + "outputs": [], + "source": [ + "def trainloader(batchsize=16):\n", + " return DataLoader(DatasetWrapper(xtrain, ytrain), batchsize=batchsize, shuffle=True)\n", + "\n", + "def testloader():\n", + " return DataLoader(DatasetWrapper(xtest, ytest), batchsize=1, shuffle=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "18d6ca10", + "metadata": {}, + "outputs": [], + "source": [ + "def trainshape():\n", + " return xtrain.shape\n", + "\n", + "def testshape():\n", + " return xtest.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "8979dcd1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([1076, 4800, 256])\n", + "torch.Size([450, 4800, 256])\n" + ] + } + ], + "source": [ + "print(xtrain.shape)\n", + "print(xtest.shape)" + ] + } + ], + "metadata": { + "kernelspec": { + 
"display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/matplots-checkpoint.ipynb b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/matplots-checkpoint.ipynb new file mode 100644 index 000000000..363fcab7e --- /dev/null +++ b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/matplots-checkpoint.ipynb @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/model-checkpoint.ipynb b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/model-checkpoint.ipynb new file mode 100644 index 000000000..3263fcf63 --- /dev/null +++ b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/model-checkpoint.ipynb @@ -0,0 +1,182 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 37, + "id": "fc1d26a6", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "00044d75", + "metadata": {}, + "outputs": [], + "source": [ + "class Attention(nn.Module):\n", + " def __init__(self, heads, EMBED_DIMENSION):\n", + " super().__init__()\n", + " self.heads = heads\n", + " self.attn = nn.MultiheadAttention(EMBED_DIMENSION, heads, batch_first=True)\n", + " self.Q = nn.Linear(EMBED_DIMENSION, EMBED_DIMENSION, bias=False)\n", + " self.K = nn.Linear(EMBED_DIMENSION, EMBED_DIMENSION, bias=False)\n", + " self.V = nn.Linear(EMBED_DIMENSION, EMBED_DIMENSION, bias=False)\n", + " \n", + " def forward(self, x):\n", + " Q = self.Q(x)\n", + " K = self.K(x)\n", + " V = self.V(x)\n", + " \n", + " attnout, attnweights = self.attn(Q, K, V)\n", + " return attnout" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "id": "733599f9", + "metadata": {}, + "outputs": [], + "source": [ + "class TransBlock(nn.Module):\n", + " def __init__(self, heads, EMBED_DIMENSION, fflsize):\n", + " super().__init__()\n", + " self.fnorm = nn.LayerNorm(EMBED_DIMENSION)\n", + " self.snorm = nn.LayerNorm(EMBED_DIMENSION)\n", + " self.attn = Attention(heads, EMBED_DIMENSION)\n", + " self.ffl = nn.Sequential(\n", + " nn.Linear(EMBED_DIMENSION, fflsize),\n", + " nn.GELU(),\n", + " nn.Linear(fflsize, EMBED_DIMENSION)\n", + " )\n", + " \n", + " def forward(self, x):\n", + " \"\"\"\n", + " Switching to pre-MHA LayerNorm is supposed to give better performance,\n", + " this is used in other models such as LLMs like GPT. Gradients are meant\n", + " to be stabilised. 
This is different to the original ViT paper.\n", + " \"\"\"\n", + " x = x + self.attn(self.fnorm(x))[0]\n", + " x = x + self.ffl(self.snorm(x))\n", + " return x" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d2a5e050", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Inception module for efficient 7x7 convolution\n", + "\"\"\"\n", + "class Inception(nn.Module):\n", + " def __init__(self, dimin, dimout):\n", + " super().__init__()\n", + " self.branch1 = nn.Sequential(\n", + " nn.Conv2d(dimin, dimout[0], 1, stride=(1,1)),\n", + " nn.Conv2d(dimout[0], dimout[0], 3, stride=(1,1), padding=1),\n", + " nn.Conv2d(dimout[0], dimout[0], 3, stride=(1,1), padding=1)\n", + " )\n", + " self.branch2 = nn.Sequential(\n", + " nn.Conv2d(dimin, dimout[1]), 1, stride=(1,1),\n", + " nn.Conv2d(dimout[1], dimout[1], 3, stride=(1,1), padding=1)\n", + " )\n", + " self.branch3 = nn.Sequential(\n", + " nn.AvgPool2d(3, stride=(1,1), padding=1),\n", + " nn.Conv2d(dimin, dimout[2], 1, stride=(1,1))\n", + " )\n", + " self.branch4 = nn.Sequential(\n", + " nn.Conv2d(dimin, dimout[3], 1, stride=(1,1))\n", + " )\n", + " def forward(self, imgs)\n", + " x1 = self.branch1(imgs)\n", + " x2 = self.branch2(imgs)\n", + " x3 = self.branch3(imgs)\n", + " x4 = self.branch4(imgs)\n", + " return torch.cat([x1, x2, x3, x4], dim=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "id": "e6ac9e2b", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Vision Transformer Class to create a vision transformer model\n", + "\"\"\"\n", + "class VisionTransformer(nn.Module):\n", + " def __init__(self, classes=2, inputsize=(1,1,1), heads=2, fflscale=2, nblocks=1):\n", + " super().__init__()\n", + " (self.N, self.Np, self.P) = inputsize\n", + " \"\"\"components\"\"\"\n", + " self.proj = nn.Linear(self.P, EMBED_DIMENSION)\n", + " self.clstoken = nn.Parameter(torch.zeros(1, 1, EMBED_DIMENSION))\n", + " self.posembed = self.embedding(self.Np+1, EMBED_DIMENSION, freq=10000) #10000 is described in ViT paper\n", + " self.posembed = self.posembed.repeat(self.N, 1, 1)\n", + " self.transformer = nn.Sequential(\n", + " *((TransBlock(heads, EMBED_DIMENSION, int(fflscale*EMBED_DIMENSION)),)*nblocks)\n", + " )\n", + " self.classifier = nn.Sequential(\n", + " nn.LayerNorm(EMBED_DIMENSION),\n", + " nn.Linear(EMBED_DIMENSION, classes)\n", + " )\n", + " \n", + " def embedding(npatches, EMBED_DIMENSION, freq):\n", + " posembed = torch.zeros(npatches, EMBED_DIMENSION)\n", + " for i in range(npatches):\n", + " for j in range(EMBED_DIMENSION):\n", + " if j % 2 == 0:\n", + " posembed[i][j] = np.sin(i/(freq**(j/EMBED_DIMENSION)))\n", + " else:\n", + " posembed[i][j] = np.cos(i/(freq**((j-1)/EMBED_DIMENSION)))\n", + " return posembed\n", + " \n", + " def forward(self, imgs): #assume size checking done by createPatches\n", + " \"\"\"Linear Projection and Positional Embedding\"\"\"\n", + " tokens = self.proj(imgs) #perform linear projection\n", + " clstoken = self.clstoken.repeat(self.N, 1, 1)\n", + " tokens = torch.cat([clstoken, tokens], dim=1) #concat the class token\n", + " x = tokens + self.posembed #add positional encoding\n", + " \"\"\"Transformer\"\"\"\n", + " x = self.transformer(x)\n", + " \"\"\"Classification\"\"\"\n", + " y = x[0]\n", + " return self.classifier(y)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + 
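The Inception cell in this checkpoint does not run as written (branch2 has a misplaced parenthesis and `forward` is missing its colon). A minimal corrected sketch of the same four-branch layout, kept only as an illustration of the intended structure rather than the author's final module, could look like this:

```python
import torch
import torch.nn as nn

class Inception(nn.Module):
    """Four parallel branches concatenated on the channel axis; stacked 3x3
    convolutions stand in for a single larger, more expensive kernel."""
    def __init__(self, dimin, dimout):
        super().__init__()
        self.branch1 = nn.Sequential(
            nn.Conv2d(dimin, dimout[0], 1),
            nn.Conv2d(dimout[0], dimout[0], 3, padding=1),
            nn.Conv2d(dimout[0], dimout[0], 3, padding=1),
        )
        self.branch2 = nn.Sequential(
            nn.Conv2d(dimin, dimout[1], 1),
            nn.Conv2d(dimout[1], dimout[1], 3, padding=1),
        )
        self.branch3 = nn.Sequential(
            nn.AvgPool2d(3, stride=1, padding=1),
            nn.Conv2d(dimin, dimout[2], 1),
        )
        self.branch4 = nn.Conv2d(dimin, dimout[3], 1)

    def forward(self, imgs):
        # all branches preserve the spatial size, so their outputs can be concatenated
        return torch.cat(
            [self.branch1(imgs), self.branch2(imgs), self.branch3(imgs), self.branch4(imgs)],
            dim=1,
        )

# e.g. Inception(20, (8, 8, 8, 8)) maps (B, 20, 240, 256) -> (B, 32, 240, 256)
```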
"file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/train-checkpoint.ipynb b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/train-checkpoint.ipynb new file mode 100644 index 000000000..04b276bce --- /dev/null +++ b/recognition/vision-transformer-4696689/old/.ipynb_checkpoints/train-checkpoint.ipynb @@ -0,0 +1,246 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "73ebb771", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'dlopen(/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so, 6): Library not loaded: @rpath/libpng16.16.dylib\n", + " Referenced from: /Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so\n", + " Reason: Incompatible library version: image.so requires version 56.0.0 or later, but libpng16.16.dylib provides version 54.0.0'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n", + " warn(\n" + ] + } + ], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "from dataset import trainloader\n", + "from dataset import testloader\n", + "from dataset import trainaccloader\n", + "from dataset import trainshape\n", + "from dataset import testshape" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "df0ea69a", + "metadata": {}, + "outputs": [], + "source": [ + "from model import VisionTransformer\n", + "from model import Attention\n", + "from model import TransBlock\n", + "from model3d import Inception" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": "ae8aebe7", + "metadata": {}, + "outputs": [], + "source": [ + "TRAIN_LOSS = []\n", + "TRAIN_ACC = []\n", + "\n", + "def train(model, dataloader, accloader, lossfunc, optimiser, lr=0.1, momentum=0.9, batchsize=16, nepochs=10):\n", + " device = next(model.parameters()).device # check what device the net parameters are on\n", + " \n", + " \"\"\"training\"\"\"\n", + " for i in range(nepochs): # for each epoch\n", + " epoch_loss = 0\n", + " model.train()\n", + " n_batches = 0\n", + " time1 = time.time()\n", + " for (x, y) in dataloader: # for each mini-batch\n", + " optimiser.zero_grad(set_to_none=True)\n", + " loss = lossfunc(model.forward(x), y)\n", + " loss.backward()\n", + " optimiser.step()\n", + " epoch_loss += loss\n", + " n_batches += 1\n", + " time2 = time.time()\n", + " print(\"Done an epoch\", time2-time1)\n", + " epoch_loss /= n_batches\n", + " \n", + " \"\"\"evaluating\"\"\"\n", + " model.eval()\n", + " accuracy = test(model, accloader)\n", + "\n", + " \"\"\"get performance\"\"\"\n", + " TRAIN_LOSS.append(epoch_loss.item())\n", + " TRAIN_ACC.append(accuracy)\n", + "\n", + "def test(model, dataloader):\n", + " with torch.no_grad(): # disable automatic gradient computation for efficiency\n", + " device = next(model.parameters()).device\n", + " \n", + " \"\"\"make predictions\"\"\"\n", + " pcls = []\n", + " items = 0\n", + " time1=time.time()\n", + " for x, y in dataloader:\n", + 
" x = x.to(device)\n", + " pcls.append(abs(y.cpu()-torch.max(model(x), 1)[1].cpu()))\n", + " items += 1\n", + " time2 = time.time()\n", + " print(\"found accuracy in:\", time2-time1)\n", + "\n", + " \"\"\"get accuracy\"\"\"\n", + " pcls = torch.cat(pcls) # concat predictions on the mini-batches\n", + " accuracy = 1 - (pcls.sum().float() / items)\n", + " print(\"accuracy:\", accuracy)\n", + " return accuracy" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "75a45973", + "metadata": {}, + "outputs": [], + "source": [ + "batchsize=16\n", + "N, Np, P = trainshape()\n", + "model = VisionTransformer(inputsize=(batchsize, Np, P), embed=128, fflscale=2, nblocks=4)" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "id": "7b54a6f0", + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "\n", + "criterion = nn.CrossEntropyLoss()\n", + "optimiser = optim.AdamW(model.parameters(), lr=1e-4)" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "id": "18488555", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done an epoch 346.20038080215454\n", + "found accuracy in: 135.9069368839264\n", + "accuracy: tensor(0.5288)\n" + ] + }, + { + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "Input \u001b[0;32mIn [43]\u001b[0m, in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m start \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime()\n\u001b[0;32m----> 2\u001b[0m \u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrainloader\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbatchsize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbatchsize\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrainaccloader\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moptimiser\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m10\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3\u001b[0m end \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime()\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtraining time: \u001b[39m\u001b[38;5;124m\"\u001b[39m, end\u001b[38;5;241m-\u001b[39mstart)\n", + "Input \u001b[0;32mIn [40]\u001b[0m, in \u001b[0;36mtrain\u001b[0;34m(model, dataloader, accloader, lossfunc, optimiser, lr, momentum, batchsize, nepochs)\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m (x, y) \u001b[38;5;129;01min\u001b[39;00m dataloader: \u001b[38;5;66;03m# for each mini-batch\u001b[39;00m\n\u001b[1;32m 14\u001b[0m optimiser\u001b[38;5;241m.\u001b[39mzero_grad(set_to_none\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m---> 15\u001b[0m loss \u001b[38;5;241m=\u001b[39m lossfunc(\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m, y)\n\u001b[1;32m 16\u001b[0m 
loss\u001b[38;5;241m.\u001b[39mbackward()\n\u001b[1;32m 17\u001b[0m optimiser\u001b[38;5;241m.\u001b[39mstep()\n", + "File \u001b[0;32m~/Desktop/COMP3710 Project/PatternAnalysis-2023/recognition/vision-transformer-4696689/model.py:84\u001b[0m, in \u001b[0;36mVisionTransformer.forward\u001b[0;34m(self, imgs)\u001b[0m\n\u001b[1;32m 82\u001b[0m x \u001b[38;5;241m=\u001b[39m tokens \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mposembed\u001b[38;5;241m.\u001b[39mrepeat(imgs\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m], \u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m1\u001b[39m) \u001b[38;5;66;03m#add positional encoding\u001b[39;00m\n\u001b[1;32m 83\u001b[0m \u001b[38;5;124;03m\"\"\"Transformer\"\"\"\u001b[39;00m\n\u001b[0;32m---> 84\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtransformer\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 85\u001b[0m \u001b[38;5;124;03m\"\"\"Classification\"\"\"\u001b[39;00m\n\u001b[1;32m 86\u001b[0m y \u001b[38;5;241m=\u001b[39m x[:,\u001b[38;5;241m0\u001b[39m]\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/container.py:217\u001b[0m, in \u001b[0;36mSequential.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 215\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m):\n\u001b[1;32m 216\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m module \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m:\n\u001b[0;32m--> 217\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m 
\u001b[43mmodule\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 218\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28minput\u001b[39m\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", + "File \u001b[0;32m~/Desktop/COMP3710 Project/PatternAnalysis-2023/recognition/vision-transformer-4696689/model.py:43\u001b[0m, in \u001b[0;36mTransBlock.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, x):\n\u001b[1;32m 38\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 39\u001b[0m \u001b[38;5;124;03m Switching to pre-MHA LayerNorm is supposed to give better performance,\u001b[39;00m\n\u001b[1;32m 40\u001b[0m \u001b[38;5;124;03m this is used in other models such as LLMs like GPT. Gradients are meant\u001b[39;00m\n\u001b[1;32m 41\u001b[0m \u001b[38;5;124;03m to be stabilised. 
This is different to the original ViT paper.\u001b[39;00m\n\u001b[1;32m 42\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 43\u001b[0m x \u001b[38;5;241m=\u001b[39m x \u001b[38;5;241m+\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mattn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfnorm\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 44\u001b[0m x \u001b[38;5;241m=\u001b[39m x \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mffl(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msnorm(x))\n\u001b[1;32m 45\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m x\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", + "File \u001b[0;32m~/Desktop/COMP3710 Project/PatternAnalysis-2023/recognition/vision-transformer-4696689/model.py:22\u001b[0m, in \u001b[0;36mAttention.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 19\u001b[0m K \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mK(x)\n\u001b[1;32m 20\u001b[0m V \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mV(x)\n\u001b[0;32m---> 22\u001b[0m attnout, attnweights \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mattn\u001b[49m\u001b[43m(\u001b[49m\u001b[43mQ\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mK\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mV\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m attnout\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in 
\u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/activation.py:1189\u001b[0m, in \u001b[0;36mMultiheadAttention.forward\u001b[0;34m(self, query, key, value, key_padding_mask, need_weights, attn_mask, average_attn_weights, is_causal)\u001b[0m\n\u001b[1;32m 1175\u001b[0m attn_output, attn_output_weights \u001b[38;5;241m=\u001b[39m F\u001b[38;5;241m.\u001b[39mmulti_head_attention_forward(\n\u001b[1;32m 1176\u001b[0m query, key, value, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39membed_dim, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_heads,\n\u001b[1;32m 1177\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39min_proj_weight, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39min_proj_bias,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1186\u001b[0m average_attn_weights\u001b[38;5;241m=\u001b[39maverage_attn_weights,\n\u001b[1;32m 1187\u001b[0m is_causal\u001b[38;5;241m=\u001b[39mis_causal)\n\u001b[1;32m 1188\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1189\u001b[0m attn_output, attn_output_weights \u001b[38;5;241m=\u001b[39m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmulti_head_attention_forward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1190\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkey\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43membed_dim\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnum_heads\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1191\u001b[0m \u001b[43m 
\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43min_proj_weight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43min_proj_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1192\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias_k\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias_v\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43madd_zero_attn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1193\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdropout\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mout_proj\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mout_proj\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1194\u001b[0m \u001b[43m \u001b[49m\u001b[43mtraining\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtraining\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1195\u001b[0m \u001b[43m \u001b[49m\u001b[43mkey_padding_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mkey_padding_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1196\u001b[0m \u001b[43m \u001b[49m\u001b[43mneed_weights\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mneed_weights\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1197\u001b[0m \u001b[43m \u001b[49m\u001b[43mattn_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattn_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1198\u001b[0m \u001b[43m \u001b[49m\u001b[43maverage_attn_weights\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maverage_attn_weights\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1199\u001b[0m \u001b[43m \u001b[49m\u001b[43mis_causal\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mis_causal\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1200\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch_first \u001b[38;5;129;01mand\u001b[39;00m is_batched:\n\u001b[1;32m 1201\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m attn_output\u001b[38;5;241m.\u001b[39mtranspose(\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m0\u001b[39m), attn_output_weights\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/functional.py:5313\u001b[0m, in \u001b[0;36mmulti_head_attention_forward\u001b[0;34m(query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight, q_proj_weight, k_proj_weight, v_proj_weight, static_k, static_v, average_attn_weights, is_causal)\u001b[0m\n\u001b[1;32m 5311\u001b[0m attn_output_weights \u001b[38;5;241m=\u001b[39m attn_output_weights\u001b[38;5;241m.\u001b[39mview(bsz, num_heads, tgt_len, src_len)\n\u001b[1;32m 5312\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m average_attn_weights:\n\u001b[0;32m-> 5313\u001b[0m attn_output_weights \u001b[38;5;241m=\u001b[39m 
\u001b[43mattn_output_weights\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmean\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdim\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 5315\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_batched:\n\u001b[1;32m 5316\u001b[0m \u001b[38;5;66;03m# squeeze the output if input was unbatched\u001b[39;00m\n\u001b[1;32m 5317\u001b[0m attn_output \u001b[38;5;241m=\u001b[39m attn_output\u001b[38;5;241m.\u001b[39msqueeze(\u001b[38;5;241m1\u001b[39m)\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "start = time.time()\n", + "train(model, trainloader(batchsize=batchsize), trainaccloader(), criterion, optimiser, nepochs=10)\n", + "end = time.time()\n", + "print(\"training time: \", end-start)\n", + "test(model, testloader())" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "id": "bbaac2fc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[]\n", + "[]\n" + ] + } + ], + "source": [ + "print(TRAIN_LOSS)\n", + "print(TRAIN_ACC)" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "94178617", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "280706\n" + ] + } + ], + "source": [ + "print(sum(p.numel() for p in model.parameters()))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ccfcbae", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/conv b/recognition/vision-transformer-4696689/old/conv new file mode 100644 index 000000000..e69de29bb diff --git a/recognition/vision-transformer-4696689/old/dataloader_torch.ipynb b/recognition/vision-transformer-4696689/old/dataloader_torch.ipynb new file mode 100644 index 000000000..48b59d112 --- /dev/null +++ b/recognition/vision-transformer-4696689/old/dataloader_torch.ipynb @@ -0,0 +1,181 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 8, + "id": "b8467df9", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "\"\"\"numpy and torch\"\"\"\n", + "import numpy as np\n", + "import torch\n", + "\n", + "\"\"\"PIL\"\"\"\n", + "from PIL import Image\n", + "\n", + "\"\"\"torchvision and utils\"\"\"\n", + "import torchvision.transforms as transforms\n", + "from torch.utils.data import DataLoader, Dataset\n", + "\n", + "\"\"\"os\"\"\"\n", + "import os\n", + "\n", + "\"\"\"\n", + "Loading data from local file\n", + "\"\"\"\n", + "\n", + "\"\"\"Assumes images have pixel values in range [0,255]\"\"\"\n", + "def getImages(trainDIRs, testDIRS):\n", + " \"\"\"Get image to tensor\"\"\"\n", + " transform = transforms.Compose([\n", + " transforms.PILToTensor()\n", + " ])\n", + " hflip = transforms.Compose([\n", + " transforms.RandomHorizontalFlip(p=1.0),\n", + " transforms.PILToTensor()\n", + " ])\n", + " vflip = transforms.Compose([\n", + " transforms.RandomVerticalFlip(p=1.0),\n", + " transforms.PILToTensor()\n", + " 
])\n", + " dflip = transforms.Compose([\n", + " transforms.RandomHorizontalFlip(p=1.0),\n", + " transforms.RandomVerticalFlip(p=1.0),\n", + " transforms.PILToTensor()\n", + " ])\n", + " tlist = [transform, hflip, vflip, dflip]\n", + " \"\"\"Loading data into arrays\"\"\"\n", + " xtrain, xtrain, xtest, ytest = [], [], [], []\n", + " \"\"\"training data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(trainDIRs):\n", + " for t in tlist:\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = t(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtrain.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtrain = torch.stack(xtrain)\n", + " ytrain = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + "\n", + "\n", + " \"\"\"testing data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(testDIRs):\n", + " for t in tlist:\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtest.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtest = torch.stack(xtest)\n", + " idx = torch.randperm(xtest.size(0))\n", + " xtest = xtest[idx, :]\n", + " splitsize = int(xtest.shape[0]/2)\n", + " xval, xtest = xtest.split(splitsize, dim=0)\n", + " ytest = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " ytest = ytest[idx]\n", + " yval, ytest = ytest.split(splitsize, dim=0)\n", + " return xtrain, ytrain, xtest, ytest, xval, yval\n", + "\n", + "\"\"\"\n", + "Dataloader\n", + "\"\"\"\n", + "class DatasetWrapper(Dataset):\n", + " def __init__(self, X, y=None):\n", + " self.X, self.y = X, y\n", + "\n", + " def __len__(self):\n", + " return len(self.X)\n", + "\n", + " def __getitem__(self, idx):\n", + " if self.y is None:\n", + " return self.X[idx]\n", + " else:\n", + " return self.X[idx], self.y[idx]\n", + "\n", + "trainDIRs = ['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest, xval, yval = getImages(trainDIRs, testDIRs)\n", + "ytrain, ytest = ytrain.type(torch.LongTensor), ytest.type(torch.LongTensor)\n", + "xtrain = xtrain.permute(0, 2, 1, 3, 4)\n", + "xtest = xtest.permute(0, 2, 1, 3, 4)\n", + "xval = xval.permute(0, 2, 1, 3, 4)\n", + "\n", + "def trainloader(batchsize=16):\n", + " return DataLoader(DatasetWrapper(xtrain, ytrain), batch_size=batchsize, shuffle=True, pin_memory=True)\n", + "\n", + "def valloader():\n", + " return DataLoader(DatasetWrapper(xval, yval), batch_size=1, shuffle=True, pin_memory=True)\n", + "\n", + "def testloader():\n", + " return DataLoader(DatasetWrapper(xtest, ytest), batch_size=1, shuffle=True, pin_memory=True)\n", + "\n", + "def trainshape():\n", + " return xtrain.shape\n", + "\n", + "def testshape():\n", + " return xtest.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "0334d1ac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([4304, 1, 20, 240, 256]) torch.Size([900, 1, 20, 240, 256])\n" + ] + } + ], + "source": [ + 
"print(trainshape(), testshape())" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/dataset.ipynb b/recognition/vision-transformer-4696689/old/dataset.ipynb new file mode 100644 index 000000000..f0321fffd --- /dev/null +++ b/recognition/vision-transformer-4696689/old/dataset.ipynb @@ -0,0 +1,304 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "338da719", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'dlopen(/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so, 6): Library not loaded: @rpath/libpng16.16.dylib\n", + " Referenced from: /Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so\n", + " Reason: Incompatible library version: image.so requires version 56.0.0 or later, but libpng16.16.dylib provides version 54.0.0'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n", + " warn(\n" + ] + } + ], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "\"\"\"numpy and torch\"\"\"\n", + "import numpy as np\n", + "import torch\n", + "\n", + "\"\"\"PIL\"\"\"\n", + "from PIL import Image\n", + "\n", + "\"\"\"torchvision and utils\"\"\"\n", + "import torchvision.transforms as transforms\n", + "from torch.utils.data import DataLoader, Dataset\n", + "\n", + "\"\"\"os\"\"\"\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "97e1d5de", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "65011ff4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nLoading data from local file\\n'" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Loading data from local file\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "206e485b", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"Assumes images have pixel values in range [0,255]\"\"\"\n", + "def getImages(trainDIRs, testDIRS):\n", + " \"\"\"Get image to tensor\"\"\"\n", + " transform = transforms.Compose([\n", + " transforms.PILToTensor()\n", + " ])\n", + " augment = transforms.Compose([\n", + " Rescale(256), \n", + " RandomCrop(224), \n", + " ToTensor()\n", + " ])\n", + " \"\"\"Loading data into arrays\"\"\"\n", + " xtrain, xtrain, xtest, ytest = [], [], [], []\n", + " \"\"\"training data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(trainDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = 
(j+1) % 20\n", + " if j == 0:\n", + " xtrain.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtrain = torch.stack(xtrain)\n", + " ytrain = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " \n", + " \"\"\"testing data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(testDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtest.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtest = torch.stack(xtest)\n", + " ytest = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " return xtrain, ytrain, xtest, ytest" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a3c45c1a", + "metadata": {}, + "outputs": [], + "source": [ + "trainDIRs = ['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest = getImages(trainDIRs, testDIRs)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "292100c2", + "metadata": {}, + "outputs": [], + "source": [ + "def createPatches(imgs, patchsize):\n", + " (N, M, C, W, H) = imgs.shape\n", + " (wsize, hsize) = patchsize\n", + " \"\"\"check for errors with sizing\"\"\"\n", + " if (W % wsize != 0) or (H % hsize != 0):\n", + " raise Exception(\"patchsize is not appropriate\")\n", + " if (C != C) or (H != H):\n", + " raise Exception(\"given sizes do not match\")\n", + " size = (N, M, C, W // wsize, wsize, H // hsize, hsize)\n", + " perm = (0, 1, 3, 5, 2, 4, 6) #bring col, row index of patch to front\n", + " flat = (2, 3) #flatten (col, row) index into col*row entry index for patches\n", + " imgs = imgs.reshape(size).permute(perm).flatten(*flat)\n", + " return imgs #in format Nimgs, Npatches, C, Wpatch, Hpatch\n", + " \n", + "def flattenPatches(imgs): #takes input (N, M, Npatches, C, W, H) returns (N, M*Npatches, C*W*H)\n", + " return imgs.flatten(3, 5).flatten(1, 2)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "e0897522", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nDataloader\\n'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Dataloader\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "05c80732", + "metadata": {}, + "outputs": [], + "source": [ + "class DatasetWrapper(Dataset):\n", + " def __init__(self, X, y=None):\n", + " self.X, self.y = X, y\n", + "\n", + " def __len__(self):\n", + " return len(self.X)\n", + "\n", + " def __getitem__(self, idx):\n", + " if self.y is None:\n", + " return self.X[idx]\n", + " else:\n", + " return self.X[idx], self.y[idx]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "ea41eef5", + "metadata": {}, + "outputs": [], + "source": [ + "trainDIRs = ['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest = getImages(trainDIRs, testDIRs)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "1f077f43", + "metadata": {}, + "outputs": [], + "source": [ + "xtrain = flattenPatches(createPatches(xtrain, (16,16)))\n", + "xtest = 
flattenPatches(createPatches(xtest, (16,16)))" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "a02e05bd", + "metadata": {}, + "outputs": [], + "source": [ + "def trainloader(batchsize=16):\n", + " return DataLoader(DatasetWrapper(xtrain, ytrain), batchsize=batchsize, shuffle=True)\n", + "\n", + "def testloader():\n", + " return DataLoader(DatasetWrapper(xtest, ytest), batchsize=1, shuffle=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "18d6ca10", + "metadata": {}, + "outputs": [], + "source": [ + "def trainshape():\n", + " return xtrain.shape\n", + "\n", + "def testshape():\n", + " return xtest.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "8979dcd1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([1076, 4800, 256])\n", + "torch.Size([450, 4800, 256])\n" + ] + } + ], + "source": [ + "print(xtrain.shape)\n", + "print(xtest.shape)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/dataset3d.ipynb b/recognition/vision-transformer-4696689/old/dataset3d.ipynb new file mode 100644 index 000000000..11983c0d9 --- /dev/null +++ b/recognition/vision-transformer-4696689/old/dataset3d.ipynb @@ -0,0 +1,319 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "338da719", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'dlopen(/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so, 6): Library not loaded: @rpath/libpng16.16.dylib\n", + " Referenced from: /Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so\n", + " Reason: Incompatible library version: image.so requires version 56.0.0 or later, but libpng16.16.dylib provides version 54.0.0'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. 
Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n", + " warn(\n" + ] + } + ], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "\"\"\"numpy and torch\"\"\"\n", + "import numpy as np\n", + "import torch\n", + "\n", + "\"\"\"PIL\"\"\"\n", + "from PIL import Image\n", + "\n", + "\"\"\"torchvision and utils\"\"\"\n", + "import torchvision.transforms as transforms\n", + "from torch.utils.data import DataLoader, Dataset\n", + "\n", + "\"\"\"os\"\"\"\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "65011ff4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nLoading data from local file\\n'" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Loading data from local file\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "id": "206e485b", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"Assumes images have pixel values in range [0,255]\"\"\"\n", + "def getImages(trainDIRs, testDIRS):\n", + " \"\"\"Get image to tensor\"\"\"\n", + " transform = transforms.Compose([\n", + " transforms.PILToTensor()\n", + " ])\n", + " \"\"\"Loading data into arrays\"\"\"\n", + " xtrain, xtrain, xtest, ytest = [], [], [], []\n", + " \"\"\"training data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(trainDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtrain.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtrain = torch.stack(xtrain)\n", + " ytrain = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " \n", + " \"\"\"testing data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(testDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtest.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtest = torch.stack(xtest)\n", + " splitsize = int(xtest.shape[0]/2)\n", + " xval, xtest = xtest.split(splitsize, dim=0)\n", + " yval, ytest = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0)).split(splitsize)\n", + " return xtrain, ytrain, xtest, ytest, xval, yval" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "id": "292100c2", + "metadata": {}, + "outputs": [], + "source": [ + "def createPatches(imgs, patchsize):\n", + " (N, M, C, W, H) = imgs.shape\n", + " (dsize, wsize, hsize) = patchsize\n", + " \"\"\"check for errors with sizing\"\"\"\n", + " if (M % dsize != 0) or (W % wsize != 0) or (H % hsize != 0):\n", + " raise Exception(\"patchsize is not appropriate\")\n", + " imgs = imgs.permute(0, 2, 1, 3, 4) # switch M and C\n", + " size = (N, C, M // dsize, dsize, W // wsize, wsize, H // hsize, hsize)\n", + " perm = (0, 2, 4, 6, 1, 3, 5, 7)\n", + " imgs = imgs.reshape(size).permute(perm).flatten(1, 3).flatten(2, 5)\n", + " return imgs #in format Nimgs, Npatches, patchsize" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "id": "e0897522", + 
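For the 3D variant in this notebook the same kind of shape check applies: with (4, 16, 16) patches, a 20 × 240 × 256 volume yields 5 × 15 × 16 = 1200 patches of 1 × 4 × 16 × 16 = 1024 values each, matching the shapes printed further down ([1076, 1200, 1024] for the training set). A standalone sketch (function restated, dummy input assumed):

```python
import torch

def create_patches_3d(imgs, patchsize):
    # (N, M, C, W, H) -> (N, Npatches, C*dsize*wsize*hsize), mirroring the 3D createPatches above
    N, M, C, W, H = imgs.shape
    dsize, wsize, hsize = patchsize
    assert M % dsize == 0 and W % wsize == 0 and H % hsize == 0, "patch size must divide the volume"
    imgs = imgs.permute(0, 2, 1, 3, 4)                         # (N, C, M, W, H)
    size = (N, C, M // dsize, dsize, W // wsize, wsize, H // hsize, hsize)
    perm = (0, 2, 4, 6, 1, 3, 5, 7)                            # patch-grid indices first, patch contents last
    return imgs.reshape(size).permute(perm).flatten(1, 3).flatten(2, 5)

x = torch.zeros(2, 20, 1, 240, 256)                            # hypothetical batch of two volumes
print(create_patches_3d(x, (4, 16, 16)).shape)                 # torch.Size([2, 1200, 1024])
```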
"metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nDataloader\\n'" + ] + }, + "execution_count": 52, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Dataloader\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "id": "05c80732", + "metadata": {}, + "outputs": [], + "source": [ + "class DatasetWrapper(Dataset):\n", + " def __init__(self, X, y=None):\n", + " self.X, self.y = X, y\n", + "\n", + " def __len__(self):\n", + " return len(self.X)\n", + "\n", + " def __getitem__(self, idx):\n", + " if self.y is None:\n", + " return self.X[idx]\n", + " else:\n", + " return self.X[idx], self.y[idx]" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "id": "ea41eef5", + "metadata": {}, + "outputs": [], + "source": [ + "trainDIRs = ['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest, xval, yval = getImages(trainDIRs, testDIRs)" + ] + }, + { + "cell_type": "code", + "execution_count": 63, + "id": "1f077f43", + "metadata": {}, + "outputs": [], + "source": [ + "#xtrain = createPatches(xtrain, (4,16,16))\n", + "#xtest = createPatches(xtest, (4,16,16))\n", + "xval = createPatches(xval, (4,16,16))" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "id": "a02e05bd", + "metadata": {}, + "outputs": [], + "source": [ + "def trainloader(batchsize=16):\n", + " return DataLoader(DatasetWrapper(xtrain, ytrain), batchsize=batchsize, shuffle=True)\n", + "\n", + "def testloader():\n", + " return DataLoader(DatasetWrapper(xtest, ytest), batchsize=1, shuffle=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "id": "18d6ca10", + "metadata": {}, + "outputs": [], + "source": [ + "def trainshape():\n", + " return xtrain.shape\n", + "\n", + "def testshape():\n", + " return xtest.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 66, + "id": "690cd78c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([1076, 1200, 1024])\n" + ] + } + ], + "source": [ + "print(xtrain.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 65, + "id": "c24aa902", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([225, 1200, 1024])\n" + ] + } + ], + "source": [ + "print(xtest.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 64, + "id": "a39f7b82", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([225, 1200, 1024])\n" + ] + } + ], + "source": [ + "print(xval.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9183e53b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/datasetconv.ipynb b/recognition/vision-transformer-4696689/old/datasetconv.ipynb new file mode 100644 index 000000000..715659242 --- /dev/null +++ b/recognition/vision-transformer-4696689/old/datasetconv.ipynb @@ 
-0,0 +1,365 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "id": "338da719", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'dlopen(/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so, 6): Library not loaded: @rpath/libpng16.16.dylib\n", + " Referenced from: /Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so\n", + " Reason: Incompatible library version: image.so requires version 56.0.0 or later, but libpng16.16.dylib provides version 54.0.0'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n", + " warn(\n" + ] + } + ], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "\"\"\"numpy and torch\"\"\"\n", + "import numpy as np\n", + "import torch\n", + "\n", + "\"\"\"PIL\"\"\"\n", + "from PIL import Image\n", + "\n", + "\"\"\"torchvision and utils\"\"\"\n", + "import torchvision.transforms as transforms\n", + "from torch.utils.data import DataLoader, Dataset\n", + "\n", + "\"\"\"os\"\"\"\n", + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "65011ff4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nLoading data from local file\\n'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Loading data from local file\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "206e485b", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"Assumes images have pixel values in range [0,255]\"\"\"\n", + "def getImages(trainDIRs, testDIRS):\n", + " \"\"\"Get image to tensor\"\"\"\n", + " transform = transforms.Compose([\n", + " transforms.PILToTensor()\n", + " ])\n", + " \"\"\"Loading data into arrays\"\"\"\n", + " xtrain, xtrain, xtest, ytest = [], [], [], []\n", + " \"\"\"training data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(trainDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtrain.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtrain = torch.stack(xtrain)\n", + " ytrain = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " \n", + " \"\"\"testing data\"\"\"\n", + " size = [0, 0]\n", + " for i, DIR in enumerate(testDIRs):\n", + " px = []\n", + " j = 0\n", + " for filename in sorted(os.listdir(DIR)):\n", + " f = os.path.join(DIR, filename)\n", + " img = Image.open(f)\n", + " tensor = transform(img).float()\n", + " tensor.require_grad = True\n", + " px.append(tensor/255)\n", + " j = (j+1) % 20\n", + " if j == 0:\n", + " xtest.append(torch.stack(px))\n", + " px = []\n", + " size[i] += 1\n", + " xtest = torch.stack(xtest)\n", + " ytest = torch.from_numpy(np.concatenate((np.ones(size[0]), np.zeros(size[1])), axis=0))\n", + " return xtrain, ytrain, xtest, ytest" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "e0897522", + 
"metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\nDataloader\\n'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\"\"\n", + "Dataloader\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "05c80732", + "metadata": {}, + "outputs": [], + "source": [ + "class DatasetWrapper(Dataset):\n", + " def __init__(self, X, y=None):\n", + " self.X, self.y = X, y\n", + "\n", + " def __len__(self):\n", + " return len(self.X)\n", + "\n", + " def __getitem__(self, idx):\n", + " if self.y is None:\n", + " return self.X[idx]\n", + " else:\n", + " return self.X[idx], self.y[idx]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "ea41eef5", + "metadata": {}, + "outputs": [], + "source": [ + "trainDIRs = ['../../../AD_NC/train/AD/', '../../../AD_NC/train/NC']\n", + "testDIRs = ['../../../AD_NC/test/AD/', '../../../AD_NC/test/NC']\n", + "xtrain, ytrain, xtest, ytest = getImages(trainDIRs, testDIRs)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "a161d76a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([1076, 20, 1, 240, 256])\n", + "torch.Size([450, 20, 1, 240, 256])\n", + "torch.Size([450])\n" + ] + } + ], + "source": [ + "print(xtrain.shape)\n", + "print(xtest.shape)\n", + "print(ytest.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 98, + "id": "848190bc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([16, 192, 96])\n", + "0.2741\n" + ] + } + ], + "source": [ + "class ConvLayer2(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " #pool\n", + " self.pool = nn.MaxPool2d(kernel_size=3, stride=2)\n", + " self.relu = nn.ReLU()\n", + " #first layer\n", + " self.conv11_x = nn.Conv2d(20, 48, kernel_size=(11,11), stride=(4,4), padding=(0,0))\n", + " self.conv11_y = nn.Conv2d(240, 48, kernel_size=(11,3), stride=(4,1), padding=(0,0))\n", + " self.conv11_z = nn.Conv2d(256, 48, kernel_size=(3,11), stride=(1,4), padding=(0,0))\n", + " #second layer\n", + " self.conv5_x = nn.Conv2d(48, 192, kernel_size=(5,5), stride=(2,2), padding=(0,0))\n", + " self.conv5_y = nn.Conv2d(48, 192, kernel_size=(5,3), stride=(2,1), padding=(0,0))\n", + " self.conv5_z = nn.Conv2d(48, 192, kernel_size=(3,5), stride=(1,2), padding=(0,0))\n", + " #projection\n", + " self.l_x = nn.Linear(30, 32)\n", + " self.l_y = nn.Linear(12, 32)\n", + " self.l_z = nn.Linear(10, 32)\n", + "\n", + " def forward(self, imgs):\n", + " #input N, C, L, W, H\n", + " #first layer\n", + " x_x = self.relu(self.pool(self.conv11_x(imgs.flatten(1,2))))\n", + " x_y = self.relu(self.pool(self.conv11_y(imgs.permute(0,1,3,4,2).flatten(1,2))))\n", + " x_z = self.relu(self.pool(self.conv11_z(imgs.permute(0,1,4,2,3).flatten(1,2))))\n", + " #second layer\n", + " x_x = self.relu(self.pool(self.conv5_x(x_x)))\n", + " x_y = self.relu(self.pool(self.conv5_y(x_y)))\n", + " x_z = self.relu(self.pool(self.conv5_z(x_z)))\n", + " #projection\n", + " x_x = self.l_x(x_x.flatten(2,3))\n", + " x_y = self.l_y(x_y.flatten(2,3))\n", + " x_z = self.l_z(x_z.flatten(2,3))\n", + " return torch.cat([x_x, x_y, x_z], dim=2)\n", + "import time\n", + "start = time.time()\n", + "conv=ConvLayer2()\n", + "print(conv(xtrain[0:16,:].permute(0,2,1,3,4)).shape)\n", + "end = time.time()\n", + "print(round(end-start, 4))" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": 
"f295ee82", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1.004753589630127\n" + ] + } + ], + "source": [ + "conv_11 = nn.Conv2d(20, 64, kernel_size=(11,11), stride=(4,4), padding=(0,0))\n", + "import time\n", + "total = 0\n", + "for i in range(10):\n", + " start=time.time()\n", + " x = conv_11(xtrain[0:16, :].permute(0,2,1,3,4).flatten(1,2))\n", + " end = time.time()\n", + " total += end-start\n", + "print(total)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "c5e39d11", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([1076, 256, 4, 5, 6])\n" + ] + } + ], + "source": [ + "import torch.nn as nn\n", + "class ConvLayer(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.conv11 = nn.Conv3d(1, 64, kernel_size=(3,11,11), stride=(1,4,4), padding=(1,0,0))\n", + " self.firstpool = nn.MaxPool3d(kernel_size=3, stride=2)\n", + " self.conv5 = nn.Conv3d(64, 256, kernel_size=(3,5,5), stride=(1,2,2), padding=(1,0,0))\n", + " self.secondpool = nn.MaxPool3d(kernel_size=3, stride=2)\n", + "\n", + " def forward(self, imgs):\n", + " x = self.conv11(imgs)\n", + " x = self.firstpool(x)\n", + " x = self.conv5(x)\n", + " x = self.secondpool(x)\n", + " return x\n", + " \n", + "conv = ConvLayer()\n", + "x = conv(xtrain.permute(0,2,1,3,4))\n", + "print(x.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "a02e05bd", + "metadata": {}, + "outputs": [], + "source": [ + "def trainloader(batchsize=16):\n", + " return DataLoader(DatasetWrapper(xtrain, ytrain), batchsize=batchsize, shuffle=True)\n", + "\n", + "def testloader():\n", + " return DataLoader(DatasetWrapper(xtest, ytest), batchsize=1, shuffle=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "18d6ca10", + "metadata": {}, + "outputs": [], + "source": [ + "def trainshape():\n", + " return xtrain.shape\n", + "\n", + "def testshape():\n", + " return xtest.shape" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/matplots.ipynb b/recognition/vision-transformer-4696689/old/matplots.ipynb new file mode 100644 index 000000000..2fed751f6 --- /dev/null +++ b/recognition/vision-transformer-4696689/old/matplots.ipynb @@ -0,0 +1,88 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "id": "46c2d9b9", + "metadata": {}, + "outputs": [], + "source": [ + "LOSS = [0.72875, 0.70531, 0.66767, 0.61233, 0.53435, 0.49842, 0.43119, 0.45669, 0.38625, 0.35263, 0.36537, 0.32514, 0.26318, 0.2506, 0.24311, 0.18782, 0.17435, 0.13011, 0.14882, 0.17382, 0.10999, 0.13796, 0.07506, 0.06944, 0.06198, 0.03524, 0.07395, 0.09999, 0.04692, 0.03988, 0.0566, 0.02929, 0.01366, 0.01277, 0.01246, 0.01824, 0.04371, 0.0791, 0.04064, 0.04082, 0.01846, 0.00784, 0.00725, 0.00714, 0.0071, 0.00703, 0.00697, 0.00684, 0.00686, 0.00677, 0.00665, 0.00629, 0.00595, 0.01606, 0.11788, 0.21843, 0.02893, 0.01473, 0.04044, 0.02642, 0.02621, 0.00663, 0.00604, 0.00071, 0.00035, 0.00026, 0.00022, 0.0002, 0.00018, 0.00016, 0.00015, 0.00014, 0.00013, 
0.00012, 0.00011, 0.0001, 0.0001, 9e-05, 8e-05, 8e-05, 7e-05, 7e-05, 7e-05, 6e-05, 6e-05, 6e-05, 5e-05, 5e-05, 5e-05, 5e-05, 4e-05, 4e-05, 4e-05, 4e-05, 4e-05, 4e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n", + "ACC = [50.67, 51.11, 58.67, 63.11, 57.78, 62.67, 63.56, 66.22, 66.22, 67.11, 66.67, 65.78, 67.56, 65.33, 68.0, 68.44, 67.11, 64.89, 64.89, 67.56, 68.0, 69.33, 67.11, 67.56, 68.0, 67.56, 66.22, 71.11, 69.33, 67.11, 66.67, 69.78, 69.33, 69.78, 69.78, 68.0, 66.67, 68.89, 69.78, 69.78, 68.44, 67.56, 67.11, 67.56, 67.56, 67.56, 68.0, 68.0, 68.0, 68.0, 68.0, 67.56, 67.56, 68.0, 66.22, 70.67, 67.56, 66.67, 68.89, 65.33, 66.67, 70.22, 68.0, 69.78, 68.89, 68.0, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44]" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "6983675d", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYIAAAEGCAYAAABo25JHAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/YYfK9AAAACXBIWXMAAAsTAAALEwEAmpwYAAAlw0lEQVR4nO3deXhcd33v8fdXo9VavUiyLe+OHcfOjuMkhJQQIDhJIRBo6xQopJQQStrL7fIQLmUpfQo35WmfcktSk4YAbSkhhKS4vSahN2RrFmLZcRbHS+QljrxJtmwtttbR9/4xZ+TxWEti6WiOdD6v59HjOWeORl8dyfrMbzm/Y+6OiIjEV16uCxARkdxSEIiIxJyCQEQk5hQEIiIxpyAQEYm5/FwX8FbNmDHDFyxYkOsyREQmlI0bNx529+rBnptwQbBgwQLq6+tzXYaIyIRiZq8P9Zy6hkREYk5BICIScwoCEZGYUxCIiMScgkBEJOYUBCIiMacgEBGJudgEwY5D7fzVf75KV28y16WIiERKbIKg8egJvvffu6nfczTXpYiIREpsguCyRdMpSBhPvdac61JERCIlNkEwpTCflfOn8cQOBYGISKbYBAHAlUtnsO1gO01tXbkuRUQkMmIVBL+xJLXw3lOvHc5xJSIi0RFqEJjZajPbbmYNZnb7IM//uZltDj5eMbOkmU0Lq57lsyqYXlqocQIRkQyhBYGZJYA7gWuB5cBNZrY88xh3/5a7X+juFwJfBJ5w95awasrLM96xZAZP7zwS1pcQEZlwwmwRrAIa3H2Xu/cA9wE3DHP8TcCPQ6wHgPPqKmlu7+ZIR3fYX0pEZEIIMwjqgDcythuDfacxsynAauBnQzx/i5nVm1l9c/PounXOnlkOwI5DHaN6HRGRySLMILBB9vkQx74feHqobiF3v9vdV7r7yurqQe+09qadXZsOgvZRvY6IyGQRZhA0AnMztucA+4c4dg3j0C0EUF1eRNWUArYrCEREgHCDYAOwxMwWmlkhqT/267IPMrNK4J3Az0OsJfPrsbSmnB0HFQQiIhBiELh7H3Ab8AiwFbjf3beY2a1mdmvGoR8Cfunux8OqJdvSmWVsP9SO+1A9VSIi8ZEf5ou7+3pgfda+tVnbPwB+EGYd2c6uLae9q4+DbV3MqiwZzy8tIhI5sbqyOG1pMGC8Xd1DIiLxDgLNHBIRiWkQTC0tpKa8iO0HdS2BiEgsgwBgwfRSGo+eyHUZIiI5F9sgmFVVzP7WzlyXISKSc7ENgtlVJRxs7SLZrymkIhJvsQ6C3qRzWIvPiUjMxTYI6qqKAdh3TN1DIhJvsQ2C2VWpC8n2KwhEJOZiGwTpK4oPHNP9i0Uk3mIbBBXF+ZQV5atrSERiL7ZBYGbMripW15CIxF5sgwBS4wS6lkBE4i7WQTCrsoT9GiMQkZiLdRDUVRXTcryHzp5krksREcmZWAdBegrpAXUPiUiMKQhA3UMiEmvxDoJKXVQmIhJqEJjZajPbbmYNZnb7EMdcZWabzWyLmT0RZj3ZaiqKAGjWekMiEmOh3bPYzBLAncB7gUZgg5mtc/dXM46pAu4CVrv7XjOrCauewRQXJJhSmOBIR894flkRkUgJs0WwCmhw913u3gPcB9yQdczvAg+6+14Ad28KsZ5BTSstpOW4WgQiEl9hBkEd8EbGdmOwL9NSYKqZPW5mG83s9wZ7ITO7xczqzay+ubl5TIucXlrIkeNqEYhIfIUZBDbIvuy7wOQDbwOuB94HfNnMlp72Se53u/tKd19ZXV09pkVOLyuiRUEgIjEWZhA0AnMztucA+wc55mF3P+7uh4EngQtCrOk000oLNUYgIrEWZhBsAJaY2UIzKwTWAOuyjvk5cKWZ5ZvZFOBSYGuINZ1memkhLcd7cNctK0UknkKbNeTufWZ2G/AIkADudfctZnZr8Pxad99qZg8DLwH9wD3u/kpYNQ1mWmkhPcl+Orr7KC8uGM8vLSISCaEFAYC7rwfWZ+1bm7X9LeBbYdYxnGmlhQC0HO9REIhILMX6ymKAGWWpi8o0c0hE4ir2QTDQItCAsYjElIIgCIIjuqhMRGIq9kEwvSwdBGoRiEg8xT4IphTmU1yQp64hEYmt2AcBwPRSXV0sIvGlICDVPaSuIRGJKwUB6RVIFQQiEk8KAhQEIhJvCgJS6w0d7ujWekMiEksKAlJLUXf39XOiJ5nrUkRExp2CgFPXGxIRiRsFATBnagkAe44cz3ElIiLjT0EALJtZAcC2A+05rkREZPwpCEh1DdVWFLH1YFuuSxERGXcKgsCymRVqEYhILCkIAstmldPQ1EFvsj/XpYiIjCsFQeCcmRX0JPvZfVgDxiISL6EGgZmtNrPtZtZgZrcP8vxVZtZqZpuDj6+EWc9wls0qB2DbQXUPiUi8hBYEZpYA7gSuBZYDN5nZ8kEOfcrdLww+vh5WPSNZNKOMgoSx7YAGjEUkXsJsEawCGtx9l7v3APcBN4T49UalMD+PxdVlahGISOyEGQR1wBsZ243BvmyXm9mLZvYLM1sx2AuZ2S1mVm9m9c3NzWHUCsCymeVsVxCISMyEGQQ2yL7sVd02AfPd/QLgH4B/H+yF3P1ud1/p7iurq6vHtsoMc6ZO4WBbF/39WnxOROIjzCBoBOZmbM8B9mce4O5t7t4RPF4PFJjZjBBrGlZ1eRHJfqflhNYcEpH4CDMINgBLzGyhmRUCa4B1mQeY2Uwzs+DxqqCeIyHWNKya8iIAmtq6c1WCiMi4yw/rhd29z8xuAx4BEsC97r7FzG4Nnl8LfAT4rJn1AZ3AGs/hTQGqgyBo7lAQiEh8hBYEMNDdsz5r39qMx98BvhNmDW9FTXkxAE1tXTmuRERk/OjK4gxqEYhIHCkIMpQUJigvytcYgYjEioIgS3VFkVoEIhIrCoIs1WVFNKtFICIxoiDIUlNRTFO7BotFJD4UBFmqy4pobu+mL9nPX/7HFi1LLSKTnoIgS01FEcd7kjy98wjff3oPv9xyMNcliYiESkGQpbosNYV03ebUahjN7RovEJHJTUGQpaYiFQTplsBhzSASkUlOQZAlfXVxe3cfAIc7tACdiExuCoIs6auLAaYUJtQ1JCKTnoIgS1VJAQUJwwyuXlajriERmfQUBFny8owZZUWsmF3B4uoyWk700Jfsz3VZIiKhCXX10Ynqc+86i9qKYg62deEOLcd7qKkoznVZIiKhUItgEB+7bD7vXV5LdVkhAE0aJxCRSUxBMIz0wLHGCURkMlMQDGNGcHGZZg6JyGSmIBhGOgh0LYGITGahBoGZrTaz7WbWYGa3D3PcJWaWNLOPhFnPW1ValM+UwoS6hkRkUgstCMwsAdwJXAssB24ys+VDHHcHqZvcR86MYDVSEZHJKswWwSqgwd13uXsPcB9wwyDH/RHwM6ApxFrOWHV5kVoEIjKphRkEdcAbGduNwb4BZlYHfAhYO9wLmdktZlZvZvXNzc1jXuhwZpQVqkUgIpPasEFgZu83s/kZ218xsxfNbJ2ZLRzhtW2QfZ61/ffAF9w9OdwLufvd7r7S3V
dWV1eP8GXHlloEIjLZjXRl8V8DlwGY2W8CHwNuAi4i9S7+fcN8biMwN2N7DrA/65iVwH1mBjADuM7M+tz9399k/aGbUVbE0RO99Cb7KUhokpWITD4j/WVzdz8RPL4R+J67b3T3e4CR3ppvAJaY2UIzKwTWAOuyXnyhuy9w9wXAA8AfRikE4OQU0iOaQioik9RIQWBmVmZmecC7gUcznht28R137wNuIzUbaCtwv7tvMbNbzezW0RQ9nuqmlgDw+hHdu1hEJqeRuob+HtgMtAFb3b0ewMwuAg6M9OLuvh5Yn7Vv0IFhd//kiNXmwIrZFQBs2d/GpYum57gaEZGxN2wQuPu9ZvYIUAO8mPHUAeDmMAuLipryYqrLi3hlf2uuSxERCcWwQRDMGDrm7vuC7XcBHwReB74TenURsWJ2Ba/ub8t1GSIioRhpjOB+oBTAzC4EfgrsBS4A7gq1sghZMbuC15o66OoddpariMiENNIYQYm7p6d8fgy4193/Nhg83hxqZRGyYnYlyX5nx6F21j6xk/PqqvjsVYtzXZaIyJgYcdZQxuOrCWYNuXus7t2YHjC+67GdrH/5II9sOZjjikRExs5ILYJfmdn9pAaHpwK/AjCzWUBsJtbPnTqF8qJ8Hg4CYGdTB+5OcCGciMiENlKL4PPAg8Ae4B3u3hvsnwl8KbyyoiUvz1getArOq6ukvbtPt68UkUlj2CDwlPuAfwcuMrPrzWyRu7/g7pFcNjosVy+r4by6Sv70mqUANDR15LgiEZGxMdL00QrgHuBtpK4jMOACM9sIfMrdYzOn8jPvXMxn3rmYQ21dQCoIrjhrRo6rEhEZvZG6hv4P8CqwxN1vdPcPAYuBl4nRdQSZasqLKC/KV4tARCaNkQaLr8he+sHdHfi6mb0WWlURZmYsrilTEIjIpPFWpo9K4KyaMhqaFQQiMjmMFARPBzejOSUQzOzLwHPhlRVtZ9WU0dzeTWtn78gHi4hE3EhB8EfAeUCDmf3MzB4ws52klpi4LfTqIuqs6jJAM4dEZHIYafpom7v/FnAN8APgn4Fr3P0jxGT10cGcVZMKgp3qHhKRSWCkwWIA3H0nsDNr95+Qul9B7MyuSt2s5sCxrhxXIiIyeqO5CW9sB5IL8/OYXlrIwTYFgYhMfKMJAh+zKiag2origYvLREQmsmGDwMzazaxtkI92YPZIL25mq81su5k1mNntgzx/g5m9ZGabzazezN4xiu9lXNVWFCkIRGRSGOlWleVn+sJmlgDuBN4LNAIbzGydu7+acdijwDp3dzM7n9SNcJad6dccTzMri3l5n25fKSIT32i6hkayCmhw913u3gPcB9yQeYC7dwRXKkPqTmgTpruptqKYwx099PTF6tYMIjIJhRkEdcAbGduNwb5TmNmHzGwb8H+B3w+xnjFVW1EMQHOHlqMWkYktzCAYbFbRae/43f0hd18GfBD4q0FfyOyWYAyhvrm5eWyrPEMzgyA42KpxAhGZ2MIMgkZgbsb2HGD/EMfi7k8Ci83stLWd3f1ud1/p7iurq6vHvtIzkG4RaMBYRCa6MINgA7DEzBaaWSGwBliXeYCZnZVex8jMLgYKgSMh1jRmZlYqCERkcnhTVxafCXfvM7PbgEeABHCvu28xs1uD59cCHwZ+z8x6gU7gdzIGjyNt6pQCChN5uqhMRCa80IIAwN3XA+uz9q3NeHwHcEeYNYTFzKipKOKQxghEZIILs2to0ptZUcyhNs0aEpGJTUEwClpmQkQmAwXBKNRWFHOwrYsJMqwhIjIoBcEozKws4kRPko7uvlyXIiJyxhQEo1BXNQWAlxu15pCITFwKglG4elkN1eVFfPvR19Q9JCITloJgFEoKE3zuqsX8encLz+ycENfBiYicRkEwSmtWzWNWZTF3PLxNK5GKyISkIBil4oIEf3H9cl5qbOX2B19SF5GITDgKgjFw/fmz+J/vWcqDm/bx/af35LocEZG3REEwRv743WexbGY5j21vynUpIiJviYJgjJgZC2eUsv9YZ65LERF5SxQEY2hWZQn7j+lKYxGZWBQEY2h2VTGdvUlaO3tzXYqIyJumIBhDdVUlAOxT95CITCAKgjE0OwiC/ce0IqmITBwKgjF0MgjUIsjWeqKX87/2CM80HM51KSKSRUEwhqaXFlKYn6cgGMS+Y520dfWx+8jxXJciIlkUBGMoL8+YXVmsMYJBtHWlBtC7erUMh0jUhBoEZrbazLabWYOZ3T7I8x81s5eCj2fM7IIw6xkPqSmkCoJs6ZlUXb3JHFciItlCCwIzSwB3AtcCy4GbzGx51mG7gXe6+/nAXwF3h1XPeJldVcIB3dD+NG1BEHRrYT6RyAmzRbAKaHD3Xe7eA9wH3JB5gLs/4+5Hg83ngDkh1jMu6qpS9zHuTeoPXqa2rtRd3LrVIhCJnDCDoA54I2O7Mdg3lE8BvxjsCTO7xczqzay+ubl5DEsce7OrSuh3dFP7LOoaEomuMIPABtk36NoLZvYuUkHwhcGed/e73X2lu6+srq4ewxLHnq4lGFxbpwaLRaIqzCBoBOZmbM8B9mcfZGbnA/cAN7j7hL/NVzoI3mg5keNKoiU9a6i7Ty0CkagJMwg2AEvMbKGZFQJrgHWZB5jZPOBB4OPuviPEWsbN/OlTqK0o4oGNjbkuJVLaOlNjBGoRiERPaEHg7n3AbcAjwFbgfnffYma3mtmtwWFfAaYDd5nZZjOrD6ue8VKQyOPTVy7i2V1H2Pj60ZE/ISYGuobUIhCJnFCvI3D39e6+1N0Xu/tfB/vWuvva4PEfuPtUd78w+FgZZj3j5aZV85g6pYC7HmvIdSmRMdA1pBaBSOToyuIQlBbl88m3L+TRbU00Hj05VtDW1ct7/u4JntkZv/V21CIQiS4FQUjesWQ6ADsOtQ/se3bnERqaOnhh77EcVZU7rZo1JBJZCoKQLJheCsDuwydbBM/uTE2Kits1Bn3Jfo73pFoCmjUkEj0KgpBMKy2kvDifPYdPrraZ7hKKWxC0B1cVg8YIRKJIQRASM2PRjFJ2B0HQ3N7NjkMdABxq685laeMu3S1UUpDQlcUiEaQgCNGCjCB4dleqW2hJTRlNMWsRpGcM1VYUKQhEIkhBEKIF00vZ39pJV2+SZxoOU16cz9Xn1NDU3k1//6CrbUxK6YvJasqLtfqoSAQpCEK0qLoUd3j9yAmeeu0wly2azuzKEvr6nZYTPbkub9yku4aqy4vo63f6tDKrSKQoCEKUnjn00Av72Hesk9UrZlJTXgTEa8A43TVUU5H63rvUKhCJFAVBiBbMSAXBvzy7h8L8PK5ZUUtNRTEATe3dHO/ui0UgpC8mqylPfe+6J4FItCgIQlRZUsD00kKO9yS5+uwayosLqA3eFTe1dfGN9Vv57e8+m+Mqw9fW1Usiz5hWWgCoRSASNQqCkKVbBe+/YDaQ6ieH1BTSZ3YeYW/LiUl/N7PWzl4qivMpLkgAujmNSNQoCEK2pKaM0sIEVy+rAaAoP8G00kJe3d/G7sPHcYfDHZP7uoK2zj4qSwooyk8FgS4qE4mW/
FwXMNn96TVn84m3L6CkMDGwr6a8iMd3NA1sH2ztYlZlSS7KGxdtXb1UlBRQXJB636GF50SiRS2CkFWXF3HOrIpT9tVWFJ+y+Npkv9I41TV0skWgriGRaFEQ5EB6wHj+9CkANLVP7plDbZ29VGa0CNQ1JBItCoIcqA2mkL5vxUwSeTbpp5C2dfVRUXJysFgrkIpEi4IgB9IXlV2yYBo15UUx6hoKxgjUIhCJlFCDwMxWm9l2M2sws9sHeX6ZmT1rZt1m9mdh1hIlV5w1g3cvq+HyxdOpqSie1C2Crt4kPX39wWCxxghEoii0WUNmlgDuBN4LNAIbzGydu7+acVgL8MfAB8OqI4oWVZfxvU9eAsDMiqKBFUono/TyEplBoIXnRKIlzBbBKqDB3Xe5ew9wH3BD5gHu3uTuG4DeEOuItNqK4hG7htq7ern220/x/O6Wcapq7KSXl0hdUJbuGlKLQCRKwgyCOuCNjO3GYN9bZma3mFm9mdU3NzePSXFRUVtRTGtn77B/HJ/b1cLWA208F9zTYCJpDZagrijJnD6qFoFIlIQZBDbIvjNahN/d73b3le6+srq6epRlRUt6BtFw4wS/DgJg39HOcalpLKW7hipLCkjkGQUJ06whkYgJMwgagbkZ23OA/SF+vQkpfU3BcN1Dz+0OguDYBAyCga6h1IJzxfkJtQhEIibMINgALDGzhWZWCKwB1oX49SakkVoEbV29vLq/DZjgQVCSmpdQVJCnJSZEIia0WUPu3mdmtwGPAAngXnffYma3Bs+vNbOZQD1QAfSb2eeB5e7eFlZdUVNbPnwQ1O9pod/hgjmVbD3YTn+/k5c3WK/byNq6emnv6qOuavzWNWrrCsYIghZBUb5uYC8SNaEuOufu64H1WfvWZjw+SKrLKLZSV9zmDRkEv97VQmEij+vPn8WLja0c7ugeuLnNW/XN9Vt5csdhnr796tGU/Ja0dfZSlJ83MHW0uCBP00dFIkZXFueYmTF36hQe3nKQ14+cfj3Bc7tbuHBuFYurywBoHEX30At7j7HvWCdHj4/f/ZJbO1Mrj6YV5Sd0hzKRiFEQRMA3bzyPts4+brzrGbbsbx3Yn+x3th1o4/w5ldRNTXXnnOnMoZ6+fnY2dwCw41D76It+k9q6UgvOpRUX5GmwWCRiFAQRsHLBNB78w7dTmJ/H7/9gAwdaU3/sG4+eoLuvn6W15QP9+mc6YLyzuYPeZGr27o6mjrEp/E1o6+yjovhkD2RxQULTR0UiRkEQEYury/j+zZdwvDvJzd/fQHdfkh2HUn+wl9SWUV5cQEVx/hm3CLYeODn+/to4tgiyu4aKCzR9VCRqFAQRsmxmBd+48Ty2HWxnw+6jA104Z9Wkxgfqpk45pUXQ1jX8FcmZth1spzA/j/PnVOa0a6goP0+zhkQiRkEQMVcvqyHP4PndR2ho6mB2ZTHlwdTLOVNLBloEyX7nhu88zVd/vuVNve7WA20srS1j2cxyGsa1a6h3YOooBC0CdQ2JRIqCIGLKivJZMbuS5/e0sONQO0tqyweeq6sqofHoCdydpxsOs/vwcZ58rRn3kVfu2HqgjWUzK1haW87hjh5axmHmkLsP3JQmrbggT3coE4kYBUEErVo4jRf2HqOhqYMlQbcQpFoEx3uSHD3Ry0/qU+v5HWjtGnEAubm9m8MdPZwzq2IgWMaje+h4T5Jkv2d1DemCMpGoURBE0CULptHd1z8wYyjtonlTMYM/uX8z/7XlEJcunAZA/Z6jw75eeqD4nFnlLK1NBct4DBhnrzME6SUm1CIQiRIFQQRdsmDqwOMltSdbBG+bP5Wv/uZyHt/eTE+yn6+8fznlRfk8v2f4+xRsfP0oZrBidiUzK4opL8rnjoe38/ZvPhrqPQ4yb0qTVpyfoKevf6A76xvrt/KzjY2h1SAiIwt1iQk5M9PLilhSU8ZrTR0DM4bSPnnFQvr6nd2Hj7NidiUXz59K/QhB8OyuI6yYXTHQRXP7dcvY9PoxHn7lAA9uamRV0LIYa60nTi5BnZZ5l7KeZD/3PLWLqimFXHfeLEoKE6HUISLDUxBE1HuW15KfyBuYMZTpD65cNPB41cJpfOuR7Rw93sPU0sLTju3qTbJ57zE+8fb5A/s+eul8PnrpfDp7+3hsexPujtmZLWQ3nOwF54CMG9gn2bT3KP0OLcd7eGBTIx+/bP6gryMi4VLXUET9+TVn8x+3XTHicSvnp7qRNgzRKtj0+lF6kv1cvnj6ac9ddXYNh9q62XognPGC7CWo4WSLoLM3yXPBgnrn1lXwT0/uItl/RvctEpFRUhBEVF6ekZ8Y+cdz4bwqpk4p4KEX9g36/LO7jpDIMy5ZcHr3z1VLU3d7e3xH0+iKHUJr5+ldQ+murie2N/PcriNcOK+K2951FntbTrDsy7/gyr/5FcdOjN+ieCKiIJjwivIT/PbKufzy1UODLmX97M4jnFtXOWgXU01FMefWVfD4tnDuA50eLC4rOtkiuGTBVM6tq+Cux3fyyr5WLls0nWuWz+Sr71/OTavm8UZLJ49sORhKPSIyOAXBJPC7l84j2e/8+Pm9p+x/o+UELzYe4/JFp3cLpb3r7Bo27j1Kc/vQt8o8U22dfZQV5Z/SsjEzPn3lIva2nKDf4fJF08nLM26+YiF/+YEVzJs2hfUvKwhExpOCYBKYP72Udy6t5ke/3su//Xovj21v4r9ePcSH//EZSgoSfPjiuiE/90MX1ZFncMfD28a0Jnfnlf2tTBtkAPu682ZRV1VCYX4eF82rGthvZlx73kyebjis7iGRcaQgmCQ+966zONHdx/966GVu/v4GPv3P9STyjAc++/ZTlqnItqi6jD+4chEPbGwccsD5TDy4aR/P727h01cuPO25gkQe37jxPP7i+nMGBo/Trj9vFn39zi9fPTRmtYjI8OzNrFNzxi9uthr4Nql7Ft/j7v8763kLnr8OOAF80t03DfeaK1eu9Pr6+pAqntiS/c6B1k4OtXXT2ZPk3LoKqqac/o4824mePt77d09y5Hg3s6tKUt05wWB1YSKP/ISRn5dHQSK1ryDPKAj2FyTyBo4tCI7LTxj3PLWLRdVl/PQzl7+leyy7O++44zFKChNce+5MVp87kxWzK0dzWkQEMLON7r5y0OfCCgIzSwA7gPcCjcAG4CZ3fzXjmOuAPyIVBJcC33b3S4d7XQVBOLYeaOO+5/fS3NHNiZ4kfUmnJ9lPX7Kfvn6nN+kZj/vpTfbTl0w97uv31OP+ftK/TlOnFPCTz1x+yhIZb9YPn9nDP/yqgZbj3eQn8vjWR87nAxfMDuVaB5G4yFUQXA58zd3fF2x/EcDdv5lxzHeBx939x8H2duAqdz8w1OsqCKItGQRFImg1jMaRjm4++6NNA8tgFOXnUVyQoCCRRzoT0tFwctuyttPPnxoiA8+P8HmjMdrgGnUNY/BNjPYlFN5ja80lc0+5oPStGC4IwryyuA54I2O7kdS7/pGOqQNOCQIzuwW4BWDevHljXqiMnUSekcgbm6UippcV8a+fupT769+gqb2b7t4kXb1JepLp
Ny+pf9PvZQb+zd7P4M9z2vN+yvZojPb91WhrGIs3eKN+BV0fOOZmlBWF8rphBsFgbwWyfzXezDG4+93A3ZBqEYy+NJkoCvPz+JiWnhAJVZizhhqBuRnbc4D9Z3CMiIiEKMwg2AAsMbOFZlYIrAHWZR2zDvg9S7kMaB1ufEBERMZeaF1D7t5nZrcBj5CaPnqvu28xs1uD59cC60nNGGogNX305rDqERGRwYW6DLW7ryf1xz5z39qMxw58LswaRERkeLqyWEQk5hQEIiIxpyAQEYk5BYGISMyFuuhcGMysGXj9DD99BnB4DMsJm+oN10SqdyLVCqo3bGdS73x3rx7siQkXBKNhZvVDrbURRao3XBOp3olUK6jesI11veoaEhGJOQWBiEjMxS0I7s51AW+R6g3XRKp3ItUKqjdsY1pvrMYIRETkdHFrEYiISBYFgYhIzMUmCMxstZltN7MGM7s91/VkM7O5ZvaYmW01sy1m9j+C/V8zs31mtjn4uC7XtQKY2R4zezmoqT7YN83M/svMXgv+nZrrOgHM7OyM87fZzNrM7PNROrdmdq+ZNZnZKxn7hjyfZvbF4Hd5u5m9LyL1fsvMtpnZS2b2kJlVBfsXmFlnxnleO+QLj1+tQ/7sI3puf5JR6x4z2xzsH5tz6+6T/oPUMtg7gUVAIfAisDzXdWXVOAu4OHhcDuwAlgNfA/4s1/UNUu8eYEbWvr8Bbg8e3w7ckes6h/hdOAjMj9K5BX4DuBh4ZaTzGfxevAgUAQuD3+1EBOq9BsgPHt+RUe+CzOMicm4H/dlH9dxmPf+3wFfG8tzGpUWwCmhw913u3gPcB9yQ45pO4e4H3H1T8Lgd2Erq/s0TyQ3AD4PHPwQ+mLtShvRuYKe7n+nV6aFw9yeBlqzdQ53PG4D73L3b3XeTup/HqvGoM22wet39l+7eF2w+R+qOgzk3xLkdSiTPbZqZGfDbwI/H8mvGJQjqgDcythuJ8B9ZM1sAXAT8Oth1W9Dcvjcq3S2k7i39SzPbaGa3BPtqPbjDXPBvTc6qG9oaTv1PFMVzmzbU+ZwIv8+/D/wiY3uhmb1gZk+Y2ZW5KirLYD/7qJ/bK4FD7v5axr5Rn9u4BIENsi+S82bNrAz4GfB5d28D/hFYDFwIHCDVLIyCK9z9YuBa4HNm9hu5LmgkwS1TPwD8NNgV1XM7kkj/PpvZl4A+4EfBrgPAPHe/CPgT4N/MrCJX9QWG+tlH+twCN3HqG5kxObdxCYJGYG7G9hxgf45qGZKZFZAKgR+5+4MA7n7I3ZPu3g/8E+PcTB2Ku+8P/m0CHiJV1yEzmwUQ/NuUuwoHdS2wyd0PQXTPbYahzmdkf5/N7BPAbwIf9aATO+hmORI83kiq331p7qoc9mcf5XObD9wI/CS9b6zObVyCYAOwxMwWBu8K1wDrclzTKYK+v+8BW9397zL2z8o47EPAK9mfO97MrNTMytOPSQ0SvkLqnH4iOOwTwM9zU+GQTnk3FcVzm2Wo87kOWGNmRWa2EFgCPJ+D+k5hZquBLwAfcPcTGfurzSwRPF5Eqt5dualyoKahfvaRPLeB9wDb3L0xvWPMzu14jobn8gO4jtRMnJ3Al3JdzyD1vYNUE/QlYHPwcR3wL8DLwf51wKwI1LqI1MyKF4Et6fMJTAceBV4L/p2W61ozap4CHAEqM/ZF5tySCqgDQC+pd6WfGu58Al8Kfpe3A9dGpN4GUv3r6d/ftcGxHw5+T14ENgHvj0CtQ/7so3hug/0/AG7NOnZMzq2WmBARibm4dA2JiMgQFAQiIjGnIBARiTkFgYhIzCkIRERiTkEgMo7M7Coz+89c1yGSSUEgIhJzCgKRQZjZx8zs+WCN9++aWcLMOszsb81sk5k9ambVwbEXmtlzGevwTw32n2Vm/8/MXgw+Z3Hw8mVm9kCwdv+PgqvKRXJGQSCSxczOAX6H1MJ6FwJJ4KNAKam1ii4GngC+GnzKPwNfcPfzSV2tmt7/I+BOd78AeDupq0UhtbLs50mtfb8IuCLkb0lkWPm5LkAkgt4NvA3YELxZLyG14Fs/Jxf8+lfgQTOrBKrc/Ylg/w+BnwZrMdW5+0MA7t4FELze8x6sFxPcaWoB8N+hf1ciQ1AQiJzOgB+6+xdP2Wn25azjhlufZbjunu6Mx0n0/1ByTF1DIqd7FPiImdXAwL2D55P6//KR4JjfBf7b3VuBoxk3BPk48ISn7iXRaGYfDF6jyMymjOc3IfJm6Z2ISBZ3f9XM/oLUHdjySK0C+TngOLDCzDYCraTGESC1RPTa4A/9LuDmYP/Hge+a2deD1/itcfw2RN40rT4q8iaZWYe7l+W6DpGxpq4hEZGYU4tARCTm1CIQEYk5BYGISMwpCEREYk5BICIScwoCEZGY+/8kv7ORjhWS+QAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEGCAYAAAB/+QKOAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/YYfK9AAAACXBIWXMAAAsTAAALEwEAmpwYAAAw60lEQVR4nO3deZRc9Xnn//fTS/WmbkktpNbaEosAAxKLhbwQY2MMBv0A7zF4Ccb5HeKZ2GNnO+A4k2TmjDPO4njIOY5jjHGcRBjb2MRMgjGE5AdO5meQhIUkJAFCCKnVUqu1dbd6q+2ZP+691VWtqlK31FXVrfq8zunTdW/dW/Wtq9Z96vt9vou5OyIiIuPVVLoAIiIyPSlAiIhIXgoQIiKSlwKEiIjkpQAhIiJ51VW6AFPpnHPO8RUrVlS6GCIiM8amTZsOu/v8fM+dVQFixYoVbNy4sdLFEBGZMczsjULPqYlJRETyUoAQEZG8FCBERCQvBQgREclLAUJERPJSgBARkbwUIEREJC8FiBlua1cfv9x7rNLFEJGzkALEDPdnP9vJH/7kpUoXQ0TOQgoQM9yJ0SQH+0cqXQwROQspQMxwI4k0h0+MkkylK10UETnLKEDMcCOJFO5w+ES80kU5bX1DCfqGEpUuhoiMowAxw40kUgD0zOBmpt/54Yv8zg83V7oYIjLOWTWbazUaPgsCxKGBEZIpr3QxRGQcBYgZLlODGBitcElO31A8xWgyVeliiMg4ChAzmLszkgiS04dmcA1iOJ5iMJ6sdDFEZJyS5SDM7CIz25z1029mXzCzdjN7ysxeDX/PLXD+TWb2spntMrN7S1XOmWw0OdZzaSY3MQ0nUgyMJHEv3szU0z/CB/76P2b0ZxWZSUoWINz9ZXe/wt2vAN4MDAGPAvcCT7v7SuDpcDuHmdUCXwduBi4B7jCzS0pV1plqOD7WLNPTP3ObmIbjKVJpz+RTCnmpu49f7j3O1q6+MpVMpLqVqxfT9cBr7v4G8D7gu+H+7wLvz3P8WmCXu+929zjwcHieZBlJZgeImfmt2n0sMAyMFG9mOjEaHHd0cOZ26RWZScoVIG4Hvhc+7nD3AwDh7wV5jl8C7Mva7gr3ncTM7jazjWa2sbe3dwqLPP1FNYhZDXUcmqFJ6iiHAjAwUnwsxOBoEECOKECIlEXJk9RmFgNuA744mdPy7MvbQO3u9wP3A6xZs6aq+kpGN9fO9ma2H+hnNJmioa428/y/bO9hJJniltWL6R9J8D8f38FQPMWKeS381g0XnvR6Ow70861nd5Ny5z1v6uDWyxeX/DNkNyv1n6IGEQWIY0NBgHjypYMA3HjpwhKVTqS6laMX083AC+7eE273mNkidz9gZouAQ3nO6QKWZW0vBbpLXM4ZJ7q5rjgnCBC9A6Msnducef7+Z3ez40A/7754AQ8/v5fvPb+P9pYYP9nczad/5VxmN9XnvN76597gsRe7aairYeeBgbIEiKGs3kunbmIKaxDhqPH7nn6VkURKAUKkRMrRxHQHY81LAI8Bd4aP7wR+kuecDcBKMzs3rIHcHp4nWUbDANHZ3gKcnKjuH0kwMJrksc3dfO/5faxZPpc/ujXI9fcOnJyz2NrVx9Ur2vno1Z3sPTp0yl5FU2EkqwZxYoI1iKODwefs6R9l9+HBUzZNicjpKWmAMLNm4Abgx1m7vwLcYGavhs99JTx2sZk9DuDuSeCzwM+AHcAP3F1zWo8TJalXzAtqDePHQkTfyP/0iZ28fniQj72lk462RuDkYBJPptlxYIDVS2fT2d7EcCJF74nS5zWG4xPPQWQnqZOpNEcGR3GHl7r7S1pGkWpV0iYmdx8C5o3bd4SgV9P4Y7uBdVnbjwOPl7J809nOg/1c1NGKWb50TCC6uXaGAWJ8T6aBkQSzGuo4NpRgdlM961Yt4kDfSN5jX+kZIJ5Ks2rpbFpiwZ/FvqNDLGhtnLLPlM9kmpiyk9SHT8SJKjhbu/p463nzipwpIqdDk/VNQ1u6jnPT//o5G/YUXykuap5ZPLuJuhrLmW7D3TkxmuSDVy2htaGO269eRmN9LQtaG4CTaxBbwrEFq5fMYVl7EHDeODI0ZZ+pkOwk9UR7MR0djOcEuC37NS5CpBQ01cY09PLBAQC6jg2x9tz2gsdFN9fmWC1zW2IczZryezCeIu2wZE4T//q772JOc5CQbmmoo7Wh7qQaxNb9x5ndVM+y9ibiqTRmsPdoGQJEfOK9mKIk9VA8xRth2ZbMaWJr1/GSlU+kmqkGMQ1FN+ZTDQiLahCNsVrmtcRyxgdE38ZbG+uZ39pAfe3YP/WCtgYOjUtSb+nqY/XS2ZgZDXW1LGprZG/ZaxCnaGLKao7acSDIO9xwSQd7jgxpPQmRElCAmIaipp1TDQjLBIi6WtpbYpnePTB2s21tPLmS2NHWmNPENJJI8fLBAVYtmZ3Zt6y9uTw1iPAzzG2un0ATU4rG+uBPdseBfmoM3nXRfAC2dauZSWSqKUBMQ5kaxClWiRtJpKmtMeprLQwQ+WoQhQLEWA3ilZ4BkmnPCRDL5zVnmnFKKWpiWtDamFOD+PELXbz3a8/mdLU9MZqkM8yP7DjQz/zWBq5YNgeArcpDiEw5BYhpKAoQp6pBDCdSNNbVYGbMGxcg+ovUIBa0NXCofzRz8z0Y9mxaMrcpc0xnezO9A6M5OYJSyASItgYGRsdqEC/uO87LPQM5U3EMZgWInv5ROtoamdMco7O9WRP4iZSAAsQ0MzCSyNzooyklChlJpGiKBVNrtLc00D+SJJEKbqgnMgGi/qTzOlobiafSHA/b7aP3aW+JZY7pnBcMvit1M9NQIkV9rTG3OZZTgzgali1KTKfTzlA8lelhBWS64K5aOpst+4+XtJwi1UgBYpqJ8g8NdTWnTFIPJ8bmXmqfFdzcj4XnnCoHAdATJqqjmsq8lobMMcvDG3GpA8RwPEVjfS2tjXU5I6mjfErUtTVKUC+a3UhtTTA2pKMtKO/qJbPZd3Q489lFZGqom2sZ/e8Xu0mk0nzwqqUFj4luyKuXzs50dy1kNJHOJG3bm4MAcWQwzoK2xpxeTONFN9ae/lEuXhjkOprqazO1ESDTlPPGkcGJfrzTMhxP0RyrZVZjXU4NIppvKapBDI5GM9fWM7c5xuETo5lAt2ppkDvZur+PloY6vvXsbtJlmCZEZLpoa6rnLz5y+ZS/rgJEmYwkUvzBP25jXkusaICIahBXLJvDhj3HSKTSOV1Usw3nNDEFAeJoVg2ixqAl66YfGZtuYyRzTnbzEsCc5nrmtcR4pad4kDpTw4kUTfW1tDXWE0+lGUkENYroc0Q1iChQtDQEXXqDABEEusuWjAWIZ17p5aX9fTlNUSJnu7nNsVMfdBoUIMrkf7/YTd9wIrN6WtRMMt7eo0O0t8QyOYBjYY0gn5FE
isawiWnerLEaBIxNs5Fvqo754WjqaO6mI4PxzPkRMwva9kuc/B1OjDUxBeVO0lBXk8mLRE1LUYCY1VCXCWbRdWlrrOe8c1r4yeb9vNJzgntvvpjPvPP8kpZbpBooB1Em//DcXgDiqTQHi6z+tvfoIJ3tzcxryb3h55OvBpGdg8jXvATQWF/LnOb6zFiIo4PxvN9AVi+dwys9AznzJU21qIlpLEAkwmR70EQUTdA3mKlB1GXyLR1Z80StWjqbV3pOEKut4SNvLlxDE5GJUw2iDLbt7+PFfce54ZIOntrewxtHBlkypynvsW8cGeLNy+dmbtjZier/2HWYlR2zMr13RhJp5rUEAWJucwyzsYDSP5LMm6COdLQ25jQxrVww66RjVi+ZTdphe3c/a1YUnvLj0MAIP916sGC7/7K5zbznko7MdirtPLX9IDdcsjAT5FobgmA2MJLMqfWMb2Ka1VCXCZ5RExPAqiWz+cnmbtatWsi8WWP7ReT0KUCUwTOvBEuhfv76lTy1vYe9R4Z4e54WkH1Hh9h/fJg71nae1GTUdWyIT3z7OdatWsTXP3YVEKwHEdUgamuMOU31md4/AyMJ2grUIAA6ZjfS3TcM5M9BQJAoB3ixq69ogPjK4zv58S/3F70G//o77+S8+UEQemTTPu750VYe+n/fwlA8xdzmWE4TUzKdO/Yh+/eshjouXdxGZ3tzTq3nmgvOoam+lruuObdoOURk4hQgyuDYYJzmWC1vWtRGXY0VHKH8vef3YsD7r1xCQ13Q+nc0XJPh4ef34Q4/23aQQwMjLGhtzAyUi2SPph4YSbJwduGpujvbm9i89xjD8RTDiVSm2SbbgrZGFrY1Fp0M79hgnH/aeoA71nZyz00XnfT80cE4N37tWR56bi9/cEuwWNE//CJobuvuG8mM5YiawwZGEgwnxj7TiXEBoqWhjo9e3clHr+7MeZ83LWrjpf/2XmoK5HZEZPJKvWDQHDN7xMx2mtkOM3ubmX3fzDaHP3vMbHOBc/eY2dbwuI2lLGepHR8O1mOorTGWzm3KOwlePJnmBxv38e6LO1gypynTZHR0ME4ilebhDfu4ZFEbybTzgw37gNyBchCMY8juHlqsiWl5ewv9I0l2Hz4Rnpu/F8TqUySqf7hpH/Fkmk+9fQVzmmMn/Zw3fxbvvXQhP9zUxUgixYv7jmemxejpHwlyEOOS1NlzSo01MUXdXAt/JgUHkalV6hrEfcAT7v7hcOnQZnf/aPSkmX0VKNZN5jp3P1ziMpbEwEiCpvpa6mpr6AsDBAQjlN84evLYgideOsjhE3E+8dbgm3GmyWgozlPbezh8YpQ///BqHvj33Tz03F7+07suyPQAisxtqWd372Dm/YsFiGiRoV/uPQ4EI7HzuXzZHJ7c3pP5DIOjSfYdGwtw65/by9Ur5nLRwtaC7/Xxt3byz1sP8Hf//x62dPXRHKvFCHpRDcWTYQ0iDBCjSUaTY8EgO0ldY2TGfYhI6ZUsQJhZG3At8CkAd48D8aznDfhV4N2lKkOluDs3/OWz3HXNCn7jnefnBIjl7c1s3nvyQkCPbe5myZwmrl05P7MvajJa/9wbwXMXzmc4keI/r3+B514/wkginRMg2lsa2PTGMdy9aC8mCCbjg+wAUbgGAbDh9aO855IO7vrbDTz/+tGcY377hguLXo+3nTePCxbM4k8e3wnAx9/SyYY9R+npH818hlkNddRYMIJ6NJGmqb6WebNiOUnqlgLddkWkNEpZgzgP6AW+Y2aXA5uAz7t79PX5HUCPu79a4HwHnjQzB77p7vfnO8jM7gbuBujs7Mx3SNn1Dyc52D+S+TbfN5TI3JCXz2umfyTJ8aE4c7KSrL0DI5y/YFZOM0l7S4wX9/Wx//gwv/fei6itsczspTsOBAPYsr9Rz2uJcWwowVA8RTLtxWsQ4UCyzfuOZc7N5y3nzuOcWTEe3rCPpe1NPP/6UT751uW8/fx54fvX8s4L5+c9N2JmfOdTV7Ntfx9mxjUXzOM/r3+B/ceHiafSNMeCmtYFC2ax48AAc5rqaW+J0RKry0lSF2teEpGpV8r/cXXAVcDn3P05M7sPuBf4r+HzdwDfK3L+Ne7ebWYLgKfMbKe7Pzv+oDBw3A+wZs2aaTG/wvg5jnKamLKW88wOEH3DCZaHg+Mi7S0xNuw5Rl2N8ZE1Qd/+hW2NxOpqeCWchqMppwYRI5V29h8PeicVq0E0x+o4Z1YDr4VBbG6BABGrq+Eja5bxzWdeo7Ym2P7tGy4seHwhy9qbT5pob/O+4zmfYdWSOTz7ai+XLGqjvSVGU33tWJI6HtQgRKR8Stmg2wV0uftz4fYjBAEDM6sDPgh8v9DJ7t4d/j4EPAqsLWFZp9TY+IIg2do3nMgs+RkFgfE9mbKDSCTKC7z30oWZsQ81NcayuU283BPVILKS1GFPpD2Hg5t+W5EaRFCW4IZdX2tFj73j6k7SDj97qYd1ly2cdHDIp6OtITP3UpRoX710Nr0Do+w40B/UIBpqs0ZSpxQgRMqsZAHC3Q8C+8ws6vt4PbA9fPweYKe7d+U718xazKw1egzcCGwrVVnPxNHBOHc++HzOpHbZI5RHk0E30ujmv6w9GCC3N+v4dNrzBoio2efjb8ltOutsb+bVnpObmKI8wu4wQBRrYoKxGVuDHlOF2/Y75zXzjpXnAHDH2qlpxuvImj4kqkFEcyodGhhlXkuMloa6zCR9QRPTyfNKiUjplLpLyOeA9Wa2BbgC+JNw/+2Ma14ys8Vm9ni42QH8u5m9CDwP/LO7P1Hisp6Wn7/ayzOv9PLAz1/P7Mseodw3HMyqOjtsTmqO1TG7qT5nyc8T8SRpJ1PLiNx6+WI+9+4LeFvY3h9ZPq+FwXChnewmpksWtVFfa/xoUxB3izUxwVhPpkIJ6mz33HQxn73uAtaeW3jA3GRkj4KOahCXLGrLzFHV3hILezGN5SBaYqpBiJRTSf/HuftmYE2e/Z/Ks68bWBc+3g1M/dy1JRCtZPbjF7q45+aLmdVQl5kEr38kyeGBIA+RXTuY1xLjaNZiQH3h4jht42oQFy1s5aKFJw8+y27Lb8hpYmrg5ssW8diL3UDxMQMw1sQ0fqK+fC5bMjvzDX8qZE9AGAWIplgtKxfMYufBAdpnxXByp9pQklqkvNSp/Axt2d/H3OZ6BuMp/jGcbiK7dhA1PWUHiPaWWM5605laRlPxb/yR5VkBIrsGAfCJty7PPD5VE1Nne0tYnvLPXZSviQnGutVGTUxD8RTptAc1CAUIkbJSgDgDqbTz0v4+br18MZcsauMffvEG7p7pxQRj+YA54wPE4MkBYs5EA8S8sQDROC5AXL1iLhd2BHMenaqJKVODmIKk82TNz5pQrzlrNPiqpXOAIGhFOYfBeJJBJalFyk4B4gy8fvgEg/EUq5fO4cNvXsrOgwN0941wqH80M1trNBYip4lpVixnGu9obejZzRMLEMuK1CDMjC+850LWrmin9RQ31HktMa69cD5vPW9q8gqTEauryQSm7M/wrgvnc/HCVi5d3JYJCD39I8RTaeZ
O8PqIyNTQV7IzEM1RtHrpbIbCpPGWfcc5NDDCOy+cz/7jw7weznU0vonp2FCcdNqpqbFJNzE11tfS0dZAT/9o3qkn1q1axLpVi075OmbG3326cr2HF7Q1cmQwnlMLWtbezBNfuBYYy6FsDwcFdmqVOJGyUg3iDGzp6qOpvpbz58/i4oWt1NUYz7zSSyLlvGlRGwCvR2MSsm7+c5uDAW394brRY01ME2/qiW6W42sQM0nUk6k5z7KoQKbX0vbufmCs15WIlIcCxGkYGEnwy73H2PjGUS5bEnTNbKyv5aKFrfzLjkMAXNjRihkcGwomzcteYnT8Wg/Hh+PEamsmNRFdlGBumMkBIhz811QoQIQ1iB0HwgChGoRIWSlAnIZ7frSFD/z1/2Hb/n6u7Jyb2b966WwOh+s3LJ7TlGkyKjRCOloetH84wezm+klNRHfxwlaaY7UFv33PBCvOaWFWQ11mXe3xxpqYgpHVp0q6i8jUUg7iNHQfH2HVktn87nsvYs3ysQCxaskcvkewVkNHWwPtLTGODyVOGgA3fr3pfKOoT+XX3r6cGy/toL525sb4u65ZwS2rFxVcx6El7MXUOzCamaRQRMpn5t5dKiiYWK+Zd144P6frZdSHH2B+a0MmEJxcg8hdb/r40OQDRENd7UmT+800jfW1OT2yxsseGKfmJZHyU4A4DdmT72W7sKOVWG0N7S0xGupqM4HgVAGibzgx4TEQ1SQ7+C5Xglqk7BQgJsk9/8R6EPTtf9Oi1swo4bEAkds7qbE+yB1Ey4OeThNTNWiO1RKlZYrVNESkNJSDmKQTo0lSaS/YJfWPbruUeDINULAGET2XmQ58KHHSPEwSjNNoiQUT9i1XgBApOwWIScqMei5wQ78qq1dT1Fsp37HzWoLR1Km0MzCazNtkJUGi+sRoUmMgRCpATUyTFA1qm8g3/ihJne/mH83H1D/JUdTVpqWhjlhdTWbMhIiUjwLEJEU39Il84y/exNTAscE4xxUgiprVUEdne3PBrrAiUjolDRBmNsfMHjGznWa2w8zeZmZ/bGb7zWxz+LOuwLk3mdnLZrbLzO4tZTknYzI39EsXt7Fm+Vwuz9OHP5qw73i4LoSamPJ7z5s6uO3yxZUuhkhVKnUO4j7gCXf/sJnFgGbgvcDX3P0vCp1kZrXA14EbCNa23mBmj7n79kLnlEvfJGoQ82Y18Mh/enve5+Y2xxhNpjnYF0wNrhpEfv/l+pWVLoJI1SpZDcLM2oBrgW8DuHvc3Y9P8PS1wC533+3uceBh4H0lKegknSpJPVHzxq0frQAhItNNKZuYzgN6ge+Y2S/N7AEzi4b+ftbMtpjZg2Y2N8+5SyCcsyLQFe47iZndbWYbzWxjb2/vlH6AfPqGE8Rqa854FtUoP/Efuw4DJ4+VEBGptFIGiDrgKuAb7n4lMAjcC3wDOB+4AjgAfDXPufkykp7vTdz9fndf4+5r5s+fPxXlLqpvOBizMJmJ9fJZcU7QbfP/vHaE1oY65SBEZNopZQ6iC+hy9+fC7UeAe929JzrAzL4F/FOBc5dlbS8FuktV0MnoG44zu+nML9sFC1p57vevZ3A0SXtLbEZPuiciZ6eS3ZXc/SCwz8wuCnddD2w3s+ylzj4AbMtz+gZgpZmdGya3bwceK1VZJyOYh2lqmoM62ho5b/6sKXs9EZGpVOpeTJ8D1oc3+d3AXcBfmdkVBE1Ge4DfADCzxcAD7r7O3ZNm9lngZ0At8KC7v1Tisk7I8aFEZq4lEZGzWUkDhLtvBtaM2/3JAsd2A+uyth8HHi9Z4U5T33CCizpaK10MEZGSU8P3JEVJahGRs50CxCSk0s7AiCbWE5HqoAAxCZpYT0SqiQLEJGhiPRGpJgoQkzCZeZhERGY6BYhJiGZeVQ1CRKqBAsQk9GWamDSwTUTOfgoQk6AktYhUEwWISdh/fIT6WlMOQkSqggLEJGzb38dFC1s1sZ6IVIWCdzoz++tw0R8B3J0tXcdZtWROpYsiIlIWxb4K7wE2mdnHylSWaSWZSpNIpTPbe48O0T+S5PKlsytYKhGR8ik4WZ+7/5mZrQf+0sx+nWChn3TW8z8uQ/kq5kuPbuPoUJxv/Vow1+CWrj4AVilAiEiVKDqbq7vvN7N/Br4M3MpYgHDgrA4QbxwdzKw/DbB1fx+xuhou1EyuIlIlCgYIM7uUoNbQDax19wNlK9U0MBxPMZJIZba3dB3nkkVtSlCLSNUoVoN4BPi8uz95ui9uZnOAB4DLCGodnwY+SFAbiQOvAXe5+/E85+4BBoAUkHT38etKlNRwIsVoMqgwpdPOtv39fPCqJeUsgohIRRX7OvwbBKu55TCz28zszRN8/fuAJ9z9YuByYAfwFHCZu68GXgG+WOT869z9inIHB4ChrBrEniODnBhNctkS5R9EpHoUCxD/neCGPt524M9P9cJhF9lrgW8DuHvc3Y+7+5PungwP+wWwdHJFLo+RRIqRRFCDOHwimINp8eymShZJRKSsigWIee6+Z/xOd98FzJvAa58H9ALfMbNfmtkDZtYy7phPAz8tcL4DT5rZJjO7ewLvN6WG4ylGkyncPVOTaIop/yAi1aPYHa/Y1+XxN/p86oCrgG+4+5XAIHBv9KSZfQlIAusLnH+Nu18F3Az8ppldm+8gM7vbzDaa2cbe3t4JFOvU3J2hRIq0QyLlDIcBoqHupBY3EZGzVrEA8S9m9mUzs+ydZvbfgH+dwGt3AV3u/ly4/QhBwMDM7gRuAT7u7p7vZHfvDn8fAh4F1hY47n53X+Pua+bPnz+BYp3aaDJNVKqRZCqrBqEAISLVo1iA+B2CZqJdZvaj8GcXcFH4XFHufhDYZ2YXhbuuB7ab2U3APcBt7j6U71wzazGz1ugxcCOwbaIf6kwNx8e6twa5iGC7sV4BQkSqR7GR1IPAHWZ2HnBpuPsld99tZhOdzvRzwHoziwG7gbuADUAD8FRYOfmFu3/GzBYDD7j7OqADeDR8vg54yN2fmPzHOz3DWeMfRhPpTLK6SQFCRKpI0ZHUAO6+G9gdNjVdZ2a/TzCOoWMC524GxndRvaDAsd3Auqz3vPxUr18qQ1k1iNFkKhMwGuuVpBaR6nHKO56ZvcXM7gPeAB4Dfg5cXOqCVVL2COqRRHqsiUlJahGpIsWm+/6ymb0K/AmwFbgS6HX377r7sXIVsBJympjCGkSsroaaGitylojI2aVYE9PdwMsE8zH9k7uPmFneHkdnm6F4bg1iNJFW/kFEqk6xJqaFBLO43kbQk+nvgSYzO2XeYqbL14tJ+QcRqTbFejGlCEY5/9TMGgnGLTQD+83saXc/axcSGk4kM49HEmmGEynVIESk6kzoa7G7j7j7I+7+IYJeSC+VtliVNRwfW0luNBnVIBQgRKS6FEtS15rZHWb2u2Z2WbjvFuBnwIfLVcBKGIqPr0GkaVCAEJEqUyyf8G1gGfA88Fdm9gbwNuBed//HMpStYnK7uQY1iCblIESkyhQLEGuA1e6eDnMQh4ELwik0zmq5A+WCcRDtLbEKlkhEpPyKfS2Ou3saghwE8E
o1BAcIxkHMaghi51gNQk1MIlJditUgLjazLeFjA84Ptw3wcEW4s9JIIkVzrJZ4Ks1IOFBOSWoRqTbFAsSbylaKaWYonqIpVhusSx1O1qcAISLVptg4iDfKWZDpZDgeNCk11tdmdXNVklpEqkvBAGFmAwTLfkacIFH9b8A97n6kxGWrmOFEUINojNdkJutTDkJEqk3Br8Xu3urubVk/swl6Nr0E/E3ZSlgBw/EgB9FQV8vgaJJEytXEJCJVZ1LtJu5+zN2/BpxfovJMC0OZJqYajg8nAC0WJCLVZ9IN6+FqchOasM/M5pjZI2a208x2mNnbzKzdzJ4ys1fD33MLnHuTmb1sZrvM7N7JlvNMRFNrNNbV0jcUBAjlIESk2hTLQXwwz+65wEeBRyb4+vcBT7j7h8NlR5uB3weedvevhDf+ewnWqM5+71rg68ANQBewwcwec/ftE3zfMzIcdnNtqK9h37E4oPWoRaT6FKsJ3Dpu24EjwH3u/s+nemEzawOuBT4F4O5xIG5m7wPeFR72XeD/Y1yAANYCu8KlRzGzh4H3AWUJEJkmprpajmVqEAoQIlJdinVzvesMX/s8oBf4jpldDmwCPg90uPuB8D0OmNmCPOcuAfZlbXcBb8n3JmZ2N8HiRnR2dp5hkQNBL6Y6GutriSeDmV0VIESk2hSbzfXPzOwzefb/lpn96QReuw64CviGu18JDBI0J01EvrU9865m5+73u/sad18zf/78Cb58Yam0E08GK8g11I1dHiWpRaTaFMu83gLcn2f/fcD/M4HX7gK63P25cPsRgoDRY2aLAMLfhwqcuyxreynQPYH3PGPRetRNsZqcKb6VpBaRalPsrufRZH3jdqbJ/w1//HEHgX1mdlG463qCHMJjwJ3hvjuBn+Q5fQOw0szODZPbt4fnlVy0FkTQxDR2edTEJCLVpliSesjMVrr7q9k7zWwlMDzB1/8csD68ye8G7iIISj8ws18H9gIfCV93MfCAu69z96SZfZZgcaJa4EF3L8sqdiPhanJBE1N2DUIBQkSqS7EA8YcE61H/D4IEMwQjqb8IfGEiL+7um8Nzxrs+z7HdwLqs7ceBxyfyPlMpamJqjtXm1CCaYgoQIlJdivVi+qmZvR/4PYKaAMA24EPuvrUMZauITBNTOFlfpLFOOQgRqS7FBso1Aj3ufue4/QvMrDFcROisE9UgGsf3YlINQkSqTLGvxX8FvCPP/huAr5WmOJU3HM9uYsquQShAiEh1KRYgfsXdfzx+p7uvJxghfVYa6+Y6loOI1dVQU3PKjlsiImeVYgGi2B3xrG2QHwprENm9mJR/EJFqVOzOd8jM1o7fGe7rLV2RKmskTw1C+QcRqUbFurn+HsF4hb8lt5vrrxEMXDsrDWfVIKK8g8ZAiEg1Krai3PMEE+QZwYys2aOff63kJauQaHK+WN3YVBuah0lEqlHRhX/cvQf4IzO7EriDIDhcC/yoDGWriEQqCBB1NZbp5tqgACEiVajYOIgLCZqS7iBYB+L7gLn7dWUqW0XEU06stgYzyzQtKUktItWoWA1iJ/Bz4FZ33wXBVN9lKVUFJVJp6muDDlxKUotINSv21fhDwEHg38zsW2Z2PROYxXWmS6TSxKKmpUw3VwUIEak+xZLUj7r7R4GLCZYF/S2gw8y+YWY3lql8ZRfUIILLohqEiFSzUzauu/ugu69391sIFu7ZzMRXhptx4knPChBRN1flIESk+kzqzufuR939m+7+7lIVqNKym5jqaowa0zgIEalORbu5nikz2wMMACkg6e5rzOz7QLTK3BzguLtfMZFzS1nWSDw5lqQ2M25etYi3nNtejrcWEZlWShogQte5++FoI8xrAGBmXwX6JnpuOWTnIAC+/rGryvn2IiLTRjkCRF5mZsCvAtOquSo+LkCIiFSrUt8JHXjSzDaZ2d3jnnsHwYJEr+Y571TnZpjZ3Wa20cw29vae+RyC2TkIEZFqVuoaxDXu3m1mC4CnzGynuz8bPncH8L3TPDfD3e8H7gdYs2aNn2mBEynX3EsiIpS4BuHu3eHvQ8CjwFoAM6sDPkgwfcekzi217JHUIiLVrGQBwsxazKw1egzcCGwLn34PsNPdu07j3JIKejGpiUlEpJRNTB3Ao0EumjrgIXd/InzudsY1L5nZYuABd193inNLKpFKU68chIhI6QKEu+8GLi/w3Kfy7OsG1p3q3FKLp9LEVIMQETl715Y+XYmkKwchIoICxEnGD5QTEalWuhOOE9c4CBERQAHiJAnlIEREAAWIkyRSriYmEREUIHKk0k4qrQAhIgIKEDkSqTQA9XXqxSQiogCRJR4GCOUgREQUIHIkkmENQgFCREQBIlsiFUwGqwAhIqIAkSPKQWgchIiIAkSOKAehqTZERBQgciSUpBYRydCdMEsiqRyEiEhEd8IsmSYm5SBEREobIMxsj5ltNbPNZrYx3PfHZrY/3LfZzNYVOPcmM3vZzHaZ2b2lLGcknlQOQkQkUsoV5SLXufvhcfu+5u5/UegEM6sFvg7cAHQBG8zsMXffXsJyKgchIpJlut4J1wK73H23u8eBh4H3lfpNM1NtKECIiJQ8QDjwpJltMrO7s/Z/1sy2mNmDZjY3z3lLgH1Z213hvpOY2d1mttHMNvb29p5RYTUOQkRkTKnvhNe4+1XAzcBvmtm1wDeA84ErgAPAV/Ocly8J4PnewN3vd/c17r5m/vz5Z1TYuEZSi4hklPRO6O7d4e9DwKPAWnfvcfeUu6eBbxE0J43XBSzL2l4KdJeyrDA2F5NyECIiJQwQZtZiZq3RY+BGYJuZLco67APAtjynbwBWmtm5ZhYDbgceK1VZI5ruW0RkTCl7MXUAj5pZ9D4PufsTZvb3ZnYFQZPRHuA3AMxsMfCAu69z96SZfRb4GVALPOjuL5WwrICS1CIi2UoWINx9N3B5nv2fLHB8N7Aua/tx4PFSlS+fUU33LSKSoTthlmi6b+UgREQUIHIkNJuriEiGAkSWRCpNjUGdahAiIgoQ2eKptPIPIiIh3Q2zJJKu/IOISEh3wyyJVFpTfYuIhHQ3zJJIpZWgFhEJKUBkiSeVgxARiehumCWeSisHISIS0t0wSyKV1lTfIiIh3Q2zJFKuJiYRkZDuhlmUpBYRGaMAkUVJahGRMbobZlEOQkRkjO6GWZSDEBEZU8oFgzCzPcAAkAKS7r7GzP4cuBWIA68Bd7n78YmcW8qyQtTEpByEiAiUpwZxnbtfkXWDfwq4zN1XA68AX5zEuSWV0GR9IiIZZb8buvuT7p4MN38BLC13GQqJKwchIpJR6ruhA0+a2SYzuzvP858Gfnqa5wJgZneb2UYz29jb23tGhU1oJLWISEZJcxDANe7ebWYLgKfMbKe7PwtgZl8CksD6yZ6bzd3vB+4HWLNmjZ9JYZWkFhEZU9K7obt3h78PAY8CawHM7E7gFuDj7p73pl7o3FJKaByEiEhGye6GZtZiZq3RY+BGYJuZ3QTcA9zm7kOTObdUZY3EU2nq69SLSUQEStvE1AE8ambR+zzk7k+Y2S6ggaDZC
OAX7v4ZM1sMPODu6wqdW8KyAspBiIhkK1mAcPfdwOV59l9Q4PhuYF2xc0spmUqTdtTEJCIS0t0wlEgFqRAFCBGRgO6GoXgqDaBxECIiId0NQ4koQGiqDRERQAEiIwoQamISEQnobhhKJJWDEBHJprthKMpB1CsHISICKEBkKAchIpJLASJ0bCgOQENdbYVLIiIyPShAhB59YT/NsVrWrJhb6aKIiEwLChBA31CCx17s5n1XLKG1sb7SxRERmRYUIIAfbtrHaDLNJ97aWemiiIhMG1UfINydh57by1Wdc7h08exKF0dEZNoo9YJB095QPMXac9v5lZXnVLooIiLTStUHiJaGOr7yodWVLoaIyLRT9U1MIiKSX0kDhJntMbOtZrbZzDaG+9rN7CkzezX8nbdfqZndZGYvm9kuM7u3lOUUEZGTlaMGcZ27X+Hua8Lte4Gn3X0l8HS4ncPMaoGvAzcDlwB3mNklZSiriIiEKtHE9D7gu+Hj7wLvz3PMWmCXu+929zjwcHieiIiUSakDhANPmtkmM7s73Nfh7gcAwt8L8py3BNiXtd0V7juJmd1tZhvNbGNvb+8UFl1EpLqVuhfTNe7ebWYLgKfMbOcEz8s3Y57nO9Dd7wfuB1izZk3eY0REZPJKWoNw9+7w9yHgUYKmox4zWwQQ/j6U59QuYFnW9lKgu5RlFRGRXCULEGbWYmat0WPgRmAb8BhwZ3jYncBP8py+AVhpZueaWQy4PTxPRETKxNxL0ypjZucR1BogaMp6yN2/bGbzgB8AncBe4CPuftTMFgMPuPu68Px1wP8CaoEH3f3LE3jPXuCN0yzyOcDh0zy3EmZSeWdSWUHlLaWZVFaojvIud/f5+Z4oWYCYacxsY1ZX3GlvJpV3JpUVVN5SmkllBZVXI6lFRCQvBQgREclLAWLM/ZUuwCTNpPLOpLKCyltKM6msUOXlVQ5CRETyUg1CRETyUoAQEZG8qj5ATPdpxc1smZn9m5ntMLOXzOzz4f4/NrP94VTqm8NxI9PCmUzzXuZyXpR1/TabWb+ZfWE6XVsze9DMDpnZtqx9Ba+lmX0x/Ft+2czeO03K++dmttPMtpjZo2Y2J9y/wsyGs67z30yT8hb896/k9S1Q1u9nlXOPmW0O90/NtXX3qv0hGIT3GnAeEANeBC6pdLnGlXERcFX4uBV4hWAK9D8GfrfS5StQ5j3AOeP2/Rlwb/j4XuBPK13OPH8LB4Hl0+naAtcCVwHbTnUtw7+LF4EG4Nzwb7t2GpT3RqAufPynWeVdkX3cNLq+ef/9K31985V13PNfBf5wKq9ttdcgpv204u5+wN1fCB8PADsoMLPtNDeRad4r6XrgNXc/3ZH4JeHuzwJHx+0udC3fBzzs7qPu/jqwi+BvvGzyldfdn3T3ZLj5C4K51aaFAte3kIpe32JlNTMDfhX43lS+Z7UHiAlPKz4dmNkK4ErguXDXZ8Nq+4PTockmy+lO815Jt5P7n2u6XlsofC1nwt/zp4GfZm2fa2a/NLNnzOwdlSpUHvn+/afz9X0H0OPur2btO+NrW+0BYsLTileamc0CfgR8wd37gW8A5wNXAAcIqpfTxTXufhXBioC/aWbXVrpAxYQTQt4G/DDcNZ2vbTHT+u/ZzL4EJIH14a4DQKe7Xwn8NvCQmbVVqnxZCv37T+frewe5X3Cm5NpWe4CYEdOKm1k9QXBY7+4/BnD3HndPuXsa+BZlbkooxk9/mvdKuRl4wd17YHpf21Chazlt/57N7E7gFuDjHjaSh001R8LHmwja9C+sXCkDRf79p+X1NbM64IPA96N9U3Vtqz1ATPtpxcO2xW8DO9z9L7P2L8o67AMEU6lXnJ3ZNO+VkvPta7pe2yyFruVjwO1m1mBm5wIrgecrUL4cZnYTcA9wm7sPZe2fb8H689HszyuB3ZUp5Zgi//7T8voC7wF2untXtGPKrm25MvDT9QdYR9Az6DXgS5UuT57y/QpBNXYLsDn8WQf8PbA13P8YsKjSZQ3Lex5BT48XgZeiawrMA54GXg1/t1e6rGG5moEjwOysfdPm2hIErgNAguAb7K8Xu5bAl8K/5ZeBm6dJeXcRtN1Hf79/Ex77ofBv5EXgBeDWaVLegv/+lby++coa7v9b4DPjjp2Sa6upNkREJK9qb2ISEZECFCBERCQvBQgREclLAUJERPJSgBARkbwUIESmATN7l5n9U6XLIZJNAUJERPJSgBCZBDP7hJk9H86x/00zqzWzE2b2VTN7wcyeNrP54bFXmNkvstZBmBvuv8DM/sXMXgzPOT98+Vlm9ki4dsL6cBS9SMUoQIhMkJm9CfgowWSEVwAp4ONAC8FcTlcBzwB/FJ7yd8A97r6aYGRutH898HV3vxx4O8HoWAhm6v0CwboD5wHXlPgjiRRVV+kCiMwg1wNvBjaEX+6bCCbKSzM2Udo/AD82s9nAHHd/Jtz/XeCH4TxVS9z9UQB3HwEIX+95D+fTCVcGWwH8e8k/lUgBChAiE2fAd939izk7zf7ruOOKzV9TrNloNOtxCv3/lApTE5PIxD0NfNjMFkBmbejlBP+PPhwe8zHg3929DziWtVDLJ4FnPFjLo8vM3h++RoOZNZfzQ4hMlL6hiEyQu283sz8gWC2vhmBWzd8EBoFLzWwT0EeQp4BgKu6/CQPAbuCucP8ngW+a2X8PX+MjZfwYIhOm2VxFzpCZnXD3WZUuh8hUUxOTiIjkpRqEiIjkpRqEiIjkpQAhIiJ5KUCIiEheChAiIpKXAoSIiOT1fwEnHWoaAcFZxQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pyplot as plt\n", + "steps = range(175)\n", + "plt.plot(steps, LOSS)\n", + "plt.ylabel('LOSS')\n", + "plt.xlabel('epoch')\n", + "plt.show()\n", + "plt.plot(steps, ACC)\n", + "plt.ylabel('ACCURACY')\n", + "plt.xlabel('epoch')\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d7c1506", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/model.ipynb b/recognition/vision-transformer-4696689/old/model.ipynb new file mode 100644 index 000000000..3263fcf63 --- /dev/null +++ b/recognition/vision-transformer-4696689/old/model.ipynb @@ -0,0 +1,182 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 37, + "id": "fc1d26a6", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "00044d75", + "metadata": {}, + "outputs": [], + "source": [ + "class Attention(nn.Module):\n", + " def __init__(self, heads, EMBED_DIMENSION):\n", + " super().__init__()\n", + " self.heads = heads\n", + " self.attn = nn.MultiheadAttention(EMBED_DIMENSION, heads, batch_first=True)\n", + " self.Q = nn.Linear(EMBED_DIMENSION, EMBED_DIMENSION, bias=False)\n", + " self.K = nn.Linear(EMBED_DIMENSION, EMBED_DIMENSION, bias=False)\n", + " self.V = nn.Linear(EMBED_DIMENSION, EMBED_DIMENSION, bias=False)\n", + " \n", + " def forward(self, x):\n", + " Q = self.Q(x)\n", + " K = self.K(x)\n", + " V = self.V(x)\n", + " \n", + " attnout, attnweights = self.attn(Q, K, V)\n", + " return attnout" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "id": "733599f9", + "metadata": {}, + "outputs": [], + "source": [ + "class TransBlock(nn.Module):\n", + " def __init__(self, heads, EMBED_DIMENSION, fflsize):\n", + " super().__init__()\n", + " self.fnorm = nn.LayerNorm(EMBED_DIMENSION)\n", + " self.snorm = nn.LayerNorm(EMBED_DIMENSION)\n", + " self.attn = Attention(heads, EMBED_DIMENSION)\n", + " self.ffl = nn.Sequential(\n", + " nn.Linear(EMBED_DIMENSION, fflsize),\n", + " nn.GELU(),\n", + " nn.Linear(fflsize, EMBED_DIMENSION)\n", + " )\n", + " \n", + " def forward(self, x):\n", + " \"\"\"\n", + " Switching to pre-MHA LayerNorm is supposed to give better performance,\n", + " this is used in other models such as LLMs like GPT. Gradients are meant\n", + " to be stabilised. 
This is different to the original ViT paper.\n", + " \"\"\"\n", + " x = x + self.attn(self.fnorm(x))[0]\n", + " x = x + self.ffl(self.snorm(x))\n", + " return x" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d2a5e050", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Inception module for efficient 7x7 convolution\n", + "\"\"\"\n", + "class Inception(nn.Module):\n", + " def __init__(self, dimin, dimout):\n", + " super().__init__()\n", + " self.branch1 = nn.Sequential(\n", + " nn.Conv2d(dimin, dimout[0], 1, stride=(1,1)),\n", + " nn.Conv2d(dimout[0], dimout[0], 3, stride=(1,1), padding=1),\n", + " nn.Conv2d(dimout[0], dimout[0], 3, stride=(1,1), padding=1)\n", + " )\n", + " self.branch2 = nn.Sequential(\n", + " nn.Conv2d(dimin, dimout[1]), 1, stride=(1,1),\n", + " nn.Conv2d(dimout[1], dimout[1], 3, stride=(1,1), padding=1)\n", + " )\n", + " self.branch3 = nn.Sequential(\n", + " nn.AvgPool2d(3, stride=(1,1), padding=1),\n", + " nn.Conv2d(dimin, dimout[2], 1, stride=(1,1))\n", + " )\n", + " self.branch4 = nn.Sequential(\n", + " nn.Conv2d(dimin, dimout[3], 1, stride=(1,1))\n", + " )\n", + " def forward(self, imgs)\n", + " x1 = self.branch1(imgs)\n", + " x2 = self.branch2(imgs)\n", + " x3 = self.branch3(imgs)\n", + " x4 = self.branch4(imgs)\n", + " return torch.cat([x1, x2, x3, x4], dim=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "id": "e6ac9e2b", + "metadata": {}, + "outputs": [], + "source": [ + "\"\"\"\n", + "Vision Transformer Class to create a vision transformer model\n", + "\"\"\"\n", + "class VisionTransformer(nn.Module):\n", + " def __init__(self, classes=2, inputsize=(1,1,1), heads=2, fflscale=2, nblocks=1):\n", + " super().__init__()\n", + " (self.N, self.Np, self.P) = inputsize\n", + " \"\"\"components\"\"\"\n", + " self.proj = nn.Linear(self.P, EMBED_DIMENSION)\n", + " self.clstoken = nn.Parameter(torch.zeros(1, 1, EMBED_DIMENSION))\n", + " self.posembed = self.embedding(self.Np+1, EMBED_DIMENSION, freq=10000) #10000 is described in ViT paper\n", + " self.posembed = self.posembed.repeat(self.N, 1, 1)\n", + " self.transformer = nn.Sequential(\n", + " *((TransBlock(heads, EMBED_DIMENSION, int(fflscale*EMBED_DIMENSION)),)*nblocks)\n", + " )\n", + " self.classifier = nn.Sequential(\n", + " nn.LayerNorm(EMBED_DIMENSION),\n", + " nn.Linear(EMBED_DIMENSION, classes)\n", + " )\n", + " \n", + " def embedding(npatches, EMBED_DIMENSION, freq):\n", + " posembed = torch.zeros(npatches, EMBED_DIMENSION)\n", + " for i in range(npatches):\n", + " for j in range(EMBED_DIMENSION):\n", + " if j % 2 == 0:\n", + " posembed[i][j] = np.sin(i/(freq**(j/EMBED_DIMENSION)))\n", + " else:\n", + " posembed[i][j] = np.cos(i/(freq**((j-1)/EMBED_DIMENSION)))\n", + " return posembed\n", + " \n", + " def forward(self, imgs): #assume size checking done by createPatches\n", + " \"\"\"Linear Projection and Positional Embedding\"\"\"\n", + " tokens = self.proj(imgs) #perform linear projection\n", + " clstoken = self.clstoken.repeat(self.N, 1, 1)\n", + " tokens = torch.cat([clstoken, tokens], dim=1) #concat the class token\n", + " x = tokens + self.posembed #add positional encoding\n", + " \"\"\"Transformer\"\"\"\n", + " x = self.transformer(x)\n", + " \"\"\"Classification\"\"\"\n", + " y = x[0]\n", + " return self.classifier(y)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + 
"file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/recognition/vision-transformer-4696689/old/model.py b/recognition/vision-transformer-4696689/old/model.py new file mode 100644 index 000000000..b8fd3eeca --- /dev/null +++ b/recognition/vision-transformer-4696689/old/model.py @@ -0,0 +1,86 @@ +""" +Imports Here +""" +import numpy as np +import torch +import torch.nn as nn + +class Attention(nn.Module): + def __init__(self, heads, embed): + super().__init__() + self.heads = heads + self.attn = nn.MultiheadAttention(embed, heads, batch_first=True) + self.Q = nn.Linear(embed, embed, bias=False) + self.K = nn.Linear(embed, embed, bias=False) + self.V = nn.Linear(embed, embed, bias=False) + + def forward(self, x): + Q = self.Q(x) + K = self.K(x) + V = self.V(x) + + attnout, attnweights = self.attn(Q, K, V) + return attnout + +class TransBlock(nn.Module): + def __init__(self, heads, embed, fflsize): + super().__init__() + self.fnorm = nn.LayerNorm(embed) + self.snorm = nn.LayerNorm(embed) + self.attn = Attention(heads, embed) + self.ffl = nn.Sequential( + nn.Linear(embed, fflsize), + nn.GELU(), + nn.Linear(fflsize, embed) + ) + + def forward(self, x): + """ + Switching to pre-MHA LayerNorm is supposed to give better performance, + this is used in other models such as LLMs like GPT. Gradients are meant + to be stabilised. This is different to the original ViT paper. + """ + x = x + self.attn(self.fnorm(x)) + x = x + self.ffl(self.snorm(x)) + return x + +""" +Vision Transformer Class to create a vision transformer model +""" +class VisionTransformer(nn.Module): + def __init__(self, classes=2, inputsize=(1,1,1), heads=2, embed=64, fflscale=2, nblocks=1): + super().__init__() + (self.N, self.Np, self.P) = inputsize + """components""" + self.proj = nn.Linear(self.P, embed) + self.clstoken = nn.Parameter(torch.zeros(1, 1, embed)) + self.posembed = self.embedding(self.Np+1, embed) + self.transformer = nn.Sequential( + *((TransBlock(heads, embed, int(fflscale*embed)),)*nblocks) + ) + self.classifier = nn.Sequential( + nn.LayerNorm(embed), + nn.Linear(embed, classes) + ) + + def embedding(self, npatches, embed, freq=10000): #10000 is described in ViT paper + posembed = torch.zeros(npatches, embed) + for i in range(npatches): + for j in range(embed): + if j % 2 == 0: + posembed[i][j] = np.sin(i/(freq**(j/embed))) + else: + posembed[i][j] = np.cos(i/(freq**((j-1)/embed))) + return posembed + + def forward(self, imgs): #assume size checking done by createPatches + """Linear Projection and Positional Embedding""" + tokens = self.proj(imgs) #perform linear projection + clstoken = self.clstoken.repeat(imgs.shape[0], 1, 1) + tokens = torch.cat([clstoken, tokens], dim=1) #concat the class token + x = tokens + self.posembed.repeat(imgs.shape[0], 1, 1) #add positional encoding + """Transformer""" + x = self.transformer(x) + """Classification""" + y = x[:,0] + return self.classifier(y) \ No newline at end of file diff --git a/recognition/vision-transformer-4696689/old/model2 output b/recognition/vision-transformer-4696689/old/model2 output new file mode 100644 index 000000000..0df3b0d5f --- /dev/null +++ b/recognition/vision-transformer-4696689/old/model2 output @@ -0,0 +1,6 @@ +cuda +training time: 27699.315416812897 +test acc: tensor(0.6800) +[0.72875, 0.70531, 0.66767, 0.61233, 0.53435, 0.49842, 0.43119, 0.45669, 0.38625, 0.35263, 
0.36537, 0.32514, 0.26318, 0.2506, 0.24311, 0.18782, 0.17435, 0.13011, 0.14882, 0.17382, 0.10999, 0.13796, 0.07506, 0.06944, 0.06198, 0.03524, 0.07395, 0.09999, 0.04692, 0.03988, 0.0566, 0.02929, 0.01366, 0.01277, 0.01246, 0.01824, 0.04371, 0.0791, 0.04064, 0.04082, 0.01846, 0.00784, 0.00725, 0.00714, 0.0071, 0.00703, 0.00697, 0.00684, 0.00686, 0.00677, 0.00665, 0.00629, 0.00595, 0.01606, 0.11788, 0.21843, 0.02893, 0.01473, 0.04044, 0.02642, 0.02621, 0.00663, 0.00604, 0.00071, 0.00035, 0.00026, 0.00022, 0.0002, 0.00018, 0.00016, 0.00015, 0.00014, 0.00013, 0.00012, 0.00011, 0.0001, 0.0001, 9e-05, 8e-05, 8e-05, 7e-05, 7e-05, 7e-05, 6e-05, 6e-05, 6e-05, 5e-05, 5e-05, 5e-05, 5e-05, 4e-05, 4e-05, 4e-05, 4e-05, 4e-05, 4e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 3e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 2e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] +[50.67, 51.11, 58.67, 63.11, 57.78, 62.67, 63.56, 66.22, 66.22, 67.11, 66.67, 65.78, 67.56, 65.33, 68.0, 68.44, 67.11, 64.89, 64.89, 67.56, 68.0, 69.33, 67.11, 67.56, 68.0, 67.56, 66.22, 71.11, 69.33, 67.11, 66.67, 69.78, 69.33, 69.78, 69.78, 68.0, 66.67, 68.89, 69.78, 69.78, 68.44, 67.56, 67.11, 67.56, 67.56, 67.56, 68.0, 68.0, 68.0, 68.0, 68.0, 67.56, 67.56, 68.0, 66.22, 70.67, 67.56, 66.67, 68.89, 65.33, 66.67, 70.22, 68.0, 69.78, 68.89, 68.0, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44, 68.44] +[147.563, 146.343, 144.501, 147.546, 144.388, 143.652, 146.672, 144.336, 145.402, 146.032, 144.47, 144.527, 145.94, 145.326, 144.034, 145.458, 146.047, 143.858, 146.212, 144.663, 144.781, 146.169, 143.851, 146.982, 143.694, 145.329, 145.16, 146.066, 144.08, 145.364, 145.876, 143.906, 145.965, 144.99, 144.381, 147.893, 146.199, 144.357, 145.847, 144.55, 144.047, 145.702, 144.852, 143.926, 145.867, 144.55, 144.213, 146.131, 144.313, 144.568, 145.913, 144.292, 147.893, 147.291, 148.067, 148.66, 149.459, 148.164, 148.963, 149.543, 144.27, 145.208, 145.364, 143.899, 146.17, 143.49, 146.005, 144.319, 144.524, 145.954, 143.908, 145.923, 149.609, 148.143, 149.126, 147.25, 143.868, 145.934, 144.889, 144.385, 146.232, 144.071, 145.286, 145.871, 143.787, 145.719, 148.777, 147.816, 149.28, 148.8, 148.009, 149.313, 149.438, 147.923, 148.943, 149.355, 148.399, 148.242, 149.209, 149.388, 148.377, 148.594, 149.603, 148.353, 148.588, 149.617, 148.425, 148.436, 149.528, 148.536, 148.31, 149.578, 148.509, 148.387, 149.569, 148.542, 148.188, 149.53, 148.641, 148.101, 149.468, 148.894, 148.149, 148.935, 149.422, 
148.588, 148.187, 149.229, 149.147, 149.19, 148.44, 148.16, 149.419, 148.88, 148.568, 148.514, 148.583, 148.594, 148.789, 148.996, 149.07, 149.142, 148.768, 148.309, 148.454, 148.685, 149.076, 149.272, 148.759, 148.253, 148.44, 149.121, 149.245, 148.525, 148.261, 148.695, 149.247, 149.253, 148.579, 148.307, 149.357, 147.468, 148.775, 147.945, 149.511, 148.644, 148.232, 149.552, 148.53, 148.147, 149.467, 148.824, 148.064, 149.387, 149.3] \ No newline at end of file diff --git a/recognition/vision-transformer-4696689/old/model3d.py b/recognition/vision-transformer-4696689/old/model3d.py new file mode 100644 index 000000000..d842d2e7d --- /dev/null +++ b/recognition/vision-transformer-4696689/old/model3d.py @@ -0,0 +1,114 @@ +""" +Imports Here +""" +import numpy as np +import torch +import torch.nn as nn + +class Attention(nn.Module): + def __init__(self, heads, embed): + super().__init__() + self.heads = heads + self.attn = nn.MultiheadAttention(embed, heads, batch_first=True) + self.Q = nn.Linear(embed, embed, bias=False) + self.K = nn.Linear(embed, embed, bias=False) + self.V = nn.Linear(embed, embed, bias=False) + + def forward(self, x): + Q = self.Q(x) + K = self.K(x) + V = self.V(x) + + attnout, attnweights = self.attn(Q, K, V) + return attnout + +class TransBlock(nn.Module): + def __init__(self, heads, embed, fflsize): + super().__init__() + self.fnorm = nn.LayerNorm(embed) + self.snorm = nn.LayerNorm(embed) + self.attn = Attention(heads, embed) + self.ffl = nn.Sequential( + nn.Linear(embed, fflsize), + nn.GELU(), + nn.Linear(fflsize, embed) + ) + + def forward(self, x): + """ + Switching to pre-MHA LayerNorm is supposed to give better performance, + this is used in other models such as LLMs like GPT. Gradients are meant + to be stabilised. This is different to the original ViT paper. 
+ """ + x = x + self.attn(self.fnorm(x)) + x = x + self.ffl(self.snorm(x)) + return x +""" +Inception module for efficient 7x7 convolution +""" +class Inception(nn.Module): + def __init__(self, dimin, dimout): + super().__init__() + self.branch1 = nn.Sequential( + nn.Conv2d(dimin, dimout[0], 1, stride=(1,1)), + nn.Conv2d(dimout[0], dimout[0], 3, stride=(1,1), padding=1), + nn.Conv2d(dimout[0], dimout[0], 3, stride=(1,1), padding=1) + ) + self.branch2 = nn.Sequential( + nn.Conv2d(dimin, dimout[1], 1, stride=(1,1)), + nn.Conv2d(dimout[1], dimout[1], 3, stride=(1,1), padding=1) + ) + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=(1,1), padding=1), + nn.Conv2d(dimin, dimout[2], 1, stride=(1,1)) + ) + self.branch4 = nn.Sequential( + nn.Conv2d(dimin, dimout[3], 1, stride=(1,1)) + ) + self.maxpool = nn.MaxPool2d(2, 2) + def forward(self, imgs): + x1 = self.branch1(imgs) + x2 = self.branch2(imgs) + x3 = self.branch3(imgs) + x4 = self.branch4(imgs) + return self.maxpool(torch.cat([x1, x2, x3, x4], dim=1)) +""" +Vision Transformer Class to create a vision transformer model +""" +class VisionTransformer(nn.Module): + def __init__(self, classes=2, inputsize=(1,1,1), heads=2, embed=64, fflscale=2, nblocks=1): + super().__init__() + (self.N, self.Np, self.P) = inputsize + """components""" + self.proj = nn.Linear(self.P, embed) + self.clstoken = nn.Parameter(torch.zeros(1, 1, embed)) + self.posembed = self.embedding(self.Np+1, embed) + self.transformer = nn.Sequential( + *((TransBlock(heads, embed, int(fflscale*embed)),)*nblocks) + ) + self.classifier = nn.Sequential( + nn.LayerNorm(embed), + nn.Linear(embed, classes) + ) + + def embedding(self, npatches, embed, freq=10000): #10000 is described in ViT paper + posembed = torch.zeros(npatches, embed) + for i in range(npatches): + for j in range(embed): + if j % 2 == 0: + posembed[i][j] = np.sin(i/(freq**(j/embed))) + else: + posembed[i][j] = np.cos(i/(freq**((j-1)/embed))) + return posembed + + def forward(self, imgs): #assume size checking done by createPatches + """Linear Projection and Positional Embedding""" + tokens = self.proj(imgs) #perform linear projection + clstoken = self.clstoken.repeat(imgs.shape[0], 1, 1) + tokens = torch.cat([clstoken, tokens], dim=1) #concat the class token + x = tokens + self.posembed.repeat(imgs.shape[0], 1, 1) #add positional encoding + """Transformer""" + x = self.transformer(x) + """Classification""" + y = x[:,0] + return self.classifier(y) \ No newline at end of file diff --git a/recognition/vision-transformer-4696689/old/output 2 epochs, lr=1e-4 b/recognition/vision-transformer-4696689/old/output 2 epochs, lr=1e-4 new file mode 100644 index 000000000..e69de29bb diff --git a/recognition/vision-transformer-4696689/old/train.ipynb b/recognition/vision-transformer-4696689/old/train.ipynb new file mode 100644 index 000000000..a6473b90c --- /dev/null +++ b/recognition/vision-transformer-4696689/old/train.ipynb @@ -0,0 +1,315 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 52, + "id": "73ebb771", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: 'dlopen(/Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so, 6): Library not loaded: @rpath/libpng16.16.dylib\n", + " Referenced from: /Users/oliver/opt/anaconda3/lib/python3.9/site-packages/torchvision/image.so\n", + " Reason: Incompatible library 
version: image.so requires version 56.0.0 or later, but libpng16.16.dylib provides version 54.0.0'If you don't plan on using image functionality from `torchvision.io`, you can ignore this warning. Otherwise, there might be something wrong with your environment. Did you have `libjpeg` or `libpng` installed before building `torchvision` from source?\n", + " warn(\n" + ] + } + ], + "source": [ + "\"\"\"\n", + "Imports Here\n", + "\"\"\"\n", + "from dataset import trainloader\n", + "from dataset import testloader\n", + "from dataset import trainaccloader\n", + "from dataset import trainshape\n", + "from dataset import testshape" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "id": "df0ea69a", + "metadata": {}, + "outputs": [], + "source": [ + "from model import VisionTransformer\n", + "from model import Attention\n", + "from model import TransBlock\n", + "from model3d import Inception" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "id": "ae8aebe7", + "metadata": {}, + "outputs": [], + "source": [ + "TRAIN_LOSS = []\n", + "TRAIN_ACC = []\n", + "\n", + "def train(model, dataloader, accloader, lossfunc, optimiser, lr=0.1, momentum=0.9, batchsize=16, nepochs=10):\n", + " device = next(model.parameters()).device # check what device the net parameters are on\n", + " \n", + " \"\"\"training\"\"\"\n", + " for i in range(nepochs): # for each epoch\n", + " epoch_loss = 0\n", + " model.train()\n", + " n_batches = 0\n", + " time1 = time.time()\n", + " for (x, y) in dataloader: # for each mini-batch\n", + " optimiser.zero_grad(set_to_none=True)\n", + " loss = lossfunc(model.forward(x), y)\n", + " loss.backward()\n", + " optimiser.step()\n", + " epoch_loss += loss.detach().item()\n", + " n_batches += 1\n", + " time2 = time.time()\n", + " print(\"Done an epoch\", time2-time1)\n", + " epoch_loss /= n_batches\n", + " \n", + " \"\"\"evaluating\"\"\"\n", + " model.eval()\n", + " accuracy = test(model, accloader).detach().item()\n", + "\n", + " \"\"\"get performance\"\"\"\n", + " TRAIN_LOSS.append(epoch_loss)\n", + " TRAIN_ACC.append(accuracy)\n", + "\n", + "def test(model, dataloader):\n", + " with torch.no_grad(): # disable automatic gradient computation for efficiency\n", + " device = next(model.parameters()).device\n", + " \n", + " \"\"\"make predictions\"\"\"\n", + " pcls = []\n", + " items = 0\n", + " time1=time.time()\n", + " for x, y in dataloader:\n", + " x = x.to(device)\n", + " pcls.append(abs(y.cpu()-torch.max(model(x), 1)[1].cpu()))\n", + " items += 1\n", + " time2 = time.time()\n", + " print(\"found accuracy in:\", time2-time1)\n", + "\n", + " \"\"\"get accuracy\"\"\"\n", + " pcls = torch.cat(pcls) # concat predictions on the mini-batches\n", + " accuracy = 1 - (pcls.sum().float() / items)\n", + " print(\"accuracy:\", accuracy)\n", + " return accuracy" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "id": "26cde279", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([16, 1, 20, 240, 256])\n" + ] + } + ], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "rand = torch.rand((16, 1, 20, 240, 256))\n", + "print(rand.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 71, + "id": "f6d1dc89", + "metadata": {}, + "outputs": [], + "source": [ + "class ConvLayer(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.conv11 = nn.Conv3d(1, 64, kernel_size=(3,11,11), stride=(1,4,4), padding=(1,0,0))\n", + " self.firstpool = nn.MaxPool3d(kernel_size=3, 
stride=2)\n", + " self.conv5 = nn.Conv3d(64, 256, kernel_size=(3,5,5), stride=(1,2,2), padding=(1,0,0))\n", + " self.secondpool = nn.MaxPool3d(kernel_size=3, stride=2)\n", + " \n", + " def forward(self, imgs):\n", + " x = self.conv11(imgs)\n", + " x = self.firstpool(x)\n", + " x = self.conv5(x)\n", + " x = self.secondpool(x)\n", + " return x" + ] + }, + { + "cell_type": "code", + "execution_count": 72, + "id": "e1bd3d40", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "time: 0.8001\n", + "torch.Size([16, 256, 4, 5, 6])\n" + ] + } + ], + "source": [ + "import time\n", + "conv = ConvLayer()\n", + "start = time.time()\n", + "out = conv(rand)\n", + "end = time.time()\n", + "print(\"time:\", round(end-start,4))\n", + "print(out.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75a45973", + "metadata": {}, + "outputs": [], + "source": [ + "batchsize=16\n", + "N, Np, P = trainshape()\n", + "model = VisionTransformer(inputsize=(batchsize, Np, P), embed=128, fflscale=2, nblocks=4)" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "id": "7b54a6f0", + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "\n", + "criterion = nn.CrossEntropyLoss()\n", + "optimiser = optim.AdamW(model.parameters(), lr=1e-4)" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "id": "18488555", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done an epoch 346.20038080215454\n", + "found accuracy in: 135.9069368839264\n", + "accuracy: tensor(0.5288)\n" + ] + }, + { + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "Input \u001b[0;32mIn [43]\u001b[0m, in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m start \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime()\n\u001b[0;32m----> 2\u001b[0m \u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrainloader\u001b[49m\u001b[43m(\u001b[49m\u001b[43mbatchsize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbatchsize\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtrainaccloader\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcriterion\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moptimiser\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m10\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3\u001b[0m end \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime()\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtraining time: \u001b[39m\u001b[38;5;124m\"\u001b[39m, end\u001b[38;5;241m-\u001b[39mstart)\n", + "Input \u001b[0;32mIn [40]\u001b[0m, in \u001b[0;36mtrain\u001b[0;34m(model, dataloader, accloader, lossfunc, optimiser, lr, momentum, batchsize, nepochs)\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m (x, y) \u001b[38;5;129;01min\u001b[39;00m dataloader: \u001b[38;5;66;03m# for each mini-batch\u001b[39;00m\n\u001b[1;32m 14\u001b[0m 
optimiser\u001b[38;5;241m.\u001b[39mzero_grad(set_to_none\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m---> 15\u001b[0m loss \u001b[38;5;241m=\u001b[39m lossfunc(\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m, y)\n\u001b[1;32m 16\u001b[0m loss\u001b[38;5;241m.\u001b[39mbackward()\n\u001b[1;32m 17\u001b[0m optimiser\u001b[38;5;241m.\u001b[39mstep()\n", + "File \u001b[0;32m~/Desktop/COMP3710 Project/PatternAnalysis-2023/recognition/vision-transformer-4696689/model.py:84\u001b[0m, in \u001b[0;36mVisionTransformer.forward\u001b[0;34m(self, imgs)\u001b[0m\n\u001b[1;32m 82\u001b[0m x \u001b[38;5;241m=\u001b[39m tokens \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mposembed\u001b[38;5;241m.\u001b[39mrepeat(imgs\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m], \u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m1\u001b[39m) \u001b[38;5;66;03m#add positional encoding\u001b[39;00m\n\u001b[1;32m 83\u001b[0m \u001b[38;5;124;03m\"\"\"Transformer\"\"\"\u001b[39;00m\n\u001b[0;32m---> 84\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtransformer\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 85\u001b[0m \u001b[38;5;124;03m\"\"\"Classification\"\"\"\u001b[39;00m\n\u001b[1;32m 86\u001b[0m y \u001b[38;5;241m=\u001b[39m x[:,\u001b[38;5;241m0\u001b[39m]\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/container.py:217\u001b[0m, in \u001b[0;36mSequential.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 215\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m 
\u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m):\n\u001b[1;32m 216\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m module \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m:\n\u001b[0;32m--> 217\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mmodule\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 218\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28minput\u001b[39m\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", + "File \u001b[0;32m~/Desktop/COMP3710 Project/PatternAnalysis-2023/recognition/vision-transformer-4696689/model.py:43\u001b[0m, in \u001b[0;36mTransBlock.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, x):\n\u001b[1;32m 38\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 39\u001b[0m \u001b[38;5;124;03m Switching to pre-MHA LayerNorm is supposed to give better performance,\u001b[39;00m\n\u001b[1;32m 40\u001b[0m \u001b[38;5;124;03m this is used in other models such as LLMs like GPT. Gradients are meant\u001b[39;00m\n\u001b[1;32m 41\u001b[0m \u001b[38;5;124;03m to be stabilised. 
This is different to the original ViT paper.\u001b[39;00m\n\u001b[1;32m 42\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 43\u001b[0m x \u001b[38;5;241m=\u001b[39m x \u001b[38;5;241m+\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mattn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfnorm\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 44\u001b[0m x \u001b[38;5;241m=\u001b[39m x \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mffl(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msnorm(x))\n\u001b[1;32m 45\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m x\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", + "File \u001b[0;32m~/Desktop/COMP3710 Project/PatternAnalysis-2023/recognition/vision-transformer-4696689/model.py:22\u001b[0m, in \u001b[0;36mAttention.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 19\u001b[0m K \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mK(x)\n\u001b[1;32m 20\u001b[0m V \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mV(x)\n\u001b[0;32m---> 22\u001b[0m attnout, attnweights \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mattn\u001b[49m\u001b[43m(\u001b[49m\u001b[43mQ\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mK\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mV\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m attnout\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/module.py:1501\u001b[0m, in 
\u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1496\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1497\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1498\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1499\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1500\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1501\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1502\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1503\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/modules/activation.py:1189\u001b[0m, in \u001b[0;36mMultiheadAttention.forward\u001b[0;34m(self, query, key, value, key_padding_mask, need_weights, attn_mask, average_attn_weights, is_causal)\u001b[0m\n\u001b[1;32m 1175\u001b[0m attn_output, attn_output_weights \u001b[38;5;241m=\u001b[39m F\u001b[38;5;241m.\u001b[39mmulti_head_attention_forward(\n\u001b[1;32m 1176\u001b[0m query, key, value, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39membed_dim, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_heads,\n\u001b[1;32m 1177\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39min_proj_weight, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39min_proj_bias,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1186\u001b[0m average_attn_weights\u001b[38;5;241m=\u001b[39maverage_attn_weights,\n\u001b[1;32m 1187\u001b[0m is_causal\u001b[38;5;241m=\u001b[39mis_causal)\n\u001b[1;32m 1188\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1189\u001b[0m attn_output, attn_output_weights \u001b[38;5;241m=\u001b[39m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmulti_head_attention_forward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1190\u001b[0m \u001b[43m \u001b[49m\u001b[43mquery\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkey\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43membed_dim\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnum_heads\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1191\u001b[0m \u001b[43m 
\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43min_proj_weight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43min_proj_bias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1192\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias_k\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias_v\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43madd_zero_attn\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1193\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdropout\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mout_proj\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mout_proj\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1194\u001b[0m \u001b[43m \u001b[49m\u001b[43mtraining\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtraining\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1195\u001b[0m \u001b[43m \u001b[49m\u001b[43mkey_padding_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mkey_padding_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1196\u001b[0m \u001b[43m \u001b[49m\u001b[43mneed_weights\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mneed_weights\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1197\u001b[0m \u001b[43m \u001b[49m\u001b[43mattn_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mattn_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1198\u001b[0m \u001b[43m \u001b[49m\u001b[43maverage_attn_weights\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43maverage_attn_weights\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1199\u001b[0m \u001b[43m \u001b[49m\u001b[43mis_causal\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mis_causal\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1200\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbatch_first \u001b[38;5;129;01mand\u001b[39;00m is_batched:\n\u001b[1;32m 1201\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m attn_output\u001b[38;5;241m.\u001b[39mtranspose(\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m0\u001b[39m), attn_output_weights\n", + "File \u001b[0;32m~/opt/anaconda3/lib/python3.9/site-packages/torch/nn/functional.py:5313\u001b[0m, in \u001b[0;36mmulti_head_attention_forward\u001b[0;34m(query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight, q_proj_weight, k_proj_weight, v_proj_weight, static_k, static_v, average_attn_weights, is_causal)\u001b[0m\n\u001b[1;32m 5311\u001b[0m attn_output_weights \u001b[38;5;241m=\u001b[39m attn_output_weights\u001b[38;5;241m.\u001b[39mview(bsz, num_heads, tgt_len, src_len)\n\u001b[1;32m 5312\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m average_attn_weights:\n\u001b[0;32m-> 5313\u001b[0m attn_output_weights \u001b[38;5;241m=\u001b[39m 
\u001b[43mattn_output_weights\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmean\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdim\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 5315\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_batched:\n\u001b[1;32m 5316\u001b[0m \u001b[38;5;66;03m# squeeze the output if input was unbatched\u001b[39;00m\n\u001b[1;32m 5317\u001b[0m attn_output \u001b[38;5;241m=\u001b[39m attn_output\u001b[38;5;241m.\u001b[39msqueeze(\u001b[38;5;241m1\u001b[39m)\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "start = time.time()\n", + "train(model, trainloader(batchsize=batchsize), trainaccloader(), criterion, optimiser, nepochs=1)\n", + "end = time.time()\n", + "print(\"training time: \", end-start)\n", + "test(model, testloader())" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "id": "bbaac2fc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[]\n", + "[]\n" + ] + } + ], + "source": [ + "print(TRAIN_LOSS)\n", + "print(TRAIN_ACC)" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "94178617", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "280706\n" + ] + } + ], + "source": [ + "print(sum(p.numel() for p in model.parameters()))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ccfcbae", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}
diff --git a/recognition/vision-transformer-4696689/predict.py b/recognition/vision-transformer-4696689/predict.py
new file mode 100644
index 000000000..895f9d74e
--- /dev/null
+++ b/recognition/vision-transformer-4696689/predict.py
@@ -0,0 +1,56 @@
+"""
+Imports
+"""
+import torch
+from train import test
+from dataset import trainloader, valloader, testloader
+from numpy import loadtxt
+import matplotlib.pyplot as plt
+
+model = torch.jit.load('model_trained.pt')
+model.eval()
+
+#try load and plot loss curve
+try:
+    loss = loadtxt('loss.txt')
+    steps = range(len(loss)) #one point per epoch
+    plt.plot(steps, loss)
+    plt.ylabel('LOSS')
+    plt.xlabel('epoch')
+    plt.title('Training Loss')
+    plt.show()
+except OSError: #file missing; don't hide other errors
+    print("No training loss!")
+
+#try load and plot accuracy curve
+try:
+    acc = loadtxt('acc.txt')
+    steps = range(len(acc))
+    plt.plot(steps, acc)
+    plt.ylabel('ACCURACY')
+    plt.xlabel('epoch')
+    plt.title('Validation Accuracy')
+    plt.show()
+except OSError:
+    print("No accuracy!")
+
+#try load and plot train accuracy curve
+try:
+    trainacc = loadtxt('train.txt')
+    steps = range(len(trainacc))
+    plt.plot(steps, trainacc)
+    plt.ylabel('ACCURACY')
+    plt.xlabel('epoch')
+    plt.title('Training Accuracy')
+    plt.show()
+except OSError:
+    print("No training accuracy")
+
+"""retest the model on the datasets"""
+# train_acc = test(model, trainloader()) #test on train set
+# val_acc = test(model, valloader()) #test on validation set
+# test_acc = test(model, testloader()) #test on test set
+
+# print("accuracy on training set:", train_acc)
+# print("accuracy on validation set:", val_acc)
+# print("accuracy on test set:", test_acc)
\ No newline at end of file
diff --git a/recognition/vision-transformer-4696689/train.py b/recognition/vision-transformer-4696689/train.py
new file mode 100644
index 000000000..ee98fdd6c
--- /dev/null
+++ b/recognition/vision-transformer-4696689/train.py
@@ -0,0 +1,94 @@
+"""
+Imports Here
+"""
+from dataset import trainloader
+from dataset import testloader
+from dataset import valloader
+from dataset import trainshape
+from dataset import testshape
+
+from modules import VisionTransformer
+from modules import Attention
+from modules import TransBlock
+from modules import ConvLayer
+
+import time
+import torch
+import torch.nn as nn
+import torch.optim as optim
+
+from numpy import savetxt
+
+"""for results"""
+TRAIN_LOSS = []
+TRAIN_ACC = []
+TRAIN_TIMES = []
+
+"""
+function to train the model
+"""
+def train(model, dataloader, accloader, lossfunc, optimiser, nepochs=10):
+    """training"""
+    for i in range(nepochs): # for each epoch
+        epoch_loss = 0
+        model.train()
+        n_batches = 0
+        time1 = time.time()
+        for (x, y) in dataloader: # for each mini-batch
+            optimiser.zero_grad(set_to_none=True)
+            loss = lossfunc(model.forward(x), y)
+            loss.backward()
+            optimiser.step()
+            epoch_loss += loss.detach().item()
+            n_batches += 1
+        time2 = time.time()
+        TRAIN_TIMES.append(round(time2-time1,3))
+        epoch_loss /= n_batches
+
+        """evaluating"""
+        model.eval()
+        accuracy = test(model, accloader).detach().item()
+
+        """get performance"""
+        TRAIN_LOSS.append(round(epoch_loss,5))
+        TRAIN_ACC.append(round(accuracy*100,2))
+
+
+"""
+function to test the model
+"""
+def test(model, dataloader):
+    with torch.no_grad(): # disable automatic gradient computation for efficiency
+        """make predictions"""
+        pcls = []
+        items = 0
+        for x, y in dataloader:
+            pcls.append(abs(y.cpu()-torch.max(model(x), 1)[1].cpu()))
+            items += x.shape[0] # count samples, not batches, so accuracy is correct for any batch size
+
+        """get accuracy"""
+        pcls = torch.cat(pcls) # concat predictions on the mini-batches
+        accuracy = 1 - (pcls.sum().float() / items)
+        return accuracy
+
+"""model training (guarded so predict.py can import test() without re-running training)"""
+if __name__ == "__main__":
+    batchsize=16
+    N, Np, L, W, H = trainshape()
+    model = VisionTransformer(inputsize=(batchsize, 192, 120), heads=4, embed=360, fflscale=2, nblocks=4)
+    criterion = nn.CrossEntropyLoss()
+    optimiser = optim.AdamW(model.parameters(), lr=3e-4)
+    start = time.time()
+    train(model, trainloader(batchsize=batchsize), valloader(), criterion, optimiser, nepochs=100)
+    end = time.time()
+    print("training time: ", end-start)
+    print("test acc: ", test(model, testloader()))
+    print(TRAIN_LOSS)
+    print(TRAIN_ACC)
+    print(TRAIN_TIMES)
+
+    """saving model"""
+    # model_trained = torch.jit.script(model)
+    # model_trained.save('model_trained.pt')
+    savetxt('loss.txt', TRAIN_LOSS)
+    savetxt('acc.txt', TRAIN_ACC)
\ No newline at end of file