Модель преобразователя зрения не тренируется должным образомPython

Программы на Python
Ответить Пред. темаСлед. тема
Anonymous
 Модель преобразователя зрения не тренируется должным образом

Сообщение Anonymous »

Вот код преобразователя зрения, который я создал с помощью Pytorch. Модель показывает перекрестную энтропию 2,31 и точность около 10%. Это остается неизменным во все эпохи. Следовательно, модель не может обучаться. Пожалуйста, дайте мне знать, что я делаю неправильно, и, если возможно, пришлите исправленный код. Заранее спасибо!
PS: Модель обучена на MNIST

Код: Выделить всё

# -*- coding: utf-8 -*-
"""
Created on Sun Jul  2 14:04:19 2023

@author: Paras
"""

import torch
from torch import nn
from torchvision import transforms
import torchvision.datasets as datasets
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import math

class Head(nn.Module):
def __init__(self,num_heads,weight_dimension):
super(Head, self).__init__()
self.w1 = nn.Parameter(torch.randn((weight_dimension*num_heads,weight_dimension))).to(device)
self.w2 = nn.Parameter(torch.randn((weight_dimension*num_heads,weight_dimension))).to(device)
self.w3 = nn.Parameter(torch.randn((weight_dimension*num_heads,weight_dimension))).to(device)

def forward(self,x):

x = x.to(device)
self.Q = torch.matmul(x,self.w1).to(device)
self.K = torch.matmul(x,self.w2).to(device)
self.V = torch.matmul(x,self.w3).to(device)

lnq = nn.LayerNorm(self.Q.size()[1:]).to(device)
lnk = nn.LayerNorm(self.K.size()[1:]).to(device)
lnv = nn.LayerNorm(self.V.size()[1:]).to(device)

self.Q = lnq(self.Q)
self.K = lnk(self.K)
self.V = lnv(self.V)
self.K = torch.transpose(self.K, -2, -1)

out = torch.matmul(self.Q,self.K)
out = out/np.sqrt(self.Q.shape[1])
out = F.softmax(out,dim=-1)
out = torch.matmul(out,self.V)
return out

class MHA(nn.Module):
def __init__(self,num_heads,weight_dimension):
super(MHA, self).__init__()
self.num_heads = num_heads
self.weight_dimension = weight_dimension
heads = []
for i in range(self.num_heads):
head = Head(self.num_heads,self.weight_dimension)
heads.append(head)

self.heads = heads

def forward(self,x):

flag=True
for i in range(self.num_heads):
if flag:
out_multihead = self.heads[i](x)
flag=False
else:
out_multihead = torch.cat((out_multihead,self.heads[i](x)),axis=2)

return out_multihead

class vit_model(nn.Module):

def __init__(self,img_size,patch_size,embedding_dim,n_heads,hidden_dims_mlp,n_classes,batch_size):

super().__init__()
self.patch_size = patch_size
self.n_heads = n_heads
self.hidden_dims_mlp = hidden_dims_mlp
self.img_size = img_size
self.n_classes = n_classes
self.device = torch.device("cuda"  if torch.cuda.is_available() else "cpu")
self.embedding_dim = embedding_dim
self.batch_size = batch_size

embedding_rows = self.patch_size*self.patch_size
embedding_cols = self.embedding_dim
embedding_cols = int(embedding_cols)

self.embedding_matrix = nn.Parameter(torch.randn((embedding_rows,embedding_cols)))
self.embedding_matrix.requires_grad_(True)

self.added_class_head = nn.Parameter(torch.randn((1,embedding_cols))) #Normally distributed like nn.init.normal_ (std 10-6)
self.added_class_head.requires_grad_(True)

self.positional_embeddings = nn.Parameter(self.get_positional_encodings((img_size//patch_size)**2,embedding_cols)) #Trunc Normal distribution
self.positional_embeddings.requires_grad_(True)

self.weight_dimension = embedding_cols//self.n_heads

self.mha = MHA(self.n_heads,self.weight_dimension)

self.mlp_inside_encoder = nn.Sequential(
nn.Linear(self.embedding_dim*(self.positional_embeddings.shape[0]+1), self.hidden_dims_mlp),
nn.GELU(),
nn.Dropout(0.5),
nn.Linear(self.hidden_dims_mlp, self.embedding_dim*(self.positional_embeddings.shape[0]+1)),
nn.GELU(),
nn.Dropout(0.5)
)

self.mlp_classification = nn.Sequential(
nn.Linear(self.embedding_dim, self.n_classes),
nn.GELU(),
nn.Dropout(0.5),
nn.Linear(self.n_classes, self.n_classes),
nn.GELU(),
nn.Dropout(0.5)
)

def divide_image_into_patches(self,imgs,patch_size):

imgs = imgs/255

startx, starty = 0,0
batch_size, channels, height, width = imgs.shape

flag = True
for startx in range(0,height,patch_size):
for starty in range(0,width,patch_size):
tmat = imgs[:,:,startx:startx+patch_size,starty:starty+patch_size]

tmat = tmat.reshape((batch_size,1,tmat.shape[1]*tmat.shape[2]*tmat.shape[3]))
if flag:
patches_list = tmat
flag=False
else:
patches_list = torch.cat((patches_list,tmat),1)

return patches_list

def get_positional_encodings(self,seq_length, hidden_size):
position = torch.arange(seq_length).unsqueeze(1)
div_term = torch.exp(torch.arange(0, hidden_size, 2) * (-math.log(10000.0) / hidden_size))
encodings = torch.zeros(seq_length, hidden_size)
encodings[:, 0::2] = torch.sin(position * div_term)
encodings[:, 1::2] = torch.cos(position * div_term)
return encodings

def forward(self,images):

out = self.divide_image_into_patches(images,self.patch_size)
out = torch.matmul(out,self.embedding_matrix)
out = out + self.positional_embeddings.unsqueeze(0).expand(self.batch_size, -1, -1)
out = torch.cat((out,self.added_class_head.expand(self.batch_size, 1, -1)),1)
out = out.to(self.device)
ln = nn.LayerNorm(out.size()[1:]).to(self.device)
out = ln(out)
layer_norm1 = out.clone()
out = self.mha(out)
out = out + layer_norm1
skip = out.clone()
out = out.to(self.device)
ln = nn.LayerNorm(out.size()[1:]).to(self.device)
out = ln(out)
out = self.mlp_inside_encoder(out.reshape(out.shape[0],out.shape[1]*out.shape[2]))
out = skip + out.reshape(self.batch_size,layer_norm1.shape[1],self.embedding_dim)
out = out[:,-1,:]
out = self.mlp_classification(out)
return out

# Define the transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])

# Load the training and test datasets
train_dataset = datasets.MNIST(root='./data', train=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, transform=transform)

# Create data loaders
train_loader = torch.utils.data.DataLoader(train_dataset, shuffle=True, batch_size=1000)
test_loader = torch.utils.data.DataLoader(test_dataset, shuffle=False, batch_size=1000)

device = torch.device("cuda"  if torch.cuda.is_available() else "cpu")

torch.autograd.set_detect_anomaly(True)
model = vit_model(28, 4, 512, 8, 2048, 10, 1000)
model = model.to(device)
print(model)
#num_params = sum(p.numel() for p in model.named_parameters())
for p in model.named_parameters():
print(p)
#print('Number of parameters:',num_params)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

epoch_losses = []
epoch_accuracies = []
for epoch in range(100):  # Number of training epochs

epoch_loss = []
epoch_acc = []
model.train()
for i, (images,labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
c = model(images)
loss = criterion(c,labels)

with torch.no_grad():
predictions = torch.argmax(c, dim=-1)

acc = torch.sum(predictions == labels)/1000
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_loss.append(loss.item())
epoch_acc.append(acc.cpu().numpy())

model.eval()
epoch_losses.append(np.average(epoch_loss))
epoch_accuracies.append(np.average(epoch_acc))
print('Epoch loss:',epoch_losses[-1])
print('Epoch accuracy:',epoch_accuracies[-1])

Я пробовал варьировать скорость обучения, размеры патчей и т. д. гиперпараметры, но это не сработало.

Подробнее здесь: https://stackoverflow.com/questions/766 ... -it-should
Реклама
Ответить Пред. темаСлед. тема

Быстрый ответ

Изменение регистра текста: 
Смайлики
:) :( :oops: :roll: :wink: :muza: :clever: :sorry: :angel: :read: *x)
Ещё смайлики…
   
К этому ответу прикреплено по крайней мере одно вложение.

Если вы не хотите добавлять вложения, оставьте поля пустыми.

Максимально разрешённый размер вложения: 15 МБ.

  • Похожие темы
    Ответы
    Просмотры
    Последнее сообщение

Вернуться в «Python»