Code:
import torch
import torch.nn as nn

class GACCR(nn.Module):
    def __init__(self, beta, X):
        '''
        :param beta: coefficient vector of the splines; the bias (beta_0) is the last element
        :param X: design matrix of basis functions, shape (n_dim, n_f + 1), where n_f is the
                  number of basis functions and the last column is all ones for the bias
        '''
        super().__init__()
        assert beta.shape[0] == X.shape[1]  # one coefficient per basis function (plus bias)
        self.beta = nn.Parameter(beta.clone().detach(), requires_grad=True)
        self.X = X
        self.n_dim = X.shape[0]
        self.n_spline = X.shape[1] - 1
        # Allocated once here and then mutated in place by every forward() call,
        # so the computation graph of one iteration stays attached to the next.
        self.cov_pre = torch.zeros((self.n_dim, self.n_dim))

    def forward(self, inputs):
        '''
        :param inputs: [x_t, r], shape (n_dim, 2)
        :return: predicted covariance matrix (lower triangle filled), shape (n_dim, n_dim)
        '''
        theta = (self.X @ self.beta) * inputs[:, 0].reshape(self.n_dim, 1)
        output = torch.exp(-theta * inputs[:, 1].reshape(self.n_dim, 1))
        # Entry (i, j) depends only on the lag |i - j|.
        for i in range(self.n_dim):
            for j in range(i + 1):
                self.cov_pre[i][j] = output[abs(i - j)]
        return self.cov_pre
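
For reference, here is a minimal, hypothetical usage sketch; the shapes, data, and variable names below are illustrative assumptions, not from the original post. Note that beta needs to be a column vector of shape (n_f + 1, 1) so that output[abs(i - j)] selects a single value:

Code:
import torch

n_dim, n_f = 5, 3
# Hypothetical design matrix: n_f basis-function columns plus an all-ones bias column
X = torch.cat([torch.randn(n_dim, n_f), torch.ones(n_dim, 1)], dim=1)  # (n_dim, n_f + 1)
beta0 = torch.randn(n_f + 1, 1)  # column vector, so X @ beta0 has shape (n_dim, 1)
model = GACCR(beta0, X)

inputs = torch.rand(n_dim, 2)  # columns: [x_t, r]
cov = model(inputs)
print(cov.shape)  # torch.Size([5, 5]), lower triangle filled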
Code:
num_epochs = 10
lr = 0.01
optimizer = torch.optim.Adam(GAC_model_cubic.parameters(), lr=lr)
criterion = torch.nn.MSELoss()
training_losses = [0.0 for _ in range(num_epochs)]

GAC_model_cubic.train()
for epoch in range(num_epochs):
    cov_pred = GAC_model_cubic(input.T)
    loss = criterion(cov_pred, cov_sample)
    optimizer.zero_grad()
    loss.backward()  # fails after the first epoch unless retain_graph=True is set
    optimizer.step()
    print(f"loss for epoch {epoch + 1}:", loss.item())
    training_losses[epoch] = loss.item()
I would like to run the optimization procedure without setting retain_graph=True, in order to speed up training. Any solutions would be appreciated.
More details here: https://stackoverflow.com/questions/791 ... e-or-direc
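
Without the full traceback this is only a guess, but the usual reason such a model needs retain_graph=True is that self.cov_pre is allocated once in __init__ and written in place on every forward() call: after the first backward() frees the graph, the next iteration's cov_pre still references the freed history, and PyTorch reports "Trying to backward through the graph a second time". A minimal sketch of a fix, assuming everything else in GACCR stays as above, is to build a fresh tensor inside forward():

Code:
def forward(self, inputs):
    theta = (self.X @ self.beta) * inputs[:, 0].reshape(self.n_dim, 1)
    output = torch.exp(-theta * inputs[:, 1].reshape(self.n_dim, 1))
    # A new tensor on every call: backward() then only traverses the graph
    # built during the current forward pass, so retain_graph=True is unnecessary.
    cov = torch.zeros(self.n_dim, self.n_dim, dtype=output.dtype, device=output.device)
    for i in range(self.n_dim):
        for j in range(i + 1):
            cov[i, j] = output[abs(i - j)]
    return cov

The double loop can also be replaced with indexing, e.g. lag = torch.arange(self.n_dim); cov = output.squeeze(1)[(lag.unsqueeze(1) - lag).abs()].tril(), which avoids the in-place writes entirely. Either way, each epoch's graph is independent and loss.backward() runs without retain_graph=True.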