Code:
for iter in range(max_iters):
    start_ix = 0
    loss = None
    while start_ix < len(X_train_tensor):
        loss = None
        end_ix = min(start_ix + batch_size, len(X_train_tensor))
        # forward pass on the current mini-batch
        out, loss, accuracy = model(X_train_tensor[start_ix:end_ix], y_train_tensor[start_ix:end_ix])
        # every once in a while evaluate the loss on train and val sets
        if (start_ix == 0) and (iter % 10 == 0 or iter == max_iters - 1):
            out_val, loss_val, accuracy_val = model(X_val_tensor, y_val_tensor)
            print(f"step {iter}: train loss={loss:.2f} train_acc={accuracy:.3f} | val loss={loss_val:.2f} val_acc={accuracy_val:.3f} {datetime.datetime.now()}")
        optimizer.zero_grad(set_to_none=True)
        print(iter, start_ix, X_train_tensor.requires_grad, y_train_tensor.requires_grad, loss.requires_grad)
        loss.backward()
        optimizer.step()
        start_ix = end_ix  # the slice above excludes end_ix, so the next batch starts here
Code:
RuntimeError: Trying to backward through the graph a second time (or directly access saved tensors after they have already been freed). Saved intermediate values of the graph are freed when you call .backward() or autograd.grad(). Specify retain_graph=True if you need to backward through the graph a second time or if you need to access saved tensors after calling backward.
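A likely cause, assuming `X_train_tensor` (or whatever features the loop consumes) was produced earlier by the autoencoder without `torch.no_grad()` or `.detach()`: each mini-batch's `loss` is then connected, through those inputs, to the autoencoder's computation graph. The first `loss.backward()` frees that shared graph's saved tensors, and the next mini-batch's backward tries to walk it a second time. A minimal sketch of this failure mode (hypothetical modules and shapes, not the code above):

Code:
import torch

encoder = torch.nn.Linear(4, 2)   # stands in for the autoencoder
head = torch.nn.Linear(2, 1)      # stands in for `model`

features = encoder(torch.randn(8, 4))   # graph is recorded; features.requires_grad is True

for i in range(0, 8, 4):                 # two "mini-batches"
    loss = head(features[i:i + 4]).mean()
    loss.backward()                       # second iteration raises the RuntimeError above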
Code:
autoencoder.eval()
with torch.no_grad():  # it seems like adding this line solves the problem?
    X_train_encoded, loss = autoencoder(X_train_tensor)
    X_val_encoded, loss = autoencoder(X_val_tensor)
    X_test_encoded, loss = autoencoder(X_test_tensor)
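That does address the error: inside `torch.no_grad()` no graph is recorded, so the encoded tensors carry no history and the training loop's `backward()` stops at them. A self-contained sketch of the same idea with hypothetical modules; calling `.detach()` on the encoded output would work equally well if gradients never need to flow back into the autoencoder:

Code:
import torch

encoder = torch.nn.Linear(4, 2)
head = torch.nn.Linear(2, 1)

with torch.no_grad():                        # or: features = encoder(raw).detach()
    features = encoder(torch.randn(8, 4))    # no graph recorded, requires_grad is False

optimizer = torch.optim.SGD(head.parameters(), lr=0.1)
for i in range(0, 8, 4):
    loss = head(features[i:i + 4]).mean()
    optimizer.zero_grad(set_to_none=True)
    loss.backward()                           # fine: each mini-batch's graph covers only `head`
    optimizer.step()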
More details here: https://stackoverflow.com/questions/792 ... oss-tensor