RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn when using a Foolbox attack


Post by Anonymous »

I am trying to implement a PuVAE classifier model following the Lightning Hydra template. After training, I want to run Foolbox attacks (FGSM and PGD) during the test stage of my model, but I get the error below. I know that FGSM and PGD need gradients to run the attack, and I have already set my classifier to training mode during the test stage and set x.requires_grad = True.
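For reference, this is the attack call pattern I am following. As far as I understand, Foolbox enables gradients on the input internally (via eagerpy), so a minimal standalone call should not even need requires_grad set by hand. A sketch with a toy stand-in classifier (the linear model, shapes, and epsilon below are placeholders, not my real setup):

Code: Select all

import torch
import torch.nn as nn
import foolbox as fb

# toy stand-in for the pre-trained classifier used in my module
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10)).eval()

images = torch.rand(8, 1, 28, 28)    # inputs already in [0, 1]
labels = torch.randint(0, 10, (8,))  # integer class labels

fmodel = fb.PyTorchModel(model, bounds=(0, 1))
attack = fb.attacks.FGSM()

# with a scalar epsilon, the attack returns (raw, clipped, success)
raw, clipped, success = attack(fmodel, images, labels, epsilons=0.1)
print("attack success rate:", success.float().mean().item())

My full module follows.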

Code: Select all

from typing import Any, Dict, Tuple

import torch
import torch.nn.functional as F
from lightning import LightningModule
from torchmetrics import MeanMetric
from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure
from torchmetrics.classification.accuracy import Accuracy
from torchvision.utils import make_grid
from src.models.components.classifier import Classifier
from src.models.components.puvae_model import PuVAE
import foolbox as fb

class PuVAEClassifierModule(LightningModule):

    def __init__(
        self,
        net: PuVAE,
        optimizer: torch.optim.Optimizer,
        scheduler: torch.optim.lr_scheduler,
        compile: bool,
        classifier: Classifier,
        pretrained_classifier_path: str = "ckpt.pt",  # path to your pre-trained classifier
        threshold: float = 0.12,
        ce_coeff: float = 10,
        rc_coeff: float = 0.01,
        kl_coeff: float = 0.1,
    ) -> None:
        """Initialize a `PuVAEClassifierModule`.

        :param net: The model to train.
        :param optimizer: The optimizer to use for training.
        :param scheduler: The learning rate scheduler to use for training.
        """
        super().__init__()

        # this line allows accessing init params with the `self.hparams` attribute
        # and also ensures init params will be stored in the ckpt
        self.save_hyperparameters(logger=False)

        self.net = net
        self.threshold = threshold
        self.classifier = classifier

        # load pre-trained classifier weights and freeze them
        classifier_ckpt = torch.load(pretrained_classifier_path, map_location=self.device)
        state_dict = classifier_ckpt["state_dict"]
        new_state_dict = {k.replace("net.", ""): v for k, v in state_dict.items()}
        self.classifier.load_state_dict(new_state_dict)
        self.classifier.eval()
        for param in self.classifier.parameters():
            param.requires_grad = False

        self.criterion = torch.nn.CrossEntropyLoss()  # for the classifier
        self.bce = torch.nn.BCELoss()  # for reconstruction
        self.ce_coeff = ce_coeff
        self.rc_coeff = rc_coeff
        self.kl_coeff = kl_coeff

        # just measure accuracy during test time
        self.test_acc = Accuracy(task="multiclass", num_classes=10)

        # for averaging loss across batches
        self.train_loss = MeanMetric()
        self.val_loss = MeanMetric()
        self.test_loss = MeanMetric()

        self.val_psnr = PeakSignalNoiseRatio()
        self.val_ssim = StructuralSimilarityIndexMeasure()
        self.test_psnr = PeakSignalNoiseRatio()
        self.test_ssim = StructuralSimilarityIndexMeasure()

    def forward(self, x, y) -> torch.Tensor:
        """Perform a forward pass through the model `self.net`.

        :param x: A tensor of images.
        :return: A tensor of logits.
        """
        return self.net(x, y)

    def model_step(
        self, batch: Tuple[torch.Tensor, torch.Tensor]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Perform a single model step on a batch of data.

        :param batch: A batch of data (a tuple) containing the input tensor of images and target labels.

        :return: A tuple containing (in order):
            - A tensor of reconstructions.
            - The combined VAE loss.
        """
        x, y = batch
        reconstruction, kl_loss = self.net(x, y)

        # calculate loss
        rc_loss = self.bce(reconstruction, x)
        vae_loss = self.rc_coeff * rc_loss + self.kl_coeff * kl_loss

        return reconstruction, vae_loss

    def test_step(self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int) -> None:
        """Perform a single test step on a batch of data from the test set.

        :param batch: A batch of data (a tuple) containing the input tensor of images and target
            labels.
        :param batch_idx: The index of the current batch.
        """
        with torch.enable_grad():
            self.classifier.train()
            x, y = batch
            reconstruction, vae_loss = self.model_step(batch)
            preds = self.classifier(reconstruction)

            clean_ce_loss = F.cross_entropy(preds.float(), y.float())
            clean_loss = self.ce_coeff * clean_ce_loss + vae_loss
            clean_psnr = self.test_psnr(reconstruction, x)
            clean_ssim = self.test_ssim(reconstruction, x)

            # Log adversarial attacks
            # self.log_adversarial_attacks(batch, reconstruction, preds)
            x.requires_grad = True
            # Foolbox attack evaluation
            self.foolbox_attack(x, y)

            self.classifier.eval()

        # update and log metrics
        self.test_loss(clean_loss)
        self.test_acc(preds, y)
        self.log("test/loss_clean", self.test_loss, on_step=False, on_epoch=True, prog_bar=True)
        self.log("test/psnr_clean", clean_psnr, on_step=False, on_epoch=True, prog_bar=True)
        self.log("test/ssim_clean", clean_ssim, on_step=False, on_epoch=True, prog_bar=True)
        self.log("test/acc_clean", self.test_acc, on_step=False, on_epoch=True, prog_bar=True)

    def foolbox_attack(self, x: torch.Tensor, y) -> None:
        # Set up the Foolbox model (add preprocessing here if needed)
        fmodel = fb.PyTorchModel(self.classifier, bounds=(0, 1))

        # Convert labels to the format required by Foolbox
        labels = y.argmax(dim=1) if y.dim() > 1 else y

        # FGSM attack
        fgsm_attack = fb.attacks.FGSM()
        fgsm_images, _, success_fgsm = fgsm_attack(fmodel, x, labels, epsilons=0.1)  # adjust epsilon as needed
        fgsm_reconstruction, _ = self.forward(fgsm_images, y)
        fgsm_preds = self.classifier(fgsm_reconstruction)

        # PGD attack
        pgd_attack = fb.attacks.LinfPGD()
        pgd_images, _, success_pgd = pgd_attack(fmodel, x, labels, epsilons=0.1)  # adjust epsilon as needed
        pgd_reconstruction, _ = self.forward(pgd_images, y)
        pgd_preds = self.classifier(pgd_reconstruction)

        # Compute and log robust accuracy under each attack
        fgsm_acc = (fgsm_preds.argmax(dim=1) == labels).float().mean()
        pgd_acc = (pgd_preds.argmax(dim=1) == labels).float().mean()

        self.log("test/fgsm_acc", fgsm_acc, prog_bar=True)
        self.log("test/pgd_acc", pgd_acc, prog_bar=True)

    def configure_optimizers(self) -> Dict[str, Any]:
        optimizer = self.hparams.optimizer(params=self.trainer.model.parameters())
        if self.hparams.scheduler is not None:
            scheduler = self.hparams.scheduler(optimizer=optimizer)
            return {
                "optimizer": optimizer,
                "lr_scheduler": {
                    "scheduler": scheduler,
                    "monitor": "val/loss",
                    "interval": "epoch",
                    "frequency": 1,
                },
            }
        return {"optimizer": optimizer}

When I run it, I get this error:

Code: Select all

 File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/lightning/pytorch/loops/evaluation_loop.py", line 135, in run
self._evaluation_step(batch, batch_idx, dataloader_idx, dataloader_iter)
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/lightning/pytorch/loops/evaluation_loop.py", line 396, in _evaluation_step
output = call._call_strategy_hook(trainer, hook_name, *step_args)
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py", line 319, in _call_strategy_hook
output = fn(*args, **kwargs)
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/lightning/pytorch/strategies/strategy.py", line 425, in test_step
return self.lightning_module.test_step(*args, **kwargs)
File "/mnt/apple/k66/ptrang/PuVAE/src/models/classifier_module.py", line 186, in test_step
self.foolbox_attack(x, y)
File "/mnt/apple/k66/ptrang/PuVAE/src/models/classifier_module.py", line 214, in foolbox_attack
fgsm_images, _, success_fgsm = fgsm_attack(fmodel, x, labels, epsilons=0.1)  # Adjust epsilon as needed
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/foolbox/attacks/base.py", line 283, in __call__
xp = self.run(model, x, criterion, epsilon=epsilon, **kwargs)
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/foolbox/attacks/fast_gradient_method.py", line 98, in run
return super().run(
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/foolbox/attacks/gradient_descent_base.py", line 155, in run
_, gradients = self.value_and_grad(loss_fn, x)
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/foolbox/attacks/gradient_descent_base.py", line 111, in value_and_grad
return ep.value_and_grad(loss_fn, x)
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/eagerpy/framework.py", line 360, in value_and_grad
return t.value_and_grad(f, *args, **kwargs)
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/eagerpy/tensor/tensor.py", line 553, in value_and_grad
return self._value_and_grad_fn(f, has_aux=False)(self, *args, **kwargs)
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/eagerpy/tensor/pytorch.py", line 507,  in value_and_grad
loss.backward()
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/torch/_tensor.py", line 581, in backward
torch.autograd.backward(
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/torch/autograd/__init__.py", line 347, in backward
_engine_run_backward(
File "/mnt/apple/k66/ptrang/env/puvae/lib/python3.10/site-packages/torch/autograd/graph.py", line 825, in _engine_run_backward
return Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
Does anyone know a solution? Thanks.
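One thing I suspect but have not yet confirmed: recent Lightning versions run test_step under torch.inference_mode() by default, and torch.enable_grad() cannot re-enable autograd there, which would explain why loss.backward() fails inside Foolbox despite my enable_grad block. If that is the cause, torch.is_inference_mode_enabled() should print True inside test_step, and passing inference_mode=False to the Trainer should make the test loop use torch.no_grad() instead, which torch.enable_grad() can override. A sketch of what I mean (model and datamodule are placeholders for my own objects):

Code: Select all

from lightning import Trainer

# run evaluation under torch.no_grad() instead of torch.inference_mode(),
# so torch.enable_grad() inside test_step can re-enable autograd
trainer = Trainer(inference_mode=False)
trainer.test(model, datamodule=datamodule)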

More details here: https://stackoverflow.com/questions/791 ... have-a-gra