I used this script for my validation. I want to load several clips to validate my model, and I used a temporal transformation to get clips of the same size, so I wrote this custom dataset and custom collate_fn:
import torch
from PIL import Image


def collate_fn(batch):
    print(f"Input batch structure: {type(batch)}, {len(batch)} elements")
    for i, item in enumerate(batch):
        print(f"Item {i}: {type(item)}, {len(item)} elements")
    # Unpack clips and targets
    batch_clips, batch_targets = zip(*batch)
    print(f"Number of clips before flattening: {len(batch_clips)}")
    print(f"Number of targets before flattening: {len(batch_targets)}")
    # Flatten the clips and targets
    batch_clips = [clip for multi_clips in batch_clips for clip in multi_clips]
    batch_targets = [target for multi_targets in batch_targets for target in multi_targets]
    print(f"Number of clips after flattening: {len(batch_clips)}")
    print(f"Number of targets after flattening: {len(batch_targets)}")
    # Stack each of them into a single tensor
    batch_clips = torch.stack(batch_clips, 0)
    batch_targets = torch.stack(batch_targets, 0)
    print(f"Final batch_clips shape: {batch_clips.shape}")
    print(f"Final batch_targets shape: {batch_targets.shape}")
    return batch_clips, batch_targets
class ActionDatasetMultiClips(ActionDataset):
    def __loading(self, path, frame_indices):
        clips = []
        for clip_frame_indices in frame_indices:
            valid_indices = [i for i in clip_frame_indices if i < len(path)]
            if not valid_indices:
                raise ValueError(f"No valid frame indices for clip: {clip_frame_indices}")
            # Open each frame by its index (path is the list of frame paths)
            clip = [Image.open(path[i]).convert('RGB') for i in valid_indices]
            if self.spatial_transform is not None:
                self.spatial_transform.randomize_parameters()
                clip = [self.spatial_transform(img) for img in clip]
            clips.append(torch.stack(clip, 0))
        return clips
    def __getitem__(self, idx):
        sequence_key = list(self.sequences.keys())[idx]
        sequence_data = self.sequences[sequence_key]
        frame_paths = [data['image_path'] for data in sequence_data]
        # Initialize frame_indices
        frame_indices = list(range(len(frame_paths)))
        # Apply temporal transformation
        if self.temporal_transform:
            frame_indices = self.temporal_transform(frame_indices)
        if isinstance(frame_indices[0], list):
            # The temporal transform returned several clips (a list of lists);
            # flatten them into one index list before filtering
            frame_indices = [i for clip in frame_indices for i in clip]
        frame_indices = [i for i in frame_indices if i < len(frame_paths)]
        if not frame_indices:
            raise ValueError(f"No valid frame indices for sequence {sequence_key}")
        clips = self.__loading(frame_paths, [frame_indices])
        action_name = sequence_data[0]['action_name']
        condition = sequence_data[0]['condition']
        action_name_id = self.action_name_to_id[action_name]
        condition_id = self.condition_name_to_id[condition]
        # Prepare one target per clip
        targets = [torch.tensor([action_name_id, condition_id], dtype=torch.long)
                   for _ in range(len(clips))]
        return clips, targets
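To pin down the contract, here is a minimal sanity check of collate_fn on hand-built samples. It is only a sketch: it assumes each __getitem__ call returns equal-length lists of clip tensors and target tensors, and the 16x3x112x112 clip shape is a placeholder, not my real input size.

import torch

# Two fake samples shaped like the (clips, targets) pairs from __getitem__
dummy_clip = torch.zeros(16, 3, 112, 112)               # (frames, channels, H, W)
dummy_target = torch.tensor([0, 1], dtype=torch.long)   # [action_id, condition_id]
sample = ([dummy_clip, dummy_clip], [dummy_target, dummy_target])

out = collate_fn([sample, sample])                      # batch of two samples
assert isinstance(out, tuple) and len(out) == 2
clips, targets = out
print(clips.shape)    # torch.Size([4, 16, 3, 112, 112])
print(targets.shape)  # torch.Size([4, 2])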
And here is my validate_one_epoch function, which I use for validation during training:
from tqdm import tqdm


def validate_one_epoch(model, dataloader, criterion, device):
    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Validation", leave=False):
            print(f"Batch structure in validate_one_epoch: {type(batch)}, {len(batch)} elements")
            for i, item in enumerate(batch):
                print(f"Item {i}: {type(item)}, shape: {item.shape if hasattr(item, 'shape') else 'N/A'}")
            if len(batch) != 2:
                raise ValueError(f"Expected batch to have 2 elements, but got {len(batch)} elements")
            # rest of the script
    return avg_loss, action_acc, condition_acc
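One way to make the failure mode explicit is a small guard before the unpacking step. unpack_batch below is a hypothetical helper, not part of the original code; it just fails with the actual type and length instead of the bare "too many values to unpack" error:

def unpack_batch(batch):
    # Fail fast with a clear message if the collate output is not
    # a (clips, targets) pair
    if not (isinstance(batch, (list, tuple)) and len(batch) == 2):
        raise TypeError(
            f"expected a 2-element (clips, targets) batch, got "
            f"{type(batch).__name__} with {len(batch)} element(s)"
        )
    return batch[0], batch[1]

Inside the loop, frames, labels = unpack_batch(batch) then reports exactly what came out of the loader.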
And this is the DataLoader:
from torch.utils.data import DataLoader


def val_utils():
    # temporal transformations
    temporal_augmentation = []
    temporal_augmentation.append(TemporalSubsampling(stride=2))
    temporal_augmentation.append(TemporalEvenCrop(16, 2))
    temporal_augmentation = Compose(temporal_augmentation)
    val_dataset = ActionDatasetMultiClips(
        annotation_path="test.csv",
        spatial_transform=spatial_augmentation,
        temporal_transform=temporal_augmentation,
        max_sequence_length=16)
    val_loader = DataLoader(
        val_dataset,
        batch_size=BATCH_SIZE // 2,
        shuffle=True,
        pin_memory=True,
        num_workers=0,
        collate_fn=collate_fn
    )
    return val_loader
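For reference, a minimal check of the pipeline outside the training loop (this assumes val_dataset, val_loader, and collate_fn are in scope, built as in val_utils above):

# Call collate_fn directly on raw dataset samples, bypassing the DataLoader,
# to see whether the extra elements come from the dataset/collate side
raw_samples = [val_dataset[i] for i in range(2)]   # each should be (clips, targets)
clips, targets = collate_fn(raw_samples)
print(clips.shape, targets.shape)

batch = next(iter(val_loader))                     # one batch through the loader
print(type(batch), len(batch))                     # expected: a 2-element tuple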
I expect each batch to be a tuple of two elements, the clips and their targets (2 elements), but for some reason I get 4 elements instead of 2:
Batch structure in validate_one_epoch: <class 'torch.utils.data.dataloader.DataLoader'>, 4 elements
Item 0: , shape: N/A
Item 1: , shape: N/A
Item 2: , shape: N/A
Item 3: , shape: N/A
< /code>
I got this error:
Traceback (most recent call last):
  File ~\anaconda3\envs\cnnsvm\Lib\site-packages\spyder_kernels\customize\utils.py:209 in exec_encapsulate_locals
    exec_fun(compile(code_ast, filename, "exec"), globals)
  File d:\organized_files\transformer_exemple\train.py:136
    val_loss, val_action_acc, val_condition_acc = validate_one_epoch(
  File d:\organized_files\transformer_exemple\utils.py:92 in validate_one_epoch
    frames, labels = batch  # Assuming batch is a tuple of (frames, labels)
ValueError: too many values to unpack (expected 2)

This is what I got. What I need is for the collate_fn to return two elements; how can I make sure of that?