clip = self.loader(path, frame_indices)
if self.spatial_transform is not None:
self.spatial_transform.randomize_parameters()
clip = [self.spatial_transform(img) for img in clip]
clip = torch.stack(clip, 0).permute(1, 0, 2, 3)
target = self.data[index]
if self.target_transform is not None:
target = self.target_transform(target)
# --- After change: when frame_indices is a list of per-clip index lists, load one
# clip per entry and replicate the target per clip; otherwise load a single clip.
# NOTE(review): the new code calls self.loading while the old code called
# self.loader — confirm which attribute actually exists on this dataset class.
for one_frame_indices in frame_indices:
clips.append(self.loading(path, one_frame_indices))
return clips, [target for _ in range(len(clips))]
else:
clip = self.loading(path, frame_indices)