confounds = [None] * len(imgs)

# Precompute the number of samples per subject for preallocation
subject_n_samples = np.zeros(len(imgs), dtype="int")
for i, img in enumerate(imgs):
    this_n_samples = check_niimg_4d(img).shape[3]
    if reduction_ratio == "auto":
        subject_n_samples[i] = min(n_components, this_n_samples)
After Change
"got %.2f" % reduction_ratio)
if confounds is None:
    confounds = itertools.repeat(confounds)
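# In "auto" mode each subject is reduced to exactly n_components samples,
# and the ratio itself is no longer needed downstream.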
if reduction_ratio == "auto":
    n_samples = n_components
    reduction_ratio = None
else:
    # We'll let _mask_and_reduce_single decide on the number of
    # samples based on the reduction_ratio
    n_samples = None
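# Mask and time-reduce every subject's 4D image in parallel; each entry
# of data_list is a (subject_samples, n_voxels) array.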
data_list = Parallel(n_jobs=n_jobs)(
    delayed(_mask_and_reduce_single)(
        masker,
        img, confound,
        reduction_ratio=reduction_ratio,
        n_samples=n_samples,
        memory=memory,
        memory_level=memory_level,
        random_state=random_state
    ) for img, confound in zip(imgs, confounds))
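# Count how many reduced samples each subject contributed, then
# preallocate one large array to stack them all.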
subject_n_samples = [subject_data.shape[0]
                     for subject_data in data_list]

n_samples = np.sum(subject_n_samples)
# Number of in-mask voxels, i.e. the width of the stacked data matrix
n_voxels = np.sum(_safe_get_data(masker.mask_img_))
data = np.empty((n_samples, n_voxels), order="F",
                dtype="float64")