# Imports required by this script. Project-specific helpers and variables
# (reconstruct_image, normalise_img, clip_extreme_value, estimate_cnn_feat_std,
# decode_feature_filename, results_dir, max_iteration, initial_image, network,
# layers, feat_std0, net, subjects_list, rois_list, image_label_list,
# image_type) are assumed to be defined or imported elsewhere.
import os
import pickle
from datetime import datetime
from itertools import product

import numpy as np
import PIL.Image
import scipy.io as sio

# Before Change -----------------------------------------------------------------

# Setup results directory ---------------------------------------------------
save_dir = os.path.join(results_dir, os.path.splitext(__file__)[0] + "_" + datetime.now().strftime("%Y%m%dT%H%M%S"))
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
# Set reconstruction options -------------------------------------------------
opts = {
    # The loss function type: {"l2", "l1", "inner", "gram"}
    "loss_type": "l2",
    # The maximum number of iterations
    "maxiter": max_iteration,
    # The initial image for the optimization (None starts from random noise)
    "initial_image": initial_image,
    # Whether to print progress information to the terminal
    "disp": True
}
# Save the optional parameters (pickle requires binary mode)
with open(os.path.join(save_dir, "options.pkl"), "wb") as f:
    pickle.dump(opts, f)
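
# The saved options can be restored later with, e.g.:
#     with open(os.path.join(save_dir, "options.pkl"), "rb") as f:
#         opts = pickle.load(f)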
# Reconstruction --------------------------------------------------------------
for subject, roi, image_label in product(subjects_list, rois_list, image_label_list):
    print("")
    print("Subject: " + subject)
    print("ROI: " + roi)
    print("Image label: " + image_label)
    print("")
    # Load the decoded CNN features
    features = {}
    for layer in layers:
        # The full file name depends on the data structure for decoded CNN features
        file_name = decode_feature_filename(network, layer, subject, roi, image_type, image_label)
        feat = sio.loadmat(file_name)["feat"]
        if "fc" in layer:
            # Features of fully-connected layers are flattened to vectors
            feat = feat.reshape(feat.size)
        # Correct the norm of the decoded CNN features
        feat_std = estimate_cnn_feat_std(feat)
        feat = (feat / feat_std) * feat_std0[layer]
        features[layer] = feat
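
    # Note: estimate_cnn_feat_std is a project helper. A minimal sketch of the
    # norm-correction idea, assuming the helper simply measures the spread of
    # the feature values (the real implementation may differ):
    #
    #     def estimate_cnn_feat_std(feat):
    #         return np.std(feat)
    #
    # Dividing by feat_std and multiplying by feat_std0[layer] rescales the
    # decoded features to the scale observed for true CNN features.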
    # Weight of each layer in the total loss function
    # Norm of the CNN features for each layer
    feat_norm = np.array([np.linalg.norm(features[layer]) for layer in layers], dtype="float32")
    # Use the inverse of the squared norm of the CNN features as the weight for each layer
    weights = 1. / (feat_norm ** 2)
    # Normalise the weights so that they sum to 1
    weights = weights / weights.sum()
    layer_weight = dict(zip(layers, weights))
    opts.update({"layer_weight": layer_weight})
    # Reconstruction
    snapshots_dir = os.path.join(save_dir, "snapshots_%s-%s" % (subject, roi), "image-%s" % image_label)
    recon_img, loss_list = reconstruct_image(features, net,
                                             save_intermediate=True,
                                             save_intermediate_path=snapshots_dir,
                                             **opts)
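
    # reconstruct_image optimises an image so that its CNN features match the
    # target features above; with save_intermediate=True it also writes
    # intermediate images to snapshots_dir, and loss_list presumably holds the
    # per-iteration loss values.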
    # Save the results

    # Save the raw reconstructed image
    save_name = "recon_img" + "-" + subject + "-" + roi + "-" + image_label + ".mat"
    sio.savemat(os.path.join(save_dir, save_name), {"recon_img": recon_img})

    # For better display, clip pixels with extreme values (the 0.02% of pixels
    # with the lowest values and the 0.02% with the highest values), then
    # normalise the image by mapping the pixel values into [0, 255]
    save_name = "recon_img" + "-" + subject + "-" + roi + "-" + image_label + ".jpg"
    PIL.Image.fromarray(normalise_img(clip_extreme_value(recon_img, pct=0.04))).save(os.path.join(save_dir, save_name))
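
    # A minimal sketch of the display post-processing, assuming
    # clip_extreme_value clips at the pct/2 and (100 - pct/2) percentiles and
    # normalise_img min-max scales to uint8 [0, 255] (the real helpers are
    # provided by the project code):
    #
    #     def clip_extreme_value(img, pct=0.04):
    #         lo, hi = np.percentile(img, [pct / 2., 100. - pct / 2.])
    #         return np.clip(img, lo, hi)
    #
    #     def normalise_img(img):
    #         img = img - img.min()
    #         if img.max() > 0:
    #             img = img / img.max()
    #         return (img * 255).astype(np.uint8)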
print("Done")

# After Change ------------------------------------------------------------------
# Setup results directory ------------------------------------------------------
save_dir_root = os.path.join(results_dir, os.path.splitext(__file__)[0])
if not os.path.exists(save_dir_root):
    os.makedirs(save_dir_root)
# Set reconstruction options -------------------------------------------------
opts = {
    # The loss function type: {"l2", "l1", "inner", "gram"}
    "loss_type": "l2",
    # The maximum number of iterations
    "maxiter": max_iteration,
    # The initial image for the optimization (None starts from random noise)
    "initial_image": initial_image,
    # Whether to print progress information to the terminal
    "disp": True
}
# Save the optional parameters (pickle requires binary mode)
with open(os.path.join(save_dir_root, "options.pkl"), "wb") as f:
    pickle.dump(opts, f)
# Reconstruction ----------------------------------------------------------------
for subject, roi, image_label in product(subjects_list, rois_list, image_label_list):
    print("")
    print("Subject: " + subject)
    print("ROI: " + roi)
    print("Image label: " + image_label)
    print("")

    # Create a per-subject, per-ROI output directory
    save_dir = os.path.join(save_dir_root, subject, roi)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
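
    # Example of the resulting layout (illustrative names):
    #     <results_dir>/<script_name>/S1/V1/recon_img-Img0001.mat
    #     <results_dir>/<script_name>/S1/V1/recon_img_normalized-Img0001.jpg
    #     <results_dir>/<script_name>/S1/V1/snapshots/image-Img0001/...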
    # Load the decoded CNN features
    features = {}
    for layer in layers:
        # The full file name depends on the data structure for decoded CNN features
        file_name = decode_feature_filename(network, layer, subject, roi, image_type, image_label)
        feat = sio.loadmat(file_name)["feat"]
        if "fc" in layer:
            # Features of fully-connected layers are flattened to vectors
            feat = feat.reshape(feat.size)
        # Correct the norm of the decoded CNN features
        feat_std = estimate_cnn_feat_std(feat)
        feat = (feat / feat_std) * feat_std0[layer]
        features[layer] = feat
    # Weight of each layer in the total loss function
    # Norm of the CNN features for each layer
    feat_norm = np.array([np.linalg.norm(features[layer]) for layer in layers], dtype="float32")
    # Use the inverse of the squared norm of the CNN features as the weight for each layer
    weights = 1. / (feat_norm ** 2)
    # Normalise the weights so that they sum to 1
    weights = weights / weights.sum()
    layer_weight = dict(zip(layers, weights))
    opts.update({"layer_weight": layer_weight})
    # Reconstruction
    snapshots_dir = os.path.join(save_dir, "snapshots", "image-%s" % image_label)
    recon_img, loss_list = reconstruct_image(features, net,
                                             save_intermediate=True,
                                             save_intermediate_path=snapshots_dir,
                                             **opts)
    # Save the results

    # Save the raw reconstructed image
    save_name = "recon_img" + "-" + image_label + ".mat"
    sio.savemat(os.path.join(save_dir, save_name), {"recon_img": recon_img})

    # For better display, clip pixels with extreme values (the 0.02% of pixels
    # with the lowest values and the 0.02% with the highest values), then
    # normalise the image by mapping the pixel values into [0, 255]
    save_name = "recon_img_normalized" + "-" + image_label + ".jpg"
    PIL.Image.fromarray(normalise_img(clip_extreme_value(recon_img, pct=0.04))).save(os.path.join(save_dir, save_name))
print("Done")