# Build the image reader: training uses every supported input section,
# inference needs only the image itself.
if self.is_training:
    self.reader = ImageReader(SUPPORTED_INPUT)
else:  # in the inference process use image input only
    self.reader = ImageReader(["image"])
self.reader.initialise_reader(data_param, task_param)

# Intensity normalisation layers (whitening and/or histogram matching).
mean_var_normaliser = MeanVarNormalisationLayer(
    image_name="image")
if self.net_param.histogram_ref_file:
    histogram_normaliser = HistogramNormalisationLayer(
        image_name="image",
        modalities=vars(task_param).get("image"),
        model_filename=self.net_param.histogram_ref_file,
        norm_type=self.net_param.norm_type,
        cutoff=self.net_param.cutoff,
        name="hist_norm_layer")
else:
    histogram_normaliser = None

normalisation_layers = []
if self.net_param.normalisation:
    # NOTE(review): histogram_normaliser may be None here when
    # normalisation is requested without a histogram_ref_file — confirm
    # the reader tolerates a None layer.
    normalisation_layers.append(histogram_normaliser)
if self.net_param.whitening:
    normalisation_layers.append(mean_var_normaliser)

# Random augmentation layers are applied at training time only.
augmentation_layers = []
if self.is_training:
    if self.action_param.random_flipping_axes != -1:
        augmentation_layers.append(RandomFlipLayer(
            flip_axes=self.action_param.random_flipping_axes))
    if self.action_param.scaling_percentage:
        augmentation_layers.append(RandomSpatialScalingLayer(
            min_percentage=self.action_param.scaling_percentage[0],
            max_percentage=self.action_param.scaling_percentage[1]))
    if self.action_param.rotation_angle:
        augmentation_layers.append(RandomRotationLayer())
        # configure the layer just appended
        augmentation_layers[-1].init_uniform_angle(
            self.action_param.rotation_angle)

# Optional zero-padding of the input volumes before any other layer.
volume_padding_layer = []
if self.net_param.volume_padding_size:
    volume_padding_layer.append(PadLayer(
        image_name=SUPPORTED_INPUT,
        border=self.net_param.volume_padding_size))

self.reader.add_preprocessing_layers(
    volume_padding_layer + normalisation_layers + augmentation_layers)
def initialise_uniform_sampler(self):
# After Change
# read each line of csv files into an instance of Subject
if self.is_training:
    # training keeps separate readers for the train and validation folds
    self.readers = [ImageReader(SUPPORTED_INPUT, phase="train"),
                    ImageReader(SUPPORTED_INPUT, phase="validation")]
else:  # in the inference process use image input only
    self.readers = [ImageReader(["image"], phase="test")]
for reader in self.readers:
    reader.initialise_reader(data_param, task_param)

# Intensity normalisation layers (whitening and/or histogram matching).
mean_var_normaliser = MeanVarNormalisationLayer(
    image_name="image")
if self.net_param.histogram_ref_file:
    histogram_normaliser = HistogramNormalisationLayer(
        image_name="image",
        modalities=vars(task_param).get("image"),
        model_filename=self.net_param.histogram_ref_file,
        norm_type=self.net_param.norm_type,
        cutoff=self.net_param.cutoff,
        name="hist_norm_layer")
else:
    histogram_normaliser = None

normalisation_layers = []
if self.net_param.normalisation:
    # NOTE(review): histogram_normaliser may be None here when
    # normalisation is requested without a histogram_ref_file — confirm
    # the readers tolerate a None layer.
    normalisation_layers.append(histogram_normaliser)
if self.net_param.whitening:
    normalisation_layers.append(mean_var_normaliser)

# Random augmentation layers are applied at training time only.
augmentation_layers = []
if self.is_training:
    if self.action_param.random_flipping_axes != -1:
        augmentation_layers.append(RandomFlipLayer(
            flip_axes=self.action_param.random_flipping_axes))
    if self.action_param.scaling_percentage:
        augmentation_layers.append(RandomSpatialScalingLayer(
            min_percentage=self.action_param.scaling_percentage[0],
            max_percentage=self.action_param.scaling_percentage[1]))
    if self.action_param.rotation_angle:
        augmentation_layers.append(RandomRotationLayer())
        # configure the layer just appended
        augmentation_layers[-1].init_uniform_angle(
            self.action_param.rotation_angle)

# Optional zero-padding of the input volumes before any other layer.
volume_padding_layer = []
if self.net_param.volume_padding_size:
    volume_padding_layer.append(PadLayer(
        image_name=SUPPORTED_INPUT,
        border=self.net_param.volume_padding_size))

# Every reader (train / validation / test) gets the same pipeline.
for reader in self.readers:
    reader.add_preprocessing_layers(volume_padding_layer +
                                    normalisation_layers +
                                    augmentation_layers)
def initialise_uniform_sampler(self):
self.sampler = [[UniformSampler(
reader=reader,
data_param=self.data_param,