# Default: use the model_fn as-is (single-device training).
model_function = model_fn
if flags_obj.multi_gpu:
    # Fail fast if the batch size cannot be evenly split across GPUs.
    validate_batch_size_for_multi_gpu(flags_obj.batch_size)

    # There are two steps required if using multi-GPU: (1) wrap the model_fn,
    # and (2) wrap the optimizer. The first happens here, and (2) happens
    # in the model_fn itself when the optimizer is defined.
    # NOTE(review): the wrapping call itself is not visible in this fragment —
    # it appears to have been cut off by the diff; confirm against the full file.
# --- After change (updated implementation below replaces the block above) ---
# Get number of GPUs as defined by the --num_gpus flag and the number of
# GPUs available on the machine.
num_gpus = flags_core.get_num_gpus(flags_obj)
multi_gpu = num_gpus > 1

if multi_gpu:
    # Validate that the batch size can be split evenly across the devices;
    # the helper raises if it cannot.
    distribution_utils.per_device_batch_size(flags_obj.batch_size, num_gpus)

    # There are two steps required if using multi-GPU: (1) wrap the model_fn,
    # and (2) wrap the optimizer. The first happens here, and (2) happens
    # in the model_fn itself when the optimizer is defined.
    model_function = tf.contrib.estimator.replicate_model_fn(
        model_fn, loss_reduction=tf.losses.Reduction.MEAN,
        devices=["/device:GPU:%d" % d for d in range(num_gpus)])
data_format = flags_obj.data_format
if data_format is None: