"distributed_replicated", "independent"],
"""The method for managing variables: parameter_server,
replicated, distributed_replicated, independent""")
# Flag: which device class hosts variables when this process acts as a
# (local) parameter server. NOTE(review): `flags` is defined outside this
# chunk — presumably absl/tf flags; confirm against the module imports.
flags.DEFINE_enum("tf_local_parameter_device", "gpu", ["cpu", "gpu"],
"""Device to use as parameter server: cpu or gpu. For
distributed training, it can affect where caching of
variables happens.""")
# After change
"""Device to use as parameter server: cpu or gpu. For
distributed training, it can affect where caching of
variables happens.""")
# Flag: device class used for computation. CPU/GPU are module-level
# constants defined outside this chunk (presumably the strings
# "cpu"/"gpu", matching the help text — TODO confirm their values).
flags.DEFINE_enum("tf_device", GPU, [CPU, GPU],
"Device to use for computation: cpu or gpu")
# Flag: tensor memory layout. NCHW is the default here; the help text
# notes NHWC is TF-native while NCHW is cuDNN-native.
flags.DEFINE_enum("tf_data_format", "NCHW", ["NCHW", "NHWC"], """Data layout to
use: NHWC (TF native) or NCHW (cuDNN native).""")
# Flag: opt in to NCCL all-reduce collectives when available (default on).
flags.DEFINE_boolean("tf_use_nccl", True,
"Whether to use nccl all-reduce primitives where possible")