public static final class GPUOptions.Builder extends com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder> implements GPUOptionsOrBuilder
Enclosing class: tensorflow.GPUOptions

| Modifier and Type | Method and Description |
|---|---|
GPUOptions.Builder |
addRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field,
Object value) |
GPUOptions |
build() |
GPUOptions |
buildPartial() |
GPUOptions.Builder |
clear() |
GPUOptions.Builder |
clearAllocatorType()
The type of GPU allocation strategy to use.
|
GPUOptions.Builder |
clearAllowGrowth()
If true, the allocator does not pre-allocate the entire specified
GPU memory region, instead starting small and growing as needed.
|
GPUOptions.Builder |
clearDeferredDeletionBytes()
Delay deletion of up to this many bytes to reduce the number of
interactions with gpu driver code.
|
GPUOptions.Builder |
clearExperimental()
.tensorflow.GPUOptions.Experimental experimental = 9; |
GPUOptions.Builder |
clearField(com.google.protobuf.Descriptors.FieldDescriptor field) |
GPUOptions.Builder |
clearForceGpuCompatible()
Force all tensors to be gpu_compatible.
|
GPUOptions.Builder |
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) |
GPUOptions.Builder |
clearPerProcessGpuMemoryFraction()
A value between 0 and 1 that indicates what fraction of the
available GPU memory to pre-allocate for each process.
|
GPUOptions.Builder |
clearPollingActiveDelayUsecs()
In the event polling loop sleep this many microseconds between
PollEvents calls, when the queue is not empty.
|
GPUOptions.Builder |
clearPollingInactiveDelayMsecs()
This field is deprecated and ignored.
|
GPUOptions.Builder |
clearVisibleDeviceList()
A comma-separated list of GPU ids that determines the 'visible'
to 'virtual' mapping of GPU devices.
|
GPUOptions.Builder |
clone() |
String |
getAllocatorType()
The type of GPU allocation strategy to use.
|
com.google.protobuf.ByteString |
getAllocatorTypeBytes()
The type of GPU allocation strategy to use.
|
boolean |
getAllowGrowth()
If true, the allocator does not pre-allocate the entire specified
GPU memory region, instead starting small and growing as needed.
|
GPUOptions |
getDefaultInstanceForType() |
long |
getDeferredDeletionBytes()
Delay deletion of up to this many bytes to reduce the number of
interactions with gpu driver code.
|
static com.google.protobuf.Descriptors.Descriptor |
getDescriptor() |
com.google.protobuf.Descriptors.Descriptor |
getDescriptorForType() |
GPUOptions.Experimental |
getExperimental()
.tensorflow.GPUOptions.Experimental experimental = 9; |
GPUOptions.Experimental.Builder |
getExperimentalBuilder()
.tensorflow.GPUOptions.Experimental experimental = 9; |
GPUOptions.ExperimentalOrBuilder |
getExperimentalOrBuilder()
.tensorflow.GPUOptions.Experimental experimental = 9; |
boolean |
getForceGpuCompatible()
Force all tensors to be gpu_compatible.
|
double |
getPerProcessGpuMemoryFraction()
A value between 0 and 1 that indicates what fraction of the
available GPU memory to pre-allocate for each process.
|
int |
getPollingActiveDelayUsecs()
In the event polling loop sleep this many microseconds between
PollEvents calls, when the queue is not empty.
|
int |
getPollingInactiveDelayMsecs()
This field is deprecated and ignored.
|
String |
getVisibleDeviceList()
A comma-separated list of GPU ids that determines the 'visible'
to 'virtual' mapping of GPU devices.
|
com.google.protobuf.ByteString |
getVisibleDeviceListBytes()
A comma-separated list of GPU ids that determines the 'visible'
to 'virtual' mapping of GPU devices.
|
boolean |
hasExperimental()
.tensorflow.GPUOptions.Experimental experimental = 9; |
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable |
internalGetFieldAccessorTable() |
boolean |
isInitialized() |
GPUOptions.Builder |
mergeExperimental(GPUOptions.Experimental value)
.tensorflow.GPUOptions.Experimental experimental = 9; |
GPUOptions.Builder |
mergeFrom(com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry) |
GPUOptions.Builder |
mergeFrom(GPUOptions other) |
GPUOptions.Builder |
mergeFrom(com.google.protobuf.Message other) |
GPUOptions.Builder |
mergeUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields) |
GPUOptions.Builder |
setAllocatorType(String value)
The type of GPU allocation strategy to use.
|
GPUOptions.Builder |
setAllocatorTypeBytes(com.google.protobuf.ByteString value)
The type of GPU allocation strategy to use.
|
GPUOptions.Builder |
setAllowGrowth(boolean value)
If true, the allocator does not pre-allocate the entire specified
GPU memory region, instead starting small and growing as needed.
|
GPUOptions.Builder |
setDeferredDeletionBytes(long value)
Delay deletion of up to this many bytes to reduce the number of
interactions with gpu driver code.
|
GPUOptions.Builder |
setExperimental(GPUOptions.Experimental.Builder builderForValue)
.tensorflow.GPUOptions.Experimental experimental = 9; |
GPUOptions.Builder |
setExperimental(GPUOptions.Experimental value)
.tensorflow.GPUOptions.Experimental experimental = 9; |
GPUOptions.Builder |
setField(com.google.protobuf.Descriptors.FieldDescriptor field,
Object value) |
GPUOptions.Builder |
setForceGpuCompatible(boolean value)
Force all tensors to be gpu_compatible.
|
GPUOptions.Builder |
setPerProcessGpuMemoryFraction(double value)
A value between 0 and 1 that indicates what fraction of the
available GPU memory to pre-allocate for each process.
|
GPUOptions.Builder |
setPollingActiveDelayUsecs(int value)
In the event polling loop sleep this many microseconds between
PollEvents calls, when the queue is not empty.
|
GPUOptions.Builder |
setPollingInactiveDelayMsecs(int value)
This field is deprecated and ignored.
|
GPUOptions.Builder |
setRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field,
int index,
Object value) |
GPUOptions.Builder |
setUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields) |
GPUOptions.Builder |
setVisibleDeviceList(String value)
A comma-separated list of GPU ids that determines the 'visible'
to 'virtual' mapping of GPU devices.
|
GPUOptions.Builder |
setVisibleDeviceListBytes(com.google.protobuf.ByteString value)
A comma-separated list of GPU ids that determines the 'visible'
to 'virtual' mapping of GPU devices.
|
Methods inherited from class com.google.protobuf.GeneratedMessageV3.Builder: getAllFields, getField, getFieldBuilder, getOneofFieldDescriptor, getParentForChildren, getRepeatedField, getRepeatedFieldBuilder, getRepeatedFieldCount, getUnknownFields, hasField, hasOneof, internalGetMapField, internalGetMutableMapField, isClean, markClean, newBuilderForField, onBuilt, onChanged, parseUnknownField

Methods inherited from class com.google.protobuf.AbstractMessage.Builder: findInitializationErrors, getInitializationErrorString, internalMergeFrom, mergeDelimitedFrom, mergeDelimitedFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, mergeFrom, newUninitializedMessageException, toString

Methods inherited from class com.google.protobuf.AbstractMessageLite.Builder: addAll, mergeFrom, newUninitializedMessageException

Methods inherited from class java.lang.Object: equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait

public static final com.google.protobuf.Descriptors.Descriptor getDescriptor()
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable()
internalGetFieldAccessorTable in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>

public GPUOptions.Builder clear()
clear in interface com.google.protobuf.Message.Builder; clear in interface com.google.protobuf.MessageLite.Builder; clear in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>

public com.google.protobuf.Descriptors.Descriptor getDescriptorForType()
getDescriptorForType in interface com.google.protobuf.Message.Builder; getDescriptorForType in interface com.google.protobuf.MessageOrBuilder; getDescriptorForType in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>

public GPUOptions getDefaultInstanceForType()
getDefaultInstanceForType in interface com.google.protobuf.MessageLiteOrBuilder; getDefaultInstanceForType in interface com.google.protobuf.MessageOrBuilder

public GPUOptions build()
build in interface com.google.protobuf.Message.Builderbuild in interface com.google.protobuf.MessageLite.Builderpublic GPUOptions buildPartial()
buildPartial in interface com.google.protobuf.Message.BuilderbuildPartial in interface com.google.protobuf.MessageLite.Builderpublic GPUOptions.Builder clone()
clone in interface com.google.protobuf.Message.Builderclone in interface com.google.protobuf.MessageLite.Builderclone in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>public GPUOptions.Builder setField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value)
setField in interface com.google.protobuf.Message.BuildersetField in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>public GPUOptions.Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field)
clearField in interface com.google.protobuf.Message.BuilderclearField in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>public GPUOptions.Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof)
clearOneof in interface com.google.protobuf.Message.BuilderclearOneof in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>public GPUOptions.Builder setRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value)
setRepeatedField in interface com.google.protobuf.Message.BuildersetRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>public GPUOptions.Builder addRepeatedField(com.google.protobuf.Descriptors.FieldDescriptor field, Object value)
addRepeatedField in interface com.google.protobuf.Message.BuilderaddRepeatedField in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>public GPUOptions.Builder mergeFrom(com.google.protobuf.Message other)
mergeFrom in interface com.google.protobuf.Message.BuildermergeFrom in class com.google.protobuf.AbstractMessage.Builder<GPUOptions.Builder>public GPUOptions.Builder mergeFrom(GPUOptions other)
public final boolean isInitialized()
isInitialized in interface com.google.protobuf.MessageLiteOrBuilder; isInitialized in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>

public GPUOptions.Builder mergeFrom(com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws IOException
mergeFrom in interface com.google.protobuf.Message.Builder; mergeFrom in interface com.google.protobuf.MessageLite.Builder; mergeFrom in class com.google.protobuf.AbstractMessage.Builder<GPUOptions.Builder>
Throws: IOException

public double getPerProcessGpuMemoryFraction()
A value between 0 and 1 that indicates what fraction of the available GPU memory to pre-allocate for each process. 1 means to pre-allocate all of the GPU memory, 0.5 means the process allocates ~50% of the available GPU memory.
double per_process_gpu_memory_fraction = 1;getPerProcessGpuMemoryFraction in interface GPUOptionsOrBuilderpublic GPUOptions.Builder setPerProcessGpuMemoryFraction(double value)
A value between 0 and 1 that indicates what fraction of the available GPU memory to pre-allocate for each process. 1 means to pre-allocate all of the GPU memory, 0.5 means the process allocates ~50% of the available GPU memory.
double per_process_gpu_memory_fraction = 1;public GPUOptions.Builder clearPerProcessGpuMemoryFraction()
A value between 0 and 1 that indicates what fraction of the available GPU memory to pre-allocate for each process. 1 means to pre-allocate all of the GPU memory, 0.5 means the process allocates ~50% of the available GPU memory.
double per_process_gpu_memory_fraction = 1;public String getAllocatorType()
The type of GPU allocation strategy to use.
Allowed values:
"": The empty string (default) uses a system-chosen default
which may change over time.
"BFC": A "Best-fit with coalescing" algorithm, simplified from a
version of dlmalloc.
string allocator_type = 2;
getAllocatorType in interface GPUOptionsOrBuilder

public com.google.protobuf.ByteString getAllocatorTypeBytes()
The type of GPU allocation strategy to use.
Allowed values:
"": The empty string (default) uses a system-chosen default
which may change over time.
"BFC": A "Best-fit with coalescing" algorithm, simplified from a
version of dlmalloc.
string allocator_type = 2;getAllocatorTypeBytes in interface GPUOptionsOrBuilderpublic GPUOptions.Builder setAllocatorType(String value)
The type of GPU allocation strategy to use.
Allowed values:
"": The empty string (default) uses a system-chosen default
which may change over time.
"BFC": A "Best-fit with coalescing" algorithm, simplified from a
version of dlmalloc.
string allocator_type = 2;public GPUOptions.Builder clearAllocatorType()
The type of GPU allocation strategy to use.
Allowed values:
"": The empty string (default) uses a system-chosen default
which may change over time.
"BFC": A "Best-fit with coalescing" algorithm, simplified from a
version of dlmalloc.
string allocator_type = 2;public GPUOptions.Builder setAllocatorTypeBytes(com.google.protobuf.ByteString value)
The type of GPU allocation strategy to use.
Allowed values:
"": The empty string (default) uses a system-chosen default
which may change over time.
"BFC": A "Best-fit with coalescing" algorithm, simplified from a
version of dlmalloc.
string allocator_type = 2;public long getDeferredDeletionBytes()
Delay deletion of up to this many bytes to reduce the number of interactions with gpu driver code. If 0, the system chooses a reasonable default (several MBs).
int64 deferred_deletion_bytes = 3;getDeferredDeletionBytes in interface GPUOptionsOrBuilderpublic GPUOptions.Builder setDeferredDeletionBytes(long value)
Delay deletion of up to this many bytes to reduce the number of interactions with gpu driver code. If 0, the system chooses a reasonable default (several MBs).
int64 deferred_deletion_bytes = 3;public GPUOptions.Builder clearDeferredDeletionBytes()
Delay deletion of up to this many bytes to reduce the number of interactions with gpu driver code. If 0, the system chooses a reasonable default (several MBs).
int64 deferred_deletion_bytes = 3;public boolean getAllowGrowth()
If true, the allocator does not pre-allocate the entire specified GPU memory region, instead starting small and growing as needed.
bool allow_growth = 4;getAllowGrowth in interface GPUOptionsOrBuilderpublic GPUOptions.Builder setAllowGrowth(boolean value)
If true, the allocator does not pre-allocate the entire specified GPU memory region, instead starting small and growing as needed.
bool allow_growth = 4;public GPUOptions.Builder clearAllowGrowth()
If true, the allocator does not pre-allocate the entire specified GPU memory region, instead starting small and growing as needed.
bool allow_growth = 4;public String getVisibleDeviceList()
A comma-separated list of GPU ids that determines the 'visible'
to 'virtual' mapping of GPU devices. For example, if TensorFlow
can see 8 GPU devices in the process, and one wanted to map
visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
then one would specify this field as "5,3". This field is similar in
spirit to the CUDA_VISIBLE_DEVICES environment variable, except
it applies to the visible GPU devices in the process.
NOTE:
1. The GPU driver provides the process with the visible GPUs
in an order which is not guaranteed to have any correlation to
the *physical* GPU id in the machine. This field is used for
remapping "visible" to "virtual", which means this operates only
after the process starts. Users are required to use vendor
specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
physical to visible device mapping prior to invoking TensorFlow.
2. In the code, the ids in this list are also called "CUDA GPU id"s,
and the 'virtual' ids of GPU devices (i.e. the ids in the device
name "/device:GPU:<id>") are also called "TF GPU id"s. Please
refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
for more information.
string visible_device_list = 5;getVisibleDeviceList in interface GPUOptionsOrBuilderpublic com.google.protobuf.ByteString getVisibleDeviceListBytes()
A comma-separated list of GPU ids that determines the 'visible'
to 'virtual' mapping of GPU devices. For example, if TensorFlow
can see 8 GPU devices in the process, and one wanted to map
visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
then one would specify this field as "5,3". This field is similar in
spirit to the CUDA_VISIBLE_DEVICES environment variable, except
it applies to the visible GPU devices in the process.
NOTE:
1. The GPU driver provides the process with the visible GPUs
in an order which is not guaranteed to have any correlation to
the *physical* GPU id in the machine. This field is used for
remapping "visible" to "virtual", which means this operates only
after the process starts. Users are required to use vendor
specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
physical to visible device mapping prior to invoking TensorFlow.
2. In the code, the ids in this list are also called "CUDA GPU id"s,
and the 'virtual' ids of GPU devices (i.e. the ids in the device
name "/device:GPU:<id>") are also called "TF GPU id"s. Please
refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
for more information.
string visible_device_list = 5;getVisibleDeviceListBytes in interface GPUOptionsOrBuilderpublic GPUOptions.Builder setVisibleDeviceList(String value)
A comma-separated list of GPU ids that determines the 'visible'
to 'virtual' mapping of GPU devices. For example, if TensorFlow
can see 8 GPU devices in the process, and one wanted to map
visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
then one would specify this field as "5,3". This field is similar in
spirit to the CUDA_VISIBLE_DEVICES environment variable, except
it applies to the visible GPU devices in the process.
NOTE:
1. The GPU driver provides the process with the visible GPUs
in an order which is not guaranteed to have any correlation to
the *physical* GPU id in the machine. This field is used for
remapping "visible" to "virtual", which means this operates only
after the process starts. Users are required to use vendor
specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
physical to visible device mapping prior to invoking TensorFlow.
2. In the code, the ids in this list are also called "CUDA GPU id"s,
and the 'virtual' ids of GPU devices (i.e. the ids in the device
name "/device:GPU:<id>") are also called "TF GPU id"s. Please
refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
for more information.
string visible_device_list = 5;public GPUOptions.Builder clearVisibleDeviceList()
A comma-separated list of GPU ids that determines the 'visible'
to 'virtual' mapping of GPU devices. For example, if TensorFlow
can see 8 GPU devices in the process, and one wanted to map
visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
then one would specify this field as "5,3". This field is similar in
spirit to the CUDA_VISIBLE_DEVICES environment variable, except
it applies to the visible GPU devices in the process.
NOTE:
1. The GPU driver provides the process with the visible GPUs
in an order which is not guaranteed to have any correlation to
the *physical* GPU id in the machine. This field is used for
remapping "visible" to "virtual", which means this operates only
after the process starts. Users are required to use vendor
specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
physical to visible device mapping prior to invoking TensorFlow.
2. In the code, the ids in this list are also called "CUDA GPU id"s,
and the 'virtual' ids of GPU devices (i.e. the ids in the device
name "/device:GPU:<id>") are also called "TF GPU id"s. Please
refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
for more information.
string visible_device_list = 5;public GPUOptions.Builder setVisibleDeviceListBytes(com.google.protobuf.ByteString value)
A comma-separated list of GPU ids that determines the 'visible'
to 'virtual' mapping of GPU devices. For example, if TensorFlow
can see 8 GPU devices in the process, and one wanted to map
visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
then one would specify this field as "5,3". This field is similar in
spirit to the CUDA_VISIBLE_DEVICES environment variable, except
it applies to the visible GPU devices in the process.
NOTE:
1. The GPU driver provides the process with the visible GPUs
in an order which is not guaranteed to have any correlation to
the *physical* GPU id in the machine. This field is used for
remapping "visible" to "virtual", which means this operates only
after the process starts. Users are required to use vendor
specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
physical to visible device mapping prior to invoking TensorFlow.
2. In the code, the ids in this list are also called "CUDA GPU id"s,
and the 'virtual' ids of GPU devices (i.e. the ids in the device
name "/device:GPU:<id>") are also called "TF GPU id"s. Please
refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
for more information.
string visible_device_list = 5;public int getPollingActiveDelayUsecs()
In the event polling loop sleep this many microseconds between PollEvents calls, when the queue is not empty. If value is not set or set to 0, gets set to a non-zero default.
int32 polling_active_delay_usecs = 6;getPollingActiveDelayUsecs in interface GPUOptionsOrBuilderpublic GPUOptions.Builder setPollingActiveDelayUsecs(int value)
In the event polling loop sleep this many microseconds between PollEvents calls, when the queue is not empty. If value is not set or set to 0, gets set to a non-zero default.
int32 polling_active_delay_usecs = 6;public GPUOptions.Builder clearPollingActiveDelayUsecs()
In the event polling loop sleep this many microseconds between PollEvents calls, when the queue is not empty. If value is not set or set to 0, gets set to a non-zero default.
int32 polling_active_delay_usecs = 6;public int getPollingInactiveDelayMsecs()
This field is deprecated and ignored.
int32 polling_inactive_delay_msecs = 7;getPollingInactiveDelayMsecs in interface GPUOptionsOrBuilderpublic GPUOptions.Builder setPollingInactiveDelayMsecs(int value)
This field is deprecated and ignored.
int32 polling_inactive_delay_msecs = 7;public GPUOptions.Builder clearPollingInactiveDelayMsecs()
This field is deprecated and ignored.
int32 polling_inactive_delay_msecs = 7;public boolean getForceGpuCompatible()
Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow, enabling this option forces all CPU tensors to be allocated with Cuda pinned memory. Normally, TensorFlow will infer which tensors should be allocated as the pinned memory. But in case where the inference is incomplete, this option can significantly speed up the cross-device memory copy performance as long as it fits the memory. Note that this option is not something that should be enabled by default for unknown or very large models, since all Cuda pinned memory is unpageable, having too much pinned memory might negatively impact the overall host system performance.
bool force_gpu_compatible = 8;getForceGpuCompatible in interface GPUOptionsOrBuilderpublic GPUOptions.Builder setForceGpuCompatible(boolean value)
Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow, enabling this option forces all CPU tensors to be allocated with Cuda pinned memory. Normally, TensorFlow will infer which tensors should be allocated as the pinned memory. But in case where the inference is incomplete, this option can significantly speed up the cross-device memory copy performance as long as it fits the memory. Note that this option is not something that should be enabled by default for unknown or very large models, since all Cuda pinned memory is unpageable, having too much pinned memory might negatively impact the overall host system performance.
bool force_gpu_compatible = 8;public GPUOptions.Builder clearForceGpuCompatible()
Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow, enabling this option forces all CPU tensors to be allocated with Cuda pinned memory. Normally, TensorFlow will infer which tensors should be allocated as the pinned memory. But in case where the inference is incomplete, this option can significantly speed up the cross-device memory copy performance as long as it fits the memory. Note that this option is not something that should be enabled by default for unknown or very large models, since all Cuda pinned memory is unpageable, having too much pinned memory might negatively impact the overall host system performance.
bool force_gpu_compatible = 8;public boolean hasExperimental()
.tensorflow.GPUOptions.Experimental experimental = 9;hasExperimental in interface GPUOptionsOrBuilderpublic GPUOptions.Experimental getExperimental()
.tensorflow.GPUOptions.Experimental experimental = 9;getExperimental in interface GPUOptionsOrBuilderpublic GPUOptions.Builder setExperimental(GPUOptions.Experimental value)
.tensorflow.GPUOptions.Experimental experimental = 9;public GPUOptions.Builder setExperimental(GPUOptions.Experimental.Builder builderForValue)
.tensorflow.GPUOptions.Experimental experimental = 9;public GPUOptions.Builder mergeExperimental(GPUOptions.Experimental value)
.tensorflow.GPUOptions.Experimental experimental = 9;public GPUOptions.Builder clearExperimental()
.tensorflow.GPUOptions.Experimental experimental = 9;public GPUOptions.Experimental.Builder getExperimentalBuilder()
.tensorflow.GPUOptions.Experimental experimental = 9;public GPUOptions.ExperimentalOrBuilder getExperimentalOrBuilder()
.tensorflow.GPUOptions.Experimental experimental = 9;getExperimentalOrBuilder in interface GPUOptionsOrBuilderpublic final GPUOptions.Builder setUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields)
setUnknownFields in interface com.google.protobuf.Message.BuildersetUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>public final GPUOptions.Builder mergeUnknownFields(com.google.protobuf.UnknownFieldSet unknownFields)
mergeUnknownFields in interface com.google.protobuf.Message.Builder; mergeUnknownFields in class com.google.protobuf.GeneratedMessageV3.Builder<GPUOptions.Builder>

Copyright © 2015–2018. All rights reserved.