diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4eeaf85de8a..dd93a316172 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,7 @@ env: jobs: quick-build: if: github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'CI build') - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 container: centos:7 steps: - name: Install environment @@ -47,7 +47,7 @@ jobs: mvn compiler:compile -Pdev,jdk17 -B -U -e check-format: if: github.event_name == 'pull_request' - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 container: centos:7 steps: - name: Install environment @@ -76,7 +76,7 @@ jobs: run: | mvn spotless:check -Pdev,jdk17 -B -U -e prepare: - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 outputs: stagingRepositoryId: ${{ steps.staging.outputs.stagingRepositoryId }} steps: @@ -93,7 +93,7 @@ jobs: echo "::set-output name=stagingRepositoryId::$STAGING_REPOSITORY_ID" linux-x86_64: if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'CI build') - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 container: centos:7 needs: prepare strategy: @@ -113,7 +113,7 @@ jobs: tar xzf $HOME/apache-maven-3.6.3-bin.tar.gz -C /opt/ ln -sf /opt/apache-maven-3.6.3/bin/mvn /usr/bin/mvn echo Downloading Bazel - curl -L https://github.com/bazelbuild/bazel/releases/download/3.7.2/bazel-3.7.2-installer-linux-x86_64.sh -o bazel.sh --retry 10 + curl -L https://github.com/bazelbuild/bazel/releases/download/4.2.1/bazel-4.2.1-installer-linux-x86_64.sh -o bazel.sh --retry 10 bash bazel.sh if [[ "${{ matrix.ext }}" == *-gpu ]]; then echo Installing CUDA @@ -163,7 +163,7 @@ jobs: df -h macosx-x86_64: if: github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'CI build') - runs-on: macos-latest + runs-on: macos-10.15 needs: prepare strategy: matrix: @@ -173,7 +173,7 @@ jobs: run: | python3 -m pip install numpy six echo Downloading Bazel - curl -L https://github.com/bazelbuild/bazel/releases/download/3.7.2/bazel-3.7.2-installer-darwin-x86_64.sh -o bazel.sh --retry 10 + curl -L https://github.com/bazelbuild/bazel/releases/download/4.2.1/bazel-4.2.1-installer-darwin-x86_64.sh -o bazel.sh --retry 10 bash bazel.sh brew install libomp perl - name: Configure Java @@ -227,7 +227,7 @@ jobs: bash.exe -lc "find 'C:/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/VC/' -iname '14.1*' -exec rm -Rf {} \;" echo Downloading Bazel mkdir C:\bazel - curl.exe -L https://github.com/bazelbuild/bazel/releases/download/3.7.2/bazel-3.7.2-windows-x86_64.exe -o C:/bazel/bazel.exe --retry 10 + curl.exe -L https://github.com/bazelbuild/bazel/releases/download/4.2.1/bazel-4.2.1-windows-x86_64.exe -o C:/bazel/bazel.exe --retry 10 set "EXT=${{ matrix.ext }}" if "%EXT:~-4%" == "-gpu" ( echo Removing some unused stuff to avoid running out of disk space @@ -287,7 +287,7 @@ jobs: deploy: if: github.event_name == 'push' && contains(github.ref, 'master') needs: [linux-x86_64, macosx-x86_64, windows-x86_64] - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 steps: - name: Checkout repository uses: actions/checkout@v1 diff --git a/tensorflow-core/pom.xml b/tensorflow-core/pom.xml index 7d3dd3ca324..1fd4c71debe 100644 --- a/tensorflow-core/pom.xml +++ b/tensorflow-core/pom.xml @@ -67,7 +67,7 @@ macosx-x86_64${javacpp.platform.extension} windows-x86${javacpp.platform.extension} 
windows-x86_64${javacpp.platform.extension} - 1.5.6 + 1.5.7 diff --git a/tensorflow-core/tensorflow-core-api/.bazelrc b/tensorflow-core/tensorflow-core-api/.bazelrc index 26f501ff451..9f875293e54 100644 --- a/tensorflow-core/tensorflow-core-api/.bazelrc +++ b/tensorflow-core/tensorflow-core-api/.bazelrc @@ -1,2 +1,3 @@ build --remote_cache=https://storage.googleapis.com/tensorflow-sigs-jvm -build --remote_upload_local_results=false \ No newline at end of file +build --remote_upload_local_results=false +build --incompatible_restrict_string_escapes=false \ No newline at end of file diff --git a/tensorflow-core/tensorflow-core-api/.bazelversion b/tensorflow-core/tensorflow-core-api/.bazelversion index 0b2eb36f508..fae6e3d04b2 100644 --- a/tensorflow-core/tensorflow-core-api/.bazelversion +++ b/tensorflow-core/tensorflow-core-api/.bazelversion @@ -1 +1 @@ -3.7.2 +4.2.1 diff --git a/tensorflow-core/tensorflow-core-api/WORKSPACE b/tensorflow-core/tensorflow-core-api/WORKSPACE index 0ac4d82a193..28f2bb27e6f 100644 --- a/tensorflow-core/tensorflow-core-api/WORKSPACE +++ b/tensorflow-core/tensorflow-core-api/WORKSPACE @@ -18,10 +18,10 @@ http_archive( patch_args = ["-p1"], patch_cmds = ["grep -rl 'java_package' tensorflow/core | xargs sed -i.bak 's/^\(.* java_package = \"org\.tensorflow\.\)\(.*\"\)/\\1proto.\\2'/"], urls = [ - "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.7.1.tar.gz", + "https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.8.0.tar.gz", ], - sha256 = "abebe2cf5ca379e18071693ca5f45b88ade941b16258a21cc1f12d77d5387a21", - strip_prefix = "tensorflow-2.7.1" + sha256 = "66b953ae7fba61fd78969a2e24e350b26ec116cf2e6a7eb93d02c63939c6f9f7", + strip_prefix = "tensorflow-2.8.0" ) # START: Upstream TensorFlow dependencies diff --git a/tensorflow-core/tensorflow-core-api/build.sh b/tensorflow-core/tensorflow-core-api/build.sh index b5d25277ea0..368eb43d74f 100755 --- a/tensorflow-core/tensorflow-core-api/build.sh +++ b/tensorflow-core/tensorflow-core-api/build.sh @@ -7,9 +7,11 @@ export BAZEL_USE_CPP_ONLY_TOOLCHAIN=1 export BAZEL_VC="${VCINSTALLDIR:-}" if [[ -d $BAZEL_VC ]]; then + export BAZEL_BUILD="--output_user_root=$(cygpath -w $TMP) build" export BUILD_FLAGS="--copt=//arch:AVX `#--copt=//arch:AVX2` --define=override_eigen_strong_inline=true" export PYTHON_BIN_PATH=$(which python.exe) else + export BAZEL_BUILD="build" export BUILD_FLAGS="--copt=-msse4.1 --copt=-msse4.2 --copt=-mavx `#--copt=-mavx2 --copt=-mfma` --linkopt=-lstdc++ --host_linkopt=-lstdc++" export PYTHON_BIN_PATH=$(which python3) fi @@ -33,7 +35,7 @@ BUILD_FLAGS="$BUILD_FLAGS --experimental_repo_remote_exec --python_path="$PYTHON BUILD_FLAGS="$BUILD_FLAGS --distinct_host_configuration=true" # Build C/C++ API of TensorFlow itself including a target to generate ops for Java -bazel --bazelrc=tensorflow.bazelrc build $BUILD_FLAGS ${BUILD_USER_FLAGS:-} \ +bazel --bazelrc=tensorflow.bazelrc $BAZEL_BUILD $BUILD_FLAGS ${BUILD_USER_FLAGS:-} \ @org_tensorflow//tensorflow:tensorflow_cc \ @org_tensorflow//tensorflow/tools/lib_package:jnilicenses_generate \ :java_proto_gen_sources \ diff --git a/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch b/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch index beef5d567fd..97a01022aa0 100644 --- a/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch +++ b/tensorflow-core/tensorflow-core-api/external/tensorflow-proto.patch @@ -461,3 +461,25 @@ diff -ruN 
tensorflow-2.7.0/tensorflow/core/util/memmapped_file_system.proto tens // A message that describes one region of memmapped file. message MemmappedFileSystemDirectoryElement { +diff -ruN tensorflow-2.8.0/tensorflow/core/protobuf/coordination_config.proto tensorflow-2.8.0-proto/tensorflow/core/protobuf/coordination_config.proto +--- tensorflow-2.8.0/tensorflow/core/protobuf/coordination_config.proto 2022-02-01 04:17:33.000000000 +0900 ++++ tensorflow-2.8.0-proto/tensorflow/core/protobuf/coordination_config.proto 2022-02-16 11:02:10.162709816 +0900 +@@ -3,6 +3,7 @@ + package tensorflow; + + option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; ++option java_package = "org.tensorflow.distruntime"; + + // Coordination service configuration parameters. + // The system picks appropriate values for fields that are not set. +diff -ruN tensorflow-2.8.0/tensorflow/core/protobuf/distributed_runtime_payloads.proto tensorflow-2.8.0-proto/tensorflow/core/protobuf/distributed_runtime_payloads.proto +--- tensorflow-2.8.0/tensorflow/core/protobuf/distributed_runtime_payloads.proto 2022-02-01 04:17:33.000000000 +0900 ++++ tensorflow-2.8.0-proto/tensorflow/core/protobuf/distributed_runtime_payloads.proto 2022-02-16 11:00:03.739373379 +0900 +@@ -4,6 +4,7 @@ + + option cc_enable_arenas = true; + option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto"; ++option java_package = "org.tensorflow.distruntime"; + + // Used to serialize and transmit tensorflow::Status payloads through + // grpc::Status `error_details` since grpc::Status lacks payload API. diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousIteratorV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousIteratorV2.pbtxt index 60293cad9fb..71b6959cf2d 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousIteratorV2.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousIteratorV2.pbtxt @@ -1,7 +1,4 @@ op { graph_op_name: "AnonymousIteratorV2" - visibility: VISIBLE - endpoint { - name: "data.AnonymousIterator" - } + visibility: SKIP } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousIteratorV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousIteratorV3.pbtxt new file mode 100644 index 00000000000..0f12f6f369c --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousIteratorV3.pbtxt @@ -0,0 +1,7 @@ +op { + graph_op_name: "AnonymousIteratorV3" + visibility: VISIBLE + endpoint { + name: "data.AnonymousIterator" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousMutableDenseHashTable.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousMutableDenseHashTable.pbtxt new file mode 100644 index 00000000000..1774d42d57f --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousMutableDenseHashTable.pbtxt @@ -0,0 +1,3 @@ +op { + graph_op_name: "AnonymousMutableDenseHashTable" +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousMutableHashTable.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousMutableHashTable.pbtxt new file mode 100644 index 00000000000..80aec142155 --- /dev/null +++ 
b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousMutableHashTable.pbtxt @@ -0,0 +1,3 @@ +op { + graph_op_name: "AnonymousMutableHashTable" +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousMutableHashTableOfTensors.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousMutableHashTableOfTensors.pbtxt new file mode 100644 index 00000000000..069a81cbac7 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_AnonymousMutableHashTableOfTensors.pbtxt @@ -0,0 +1,3 @@ +op { + graph_op_name: "AnonymousMutableHashTableOfTensors" +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV2.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV2.pbtxt index 083caab7391..63f9d0c5aae 100644 --- a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV2.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV2.pbtxt @@ -1,7 +1,4 @@ op { graph_op_name: "DataServiceDatasetV2" - visibility: VISIBLE - endpoint { - name: "data.DataServiceDatasetV2" - } + visibility: SKIP } diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV3.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV3.pbtxt new file mode 100644 index 00000000000..983106c8bf6 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DataServiceDatasetV3.pbtxt @@ -0,0 +1,7 @@ +op { + graph_op_name: "DataServiceDatasetV3" + visibility: VISIBLE + endpoint { + name: "data.DataServiceDataset" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt new file mode 100644 index 00000000000..16882e5f1d3 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch" + endpoint { + name: "tpu.DynamicEnqueueTPUEmbeddingArbitraryTensorBatch" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt new file mode 100644 index 00000000000..90dc3c898d0 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_EnqueueTPUEmbeddingArbitraryTensorBatch.pbtxt @@ -0,0 +1,6 @@ +op { + graph_op_name: "EnqueueTPUEmbeddingArbitraryTensorBatch" + endpoint { + name: "tpu.EnqueueTPUEmbeddingArbitraryTensorBatch" + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_FileSystemSetConfiguration.pbtxt b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_FileSystemSetConfiguration.pbtxt new file mode 100644 index 00000000000..a4699e1a487 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/api_def_FileSystemSetConfiguration.pbtxt @@ -0,0 +1,3 @@ +op { + graph_op_name: "FileSystemSetConfiguration" +} diff --git a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc index e8850f7867d..2f8bf7bf8cd 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc +++ b/tensorflow-core/tensorflow-core-api/src/bazel/api_def/import/api_import.cc @@ -183,8 +183,8 @@ int main(int argc, char* argv[]) { TF_CHECK_OK(env->GetMatchingPaths(golden_api_path, &golden_api_files)); LOG(INFO) << "Loading " << golden_api_files.size() << " Python API golden files"; for (const auto& filename : golden_api_files) { - // Skip the raw_ops API, as it contains all op endpoints - if (filename == "tensorflow.raw_ops.pbtxt") { + // Skip the raw_ops API, as it contains all op endpoints in a single package + if (str_util::EndsWith(filename, "tensorflow.raw_ops.pbtxt")) { continue; } string contents; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java index 5d4583c01c4..384c11a258c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/DataOps.java @@ -32,7 +32,7 @@ import org.tensorflow.op.data.ChooseFastestBranchDataset; import org.tensorflow.op.data.ChooseFastestDataset; import org.tensorflow.op.data.ConcatenateDataset; -import org.tensorflow.op.data.DataServiceDatasetV2; +import org.tensorflow.op.data.DataServiceDataset; import org.tensorflow.op.data.DatasetCardinality; import org.tensorflow.op.data.DatasetFromGraph; import org.tensorflow.op.data.DatasetToGraph; @@ -350,16 +350,17 @@ public ConcatenateDataset concatenateDataset(Operand inputDatas * @param iterationCounter The iterationCounter value * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param uncompressFn The value of the uncompressFn attribute * @param options carries optional attribute values - * @return a new instance of DataServiceDatasetV2 + * @return a new instance of DataServiceDataset */ - public DataServiceDatasetV2 dataServiceDatasetV2(Operand datasetId, + public DataServiceDataset dataServiceDataset(Operand datasetId, Operand processingMode, Operand address, Operand protocol, Operand jobName, Operand consumerIndex, Operand numConsumers, Operand maxOutstandingRequests, Operand iterationCounter, List> outputTypes, List outputShapes, - DataServiceDatasetV2.Options... options) { - return DataServiceDatasetV2.create(scope, datasetId, processingMode, address, protocol, jobName, consumerIndex, numConsumers, maxOutstandingRequests, iterationCounter, outputTypes, outputShapes, options); + ConcreteFunction uncompressFn, DataServiceDataset.Options... options) { + return DataServiceDataset.create(scope, datasetId, processingMode, address, protocol, jobName, consumerIndex, numConsumers, maxOutstandingRequests, iterationCounter, outputTypes, outputShapes, uncompressFn, options); } /** @@ -1594,12 +1595,14 @@ public SleepDataset sleepDataset(Operand inputDataset, * It must be positive. 
* @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of SlidingWindowDataset */ public SlidingWindowDataset slidingWindowDataset(Operand inputDataset, Operand windowSize, Operand windowShift, Operand windowStride, - List> outputTypes, List outputShapes) { - return SlidingWindowDataset.create(scope, inputDataset, windowSize, windowShift, windowStride, outputTypes, outputShapes); + List> outputTypes, List outputShapes, + SlidingWindowDataset.Options... options) { + return SlidingWindowDataset.create(scope, inputDataset, windowSize, windowShift, windowStride, outputTypes, outputShapes, options); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java index b31ae7663b0..dc237880e1f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/IoOps.java @@ -90,8 +90,9 @@ public final class IoOps { /** * Decode web-safe base64-encoded strings. - * Input may or may not have padding at the end. See EncodeBase64 for padding. - * Web-safe means that input must use - and _ instead of + and /. + * Input may or may not have padding at the end. See + * EncodeBase64 + * for padding. Web-safe means that input must use - and _ instead of + and /. * * @param input Base64 strings to decode. * @return a new instance of DecodeBase64 @@ -242,8 +243,8 @@ public DeserializeManySparse deserializeManySparse( /** * Encode strings into web-safe base64 format. - * Refer to the following article for more information on base64 format: - * en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the + * Refer to this article for more information on + * base64 format. Base64 strings may have padding with '=' at the * end so that the encoded has length multiple of 4. See Padding section of the * link above. *

Web-safe means that the encoder uses - and _ instead of + and /. @@ -992,8 +993,8 @@ public WholeFileReader wholeFileReader(WholeFileReader.Options... options) { } /** - * Writes contents to the file at input filename. Creates file and recursively - * creates directory if not existing. + * Writes {@code contents} to the file at input {@code filename}. + * Creates the file and recursively creates directory if it does not exist. * * @param filename scalar. The name of the file to which we write the contents. * @param contents scalar. The content to be written to the output file. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java index d0e1f2ca7f7..2575d62c8d8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/MathOps.java @@ -1866,7 +1866,7 @@ public SegmentProd segmentProd(Operand data, *

For example: *

    *  c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
-   *  tf.segment_sum(c, tf.constant([0, 0, 1]))
+   *  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
    *  # ==> [[5, 5, 5, 5],
    *  #      [5, 6, 7, 8]]
    *  
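The Javadoc change in this hunk only renames the Python symbol quoted in the generated example (tf.segment_sum becomes tf.math.segment_sum); the Java endpoint itself is unchanged. A minimal sketch of the same computation through the generated MathOps endpoint, assuming an eager Ops instance and the asTensor() accessor; the class name SegmentSumExample is illustrative only:

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.op.math.SegmentSum;
import org.tensorflow.types.TInt32;

public class SegmentSumExample {
  public static void main(String[] args) {
    // Eager-mode ops, so the result can be read back immediately.
    Ops tf = Ops.create();

    // Same data as the Javadoc example above.
    Operand<TInt32> c = tf.constant(new int[][] {{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}});
    Operand<TInt32> segmentIds = tf.constant(new int[] {0, 0, 1});

    // Sums rows that share a segment id: rows 0 and 1 -> segment 0, row 2 -> segment 1.
    SegmentSum<TInt32> sum = tf.math.segmentSum(c, segmentIds);
    System.out.println(sum.asTensor());  // the tensor now holds [[5, 5, 5, 5], [5, 6, 7, 8]]
  }
}

Rows 0 and 1 share segment ID 0, so their element-wise sum forms the first output row, matching the [[5, 5, 5, 5], [5, 6, 7, 8]] result quoted in the Javadoc.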
@@ -2056,8 +2056,8 @@ public Tan tan(Operand x) { *

x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) * tf.math.tanh(x) * <tf.Tensor: shape=(8,), dtype=float32, numpy= - * array([-1. , -0.99990916, -0.46211717, 0.7615942 , 0.8336547 , - * 0.9640276 , 0.9950547 , 1. ], dtype=float32)> + * array([-1.0, -0.99990916, -0.46211717, 0.7615942 , 0.8336547 , + * 0.9640276 , 0.9950547 , 1.0], dtype=float32)> * * * @@ -2111,8 +2111,7 @@ public TruncateMod truncateMod(Operand x, Operand y * Read * the section on segmentation * for an explanation of segments. - *

This operator is similar to the unsorted segment sum operator found - * (here) . + *

This operator is similar to {@code tf.math.unsorted_segment_sum}. * Instead of computing the sum over segments, it computes the maximum such that: *

\(output_i = \max_{j...} data[j...]\) where max is over tuples {@code j...} such * that {@code segment_ids[j...] == i}. @@ -2121,20 +2120,33 @@ public TruncateMod truncateMod(Operand x, Operand y * {@code output[i] = numeric_limits::lowest()}. *

If the given segment ID {@code i} is negative, then the corresponding value is * dropped, and will not be included in the result. + *

Caution: On CPU, values in {@code segment_ids} are always validated to be less than + * {@code num_segments}, and an error is thrown for out-of-bound indices. On GPU, this + * does not throw an error for out-of-bound indices. On GPU, out-of-bound indices + * result in safe but unspecified behavior, which may include ignoring + * out-of-bound indices or outputting a tensor with a 0 stored in the first + * dimension of its shape if {@code num_segments} is 0. *

* *
*

For example: - *

-   *  c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
-   *  tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
-   *  # ==> [[ 4,  3, 3, 4],
-   *  #       [5,  6, 7, 8]]
-   *  
+ *
+ *
+ *
+ *

c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.math.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + * array([[4, 3, 3, 4], + * [5, 6, 7, 8]], dtype=int32) + *

+ *
+ *
* * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. + * The values must be less than {@code num_segments}. + *

Caution: The values are always validated to be in range on CPU, never validated + * on GPU. * @param numSegments The numSegments value * @param data type for {@code UnsortedSegmentMax} output and operands * @return a new instance of UnsortedSegmentMax @@ -2149,8 +2161,7 @@ public UnsortedSegmentMax unsortedSegmentMax(Operand d * Read * the section on segmentation * for an explanation of segments. - *

This operator is similar to the unsorted segment sum operator found - * (here) . + *

This operator is similar to {@code tf.math.unsorted_segment_sum}. * Instead of computing the sum over segments, it computes the minimum such that: *

\(output_i = \min_{j...} data_[j...]\) where min is over tuples {@code j...} such * that {@code segment_ids[j...] == i}. @@ -2158,18 +2169,31 @@ public UnsortedSegmentMax unsortedSegmentMax(Operand d * possible value for the specific numeric type, * {@code output[i] = numeric_limits::max()}. *

For example: - *

-   *  c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
-   *  tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2)
-   *  # ==> [[ 1,  2, 2, 1],
-   *  #       [5,  6, 7, 8]]
-   *  
+ *
+ *
+ *
+ *

c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.math.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + * array([[1, 2, 2, 1], + * [5, 6, 7, 8]], dtype=int32) + *

+ *
+ *
*

If the given segment ID {@code i} is negative, then the corresponding value is * dropped, and will not be included in the result. + *

Caution: On CPU, values in {@code segment_ids} are always validated to be less than + * {@code num_segments}, and an error is thrown for out-of-bound indices. On GPU, this + * does not throw an error for out-of-bound indices. On GPU, out-of-bound indices + * result in safe but unspecified behavior, which may include ignoring + * out-of-bound indices or outputting a tensor with a 0 stored in the first + * dimension of its shape if {@code num_segments} is 0. * * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. + * The values must be less than {@code num_segments}. + *

Caution: The values are always validated to be in range on CPU, never validated + * on GPU. * @param numSegments The numSegments value * @param data type for {@code UnsortedSegmentMin} output and operands * @return a new instance of UnsortedSegmentMin @@ -2184,26 +2208,38 @@ public UnsortedSegmentMin unsortedSegmentMin(Operand d * Read * the section on segmentation * for an explanation of segments. - *

This operator is similar to the unsorted segment sum operator found - * (here) . + *

This operator is similar to {@code tf.math.unsorted_segment_sum}. * Instead of computing the sum over segments, it computes the product of all * entries belonging to a segment such that: *

\(output_i = \prod_{j...} data[j...]\) where the product is over tuples * {@code j...} such that {@code segment_ids[j...] == i}. *

For example: - *

-   *  c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
-   *  tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2)
-   *  # ==> [[ 4,  6, 6, 4],
-   *  #       [5,  6, 7, 8]]
-   *  
+ *
+ *
+ *
+ *

c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.math.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + * array([[4, 6, 6, 4], + * [5, 6, 7, 8]], dtype=int32) + *

+ *
+ *
*

If there is no entry for a given segment ID {@code i}, it outputs 1. *

If the given segment ID {@code i} is negative, then the corresponding value is * dropped, and will not be included in the result. + * Caution: On CPU, values in {@code segment_ids} are always validated to be less than + * {@code num_segments}, and an error is thrown for out-of-bound indices. On GPU, this + * does not throw an error for out-of-bound indices. On GPU, out-of-bound indices + * result in safe but unspecified behavior, which may include ignoring + * out-of-bound indices or outputting a tensor with a 0 stored in the first + * dimension of its shape if {@code num_segments} is 0. * * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. + * The values must be less than {@code num_segments}. + *

Caution: The values are always validated to be in range on CPU, never validated + * on GPU. * @param numSegments The numSegments value * @param data type for {@code UnsortedSegmentProd} output and operands * @return a new instance of UnsortedSegmentProd @@ -2227,19 +2263,32 @@ public UnsortedSegmentProd unsortedSegmentProd(Operand d * If the given segment ID {@code i} is negative, the value is dropped and will not be * added to the sum of the segment. *

{@code num_segments} should equal the number of distinct segment IDs. + *

Caution: On CPU, values in {@code segment_ids} are always validated to be less than + * {@code num_segments}, and an error is thrown for out-of-bound indices. On GPU, this + * does not throw an error for out-of-bound indices. On GPU, out-of-bound indices + * result in safe but unspecified behavior, which may include ignoring + * out-of-bound indices or outputting a tensor with a 0 stored in the first + * dimension of its shape if {@code num_segments} is 0. *

* *
- *
-   *  c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
-   *  tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
-   *  # ==> [[ 5, 5, 5, 5],
-   *  #       [5, 6, 7, 8]]
-   *  
+ *
+ *
+ *
+ *

c = [[1,2,3,4], [5,6,7,8], [4,3,2,1]] + * tf.math.unsorted_segment_sum(c, [0, 1, 0], num_segments=2).numpy() + * array([[5, 5, 5, 5], + * [5, 6, 7, 8]], dtype=int32) + *

+ *
+ *
* * @param data type for {@code output} output * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. + * The values must be less than {@code num_segments}. + *

Caution: The values are always validated to be in range on CPU, never validated + * on GPU. * @param numSegments The numSegments value * @param data type for {@code UnsortedSegmentSum} output and operands * @return a new instance of UnsortedSegmentSum diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 223754b0480..c5d4de34329 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -367,20 +367,20 @@ public final class Ops { public final SparseOps sparse; - public final BitwiseOps bitwise; - public final TpuOps tpu; - public final AudioOps audio; + public final BitwiseOps bitwise; public final MathOps math; - public final SignalOps signal; + public final AudioOps audio; - public final TrainOps train; + public final SignalOps signal; public final QuantizationOps quantization; + public final TrainOps train; + private final Scope scope; Ops(Scope scope) { @@ -398,13 +398,13 @@ public final class Ops { random = new RandomOps(this); strings = new StringsOps(this); sparse = new SparseOps(this); - bitwise = new BitwiseOps(this); tpu = new TpuOps(this); - audio = new AudioOps(this); + bitwise = new BitwiseOps(this); math = new MathOps(this); + audio = new AudioOps(this); signal = new SignalOps(this); - train = new TrainOps(this); quantization = new QuantizationOps(this); + train = new TrainOps(this); } /** @@ -637,11 +637,12 @@ public AssignSubVariableOp assignSubVariableOp(Operand resource * * @param resource handle to the resource in which to store the variable. * @param value the value to set the new tensor to use. + * @param options carries optional attribute values * @return a new instance of AssignVariableOp */ public AssignVariableOp assignVariableOp(Operand resource, - Operand value) { - return AssignVariableOp.create(scope, resource, value); + Operand value, AssignVariableOp.Options... options) { + return AssignVariableOp.create(scope, resource, value, options); } /** @@ -2066,7 +2067,10 @@ public CountUpTo countUpTo(Operand ref, Long limit) { /** * The op extracts fields from a serialized protocol buffers message into tensors. - * The {@code decode_proto} op extracts fields from a serialized protocol buffers + * Note: This API is designed for orthogonality rather than human-friendliness. It + * can be used to parse input protos by hand, but it is intended for use in + * generated code. + *

The {@code decode_proto} op extracts fields from a serialized protocol buffers * message into tensors. The fields in {@code field_names} are decoded and converted * to the corresponding {@code output_types} if possible. *

A {@code message_type} name must be provided to give context for the field names. @@ -2096,6 +2100,16 @@ public CountUpTo countUpTo(Operand ref, Long limit) { * {@code DT_INT64}, or using twos-complement if the caller specifies {@code DT_INT32} in * the {@code output_types} attribute. * + *

  • + *

    {@code map} fields are not directly decoded. They are treated as {@code repeated} fields, + * of the appropriate entry type. The proto-compiler defines entry types for each + * map field. The type-name is the field name, converted to "CamelCase" with + * "Entry" appended. The {@code tf.train.Features.FeatureEntry} message is an example of + * one of these implicit {@code Entry} types. + *

  • + *
  • + *

    {@code enum} fields should be read as int32. + *

  • * *

    Both binary and text proto serializations are supported, and can be * chosen using the {@code format} attribute. @@ -7238,7 +7252,21 @@ public TensorScatterNdAdd tensorScatterNdAdd(Operand ten } /** - * The TensorScatterMax operation + * Apply a sparse update to a tensor taking the element-wise maximum. + * Returns a new tensor copied from {@code tensor} whose values are element-wise maximum between + * tensor and updates according to the indices. + *

    + *
    + *
    + *

    tensor = [0, 0, 0, 0, 0, 0, 0, 0] + * indices = [[1], [4], [5]] + * updates = [1, -1, 1] + * tf.tensor_scatter_nd_max(tensor, indices, updates).numpy() + * array([0, 1, 0, 0, 0, 1, 0, 0], dtype=int32) + *

    + *
    + *
    + *

    Refer to {@code tf.tensor_scatter_nd_update} for more details. * * @param data type for {@code output} output * @param tensor Tensor to update. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java index fb1c5e66b71..cdd9ee0ac44 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/XlaOps.java @@ -86,12 +86,17 @@ public final class XlaOps { * @param input Array or a non-empty tuple of arrays to reduce across replicas. * @param groupAssignment Groups between which the reductions are performed. * @param reduceOp Reduction computation. + * @param mode group mode. + * CrossReplica: group_assignment contains replica_id. Each group contains the + * replicas for the current partition. + * CrossReplicaAndPartition: group_assignment contains replica_id. Each group + * contains the replicas for all partitions. * @param data type for {@code XlaAllReduce} output and operands * @return a new instance of AllReduce */ public AllReduce allReduce(Operand input, - Operand groupAssignment, String reduceOp) { - return AllReduce.create(scope, input, groupAssignment, reduceOp); + Operand groupAssignment, String reduceOp, String mode) { + return AllReduce.create(scope, input, groupAssignment, reduceOp, mode); } /** @@ -130,16 +135,17 @@ public ClusterOutput clusterOutput(Operand input) { * . * * @param data type for {@code output} output - * @param lhs the input tensor - * @param rhs the kernel tensor - * @param windowStrides the inter-window strides - * @param padding the padding to apply at the start and end of each input dimensions + * @param lhs input tensor + * @param rhs kernel tensor + * @param windowStrides inter-window strides + * @param padding padding to apply at the start and end of each input dimensions * @param lhsDilation dilation to apply between input elements * @param rhsDilation dilation to apply between kernel elements * @param featureGroupCount number of feature groups for grouped convolution. - * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. - * @param precisionConfig a serialized xla::PrecisionConfig proto. - * @param preferredElementType The type of the tensor. + * @param dimensionNumbers serialized xla::ConvolutionDimensionNumbers proto. + * @param precisionConfig serialized xla::PrecisionConfig proto. + * @param preferredElementType type of the tensor. + * @param options carries optional attribute values * @param data type for {@code XlaConvV2} output and operands * @param data type for {@code XlaConvV2} output and operands * @return a new instance of Conv @@ -147,8 +153,9 @@ public ClusterOutput clusterOutput(Operand input) { public Conv conv(Operand lhs, Operand rhs, Operand windowStrides, Operand padding, Operand lhsDilation, Operand rhsDilation, Operand featureGroupCount, - String dimensionNumbers, String precisionConfig, Class preferredElementType) { - return Conv.create(scope, lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, featureGroupCount, dimensionNumbers, precisionConfig, preferredElementType); + String dimensionNumbers, String precisionConfig, Class preferredElementType, + Conv.Options... 
options) { + return Conv.create(scope, lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, featureGroupCount, dimensionNumbers, precisionConfig, preferredElementType, options); } /** diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Compute_func_Pointer_TF_OpKernelContext.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Compute_func_Pointer_TF_OpKernelContext.java index 0bb944eed82..9bd5bc022b8 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Compute_func_Pointer_TF_OpKernelContext.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Compute_func_Pointer_TF_OpKernelContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Create_func_TF_OpKernelConstruction.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Create_func_TF_OpKernelConstruction.java index 01c6b8b9cd8..f06b2a1814c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Create_func_TF_OpKernelConstruction.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Create_func_TF_OpKernelConstruction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Deallocator_Pointer_long_Pointer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Deallocator_Pointer_long_Pointer.java index 245b3e2018a..c3759e7b4b4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Deallocator_Pointer_long_Pointer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Deallocator_Pointer_long_Pointer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Delete_func_Pointer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Delete_func_Pointer.java index 2e3b079ae0a..78502fe2469 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Delete_func_Pointer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Delete_func_Pointer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradFunc.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradFunc.java index df5ceb98746..6235d14f0d2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradFunc.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradFunc.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by 
JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradOpRegistry.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradOpRegistry.java index 7a1d69bafbd..f170c382b0f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradOpRegistry.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/GradOpRegistry.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Listener_BytePointer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Listener_BytePointer.java index b01b2c229ea..11d41db1b5d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Listener_BytePointer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Listener_BytePointer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Listener_String.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Listener_String.java index 2ea782928ac..07d245d0b5c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Listener_String.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Listener_String.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NameMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NameMap.java index 0be7fab2798..5657520e96f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NameMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NameMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeGraphPointer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeGraphPointer.java index b947a4b322f..eb94a898ee0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeGraphPointer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeGraphPointer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOperation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOperation.java index af771a0aa12..9883b1acbde 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOperation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOperation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java index cd3020eef5d..c513954ce41 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutput.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutputVector.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutputVector.java index 9dc9dd5b971..d9d8f654e0b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutputVector.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeOutputVector.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java index 193d3f86312..9307a1114a6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NativeStatus.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java index 0bb4543d41c..19fbbdf86b4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Node.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; @@ -153,4 +153,11 @@ public class Node extends Pointer { // Called after an attr has changed. Decides whether we need to update some // property of the node (stored in props_). public native void UpdateProperties(); + + // Erases type information from the node. + public native void ClearTypeInfo(); + + // Called after an incident non-control edge has changed. Does nothing if not + // all input edges are defined. 
+ public native void RunForwardTypeInference(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NodeBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NodeBuilder.java index 5922ff38b0a..9aea3e1c83d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NodeBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/NodeBuilder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Shape_inference_func_TF_ShapeInferenceContext_TF_Status.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Shape_inference_func_TF_ShapeInferenceContext_TF_Status.java index b2168d8a48c..dd0e832881a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Shape_inference_func_TF_ShapeInferenceContext_TF_Status.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Shape_inference_func_TF_ShapeInferenceContext_TF_Status.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_Context.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_Context.java index f3309ed7530..548d576b560 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_Context.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_Context.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_ContextOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_ContextOptions.java index 93223d33e0c..fb64dbcb942 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_ContextOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_ContextOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_Op.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_Op.java index 9c68d0d9920..31d2f11ad90 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_Op.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_Op.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_OpAttrs.java 
b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_OpAttrs.java index 30398899f83..fafd4fb4da6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_OpAttrs.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_OpAttrs.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorDebugInfo.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorDebugInfo.java index fcdf2858a91..81da1e06f55 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorDebugInfo.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorDebugInfo.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorHandle.java index 7fa3f7ec6cb..c0dfdde664e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TFE_TensorHandle.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; @@ -13,7 +13,7 @@ // // Like a TF_Tensor, a TFE_TensorHandle refers to a tensor with a value, shape, // type etc. Unlike a TF_Tensor, a TFE_TensorHandle may refer to such tensors -// placed in memory of different devices or remote address spaces. +// placed in the memory of different devices or remote address spaces. @Opaque @Properties(inherit = org.tensorflow.internal.c_api.presets.tensorflow.class) public class TFE_TensorHandle extends org.tensorflow.internal.c_api.AbstractTFE_TensorHandle { /** Empty constructor. Calls {@code super((Pointer)null)}. 
*/ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_AllocatorAttributes.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_AllocatorAttributes.java index 4f622e33714..357b2f9de65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_AllocatorAttributes.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_AllocatorAttributes.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ApiDefMap.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ApiDefMap.java index 8da3d79e7f7..ef08c2003d5 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ApiDefMap.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ApiDefMap.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_AttrMetadata.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_AttrMetadata.java index 0b83058a276..c6945d8906d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_AttrMetadata.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_AttrMetadata.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Buffer.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Buffer.java index a8653a2049f..e3c0b9b5625 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Buffer.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Buffer.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DeprecatedSession.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DeprecatedSession.java index c2923086177..859803d0e5a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DeprecatedSession.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DeprecatedSession.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DeviceList.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DeviceList.java index bc86ab8a823..de72b3c9503 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DeviceList.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DeviceList.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DimensionHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DimensionHandle.java index e4bb017db8c..6743400fd7c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DimensionHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_DimensionHandle.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java index 29d075cafc1..390a8bc77fb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Function.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_FunctionOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_FunctionOptions.java index fc12f275678..ff6cd4ad17c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_FunctionOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_FunctionOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java index c4d88baf176..b6c60720b51 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Graph.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefOptions.java index 69ccb1f83ff..4c4ab67c75c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT 
EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefResults.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefResults.java index 269a1b07f17..19eb011cc91 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefResults.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ImportGraphDefResults.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Input.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Input.java index 9e73e8dbf78..76622aeda6f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Input.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Input.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_KernelBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_KernelBuilder.java index e36b7b206bf..cb9b79815c3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_KernelBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_KernelBuilder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Library.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Library.java index 6be7fcbec8d..d869e510d84 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Library.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Library.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpDefinitionBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpDefinitionBuilder.java index 9949307e8e9..5b5aa507e04 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpDefinitionBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpDefinitionBuilder.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpKernelConstruction.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpKernelConstruction.java index 0ee6afaae99..3c4bbd5ff8f 100644 
--- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpKernelConstruction.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpKernelConstruction.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpKernelContext.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpKernelContext.java index 24c0373404d..a62de209c65 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpKernelContext.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OpKernelContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java index 96e5ef47b38..17b40eb0b9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Operation.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java index 71738f4ac02..6531a3a5f82 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_OperationDescription.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Output.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Output.java index bd36144620d..0498c9572fd 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Output.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Output.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Scope.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Scope.java index bea7ec98a25..77a594c3848 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Scope.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Scope.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS 
FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Server.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Server.java index aceb639f7af..c4abf9cd874 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Server.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Server.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Session.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Session.java index a034dd7f647..1fbe6a0c56b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Session.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Session.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_SessionOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_SessionOptions.java index 749655f6209..009dda458b6 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_SessionOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_SessionOptions.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeHandle.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeHandle.java index 2721dcac4b6..5e1386a1e0d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeHandle.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeHandle.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeInferenceContext.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeInferenceContext.java index 15f0b1dc8b7..627bca2fd93 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeInferenceContext.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_ShapeInferenceContext.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Status.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Status.java index 7a70dc56e95..684335ca83b 100644 --- 
a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Status.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Status.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_StringView.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_StringView.java index 85ac4ee877f..9d3768b8264 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_StringView.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_StringView.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString.java index 7b4c3ad59a4..3f4d1531e26 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Large.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Large.java index e9031c07e18..1e1beba05f4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Large.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Large.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Offset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Offset.java index 19f06db9224..016b058e2ff 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Offset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Offset.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Raw.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Raw.java index c9dca734aa0..e67cb474c79 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Raw.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Raw.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package 
org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Small.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Small.java index 8efeeb5176d..1be7d9f6851 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Small.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Small.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Union.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Union.java index d7ba9e1baa4..643add57748 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Union.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_Union.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_View.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_View.java index b6c3278403d..c5a8cc47398 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_View.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_TString_View.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Tensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Tensor.java index 9966d1cbfe3..6e462cb1d9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Tensor.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_Tensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_WhileParams.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_WhileParams.java index 1777d9a4d35..80dfce4c69d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_WhileParams.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/TF_WhileParams.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Tensor.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Tensor.java index 3a4952d5b63..f1efc2b1d4d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Tensor.java 
+++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/Tensor.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java index 0e153289dff..06a5b588d0f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/internal/c_api/global/tensorflow.java @@ -1,4 +1,4 @@ -// Targeted by JavaCPP version 1.5.6: DO NOT EDIT THIS FILE +// Targeted by JavaCPP version 1.5.7: DO NOT EDIT THIS FILE package org.tensorflow.internal.c_api.global; @@ -3509,6 +3509,23 @@ public static native void TF_OpKernelConstruction_GetAttrString( TF_OpKernelConstruction ctx, String attr_name, @Cast("char*") byte[] val, @Cast("size_t") long max_length, TF_Status status); +// Interprets the named kernel construction attribute as tensor and places it +// into *val. Allocates a new TF_Tensor which the caller is expected to take +// ownership of (and can deallocate using TF_DeleteTensor). *status is set to +// TF_OK. +// +// If the attribute could not be found or could not be interpreted as +// tensor, *status is populated with an error. +public static native void TF_OpKernelConstruction_GetAttrTensor( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("TF_Tensor**") PointerPointer val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrTensor( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @ByPtrPtr TF_Tensor val, + TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrTensor( + TF_OpKernelConstruction ctx, String attr_name, @ByPtrPtr TF_Tensor val, + TF_Status status); + // Interprets the named kernel construction attribute as a TF_DataType array and // places it into *vals. *status is set to TF_OK. // `vals` must point to an array of length at least `max_values` (ideally set @@ -3671,6 +3688,24 @@ public static native void TF_OpKernelConstruction_GetAttrStringList( @Cast("size_t*") SizeTPointer lengths, int max_values, Pointer storage, @Cast("size_t") long storage_size, TF_Status status); +// Interprets the named kernel construction attribute as tensor array and places +// it into *vals. *status is set to TF_OK. +// `vals` must point to an array of length at least `max_values` +// (ideally set to list_size from TF_OpKernelConstruction_GetAttrSize(ctx, +// attr_name, list_size, total_size)). +// +// The caller takes ownership of all the non-null TF_Tensor* entries in `vals` +// (which can be deleted using TF_DeleteTensor(vals[i])). 
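// Illustrative aside, not part of the generated bindings above or below: a minimal sketch of how
// the newly mapped TF_OpKernelConstruction_GetAttrTensor overload might be called from a
// kernel-create callback. The class name, helper name, `ctx` argument and attribute name are
// assumptions made for this example; only the native calls themselves come from this diff.
import static org.tensorflow.internal.c_api.global.tensorflow.*;

import org.tensorflow.internal.c_api.TF_OpKernelConstruction;
import org.tensorflow.internal.c_api.TF_Status;
import org.tensorflow.internal.c_api.TF_Tensor;

final class AttrTensorExample {
  // Reads a tensor-valued attribute; the caller owns the result and must free it with TF_DeleteTensor.
  static TF_Tensor readTensorAttr(TF_OpKernelConstruction ctx, String attrName) {
    TF_Status status = TF_NewStatus();
    try {
      TF_Tensor value = new TF_Tensor();  // output parameter, filled in by the native call
      TF_OpKernelConstruction_GetAttrTensor(ctx, attrName, value, status);
      if (TF_GetCode(status) != TF_OK) {
        throw new IllegalStateException(TF_Message(status).getString());
      }
      return value;
    } finally {
      TF_DeleteStatus(status);
    }
  }
}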
+public static native void TF_OpKernelConstruction_GetAttrTensorList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @Cast("TF_Tensor**") PointerPointer vals, + int max_values, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrTensorList( + TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, @ByPtrPtr TF_Tensor vals, + int max_values, TF_Status status); +public static native void TF_OpKernelConstruction_GetAttrTensorList( + TF_OpKernelConstruction ctx, String attr_name, @ByPtrPtr TF_Tensor vals, + int max_values, TF_Status status); + // Return true if the kernel construction has the attr_name public static native @Cast("bool") boolean TF_OpKernelConstruction_HasAttr( TF_OpKernelConstruction ctx, @Cast("const char*") BytePointer attr_name, TF_Status status); @@ -4286,7 +4321,7 @@ public static native void TFE_ContextSetThreadLocalDevicePlacementPolicy( public static native @Cast("TFE_ContextDevicePlacementPolicy") int TFE_ContextGetDevicePlacementPolicy(TFE_Context ctx); // A tensorflow.ServerDef specifies remote workers (in addition to the current -// workers name). Operations created on this context can then be executed on +// workers name). Operations created in this context can then be executed on // any of these remote workers by setting an appropriate device. // // If the following is set, all servers identified by the @@ -4816,7 +4851,7 @@ public static native void TFE_ContextExportRunMetadata(TFE_Context ctx, // Ends a step. When there is no active step (that is, every started step has // been ended) step containers will be cleared. Note: it is not safe to call -// TFE_ContextEndStep while ops which rely on the step container may be running. +// TFE_ContextEndStep while ops that rely on the step container may be running. public static native void TFE_ContextEndStep(TFE_Context ctx); // #ifdef __cplusplus @@ -5042,6 +5077,8 @@ public static native void TFE_OpSetAttrValueProto(@Const TFE_Op op, @Namespace("tensorflow") public static native @StdString BytePointer TfCheckOpHelperOutOfLine( @Const @ByRef NativeStatus v, String msg); +@Namespace("tensorflow") public static native @StdString BytePointer error_name(@Cast("tensorflow::error::Code") int code); + @Namespace("tensorflow") public static native @StdString BytePointer TfCheckOpHelper(@ByVal NativeStatus v, @Cast("const char*") BytePointer msg); @Namespace("tensorflow") public static native @StdString BytePointer TfCheckOpHelper(@ByVal NativeStatus v, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableDenseHashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableDenseHashTable.java new file mode 100644 index 00000000000..2625d9e0395 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableDenseHashTable.java @@ -0,0 +1,259 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.core; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.family.TType; + +/** + * Creates an empty anonymous mutable hash table that uses tensors as the backing store. + * This op creates a new anonymous mutable hash table (as a resource) everytime + * it is executed, with the specified dtype of its keys and values, + * returning the resource handle. Each value must be a scalar. + * Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + *
<p>It uses "open addressing" with quadratic reprobing to resolve
+ * collisions.
+ * <p>
    The table is anonymous in the sense that it can only be + * accessed by the returned resource handle (e.g. it cannot be looked up + * by a name in a resource manager). The table will be automatically + * deleted when all resource handles pointing to it are gone. + */ +@OpMetadata( + opType = AnonymousMutableDenseHashTable.OP_NAME, + inputsClass = AnonymousMutableDenseHashTable.Inputs.class +) +public final class AnonymousMutableDenseHashTable extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "AnonymousMutableDenseHashTable"; + + private Output tableHandle; + + @SuppressWarnings("unchecked") + public AnonymousMutableDenseHashTable(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + tableHandle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new AnonymousMutableDenseHashTable operation. + * + * @param scope current scope + * @param emptyKey The key used to represent empty key buckets internally. Must not + * be used in insert or lookup operations. + * @param deletedKey The deletedKey value + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for {@code AnonymousMutableDenseHashTable} output and operands + * @param data type for {@code AnonymousMutableDenseHashTable} output and operands + * @return a new instance of AnonymousMutableDenseHashTable + */ + @Endpoint( + describeByClass = true + ) + public static AnonymousMutableDenseHashTable create( + Scope scope, Operand emptyKey, Operand deletedKey, Class valueDtype, + Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "AnonymousMutableDenseHashTable"); + opBuilder.addInput(emptyKey.asOutput()); + opBuilder.addInput(deletedKey.asOutput()); + opBuilder.setAttr("value_dtype", Operands.toDataType(valueDtype)); + if (options != null) { + for (Options opts : options) { + if (opts.valueShape != null) { + opBuilder.setAttr("value_shape", opts.valueShape); + } + if (opts.initialNumBuckets != null) { + opBuilder.setAttr("initial_num_buckets", opts.initialNumBuckets); + } + if (opts.maxLoadFactor != null) { + opBuilder.setAttr("max_load_factor", opts.maxLoadFactor); + } + } + } + return new AnonymousMutableDenseHashTable(opBuilder.build()); + } + + /** + * Sets the valueShape option. + * + * @param valueShape The shape of each value. + * @return this Options instance. + */ + public static Options valueShape(Shape valueShape) { + return new Options().valueShape(valueShape); + } + + /** + * Sets the initialNumBuckets option. + * + * @param initialNumBuckets The initial number of hash table buckets. Must be a power + * to 2. + * @return this Options instance. + */ + public static Options initialNumBuckets(Long initialNumBuckets) { + return new Options().initialNumBuckets(initialNumBuckets); + } + + /** + * Sets the maxLoadFactor option. + * + * @param maxLoadFactor The maximum ratio between number of entries and number of + * buckets before growing the table. Must be between 0 and 1. + * @return this Options instance. + */ + public static Options maxLoadFactor(Float maxLoadFactor) { + return new Options().maxLoadFactor(maxLoadFactor); + } + + /** + * Gets tableHandle. + * The resource handle to the newly created hash-table resource. + * @return tableHandle. 
+ */ + public Output tableHandle() { + return tableHandle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) tableHandle; + } + + /** + * Optional attributes for {@link org.tensorflow.op.core.AnonymousMutableDenseHashTable} + */ + public static class Options { + private Shape valueShape; + + private Long initialNumBuckets; + + private Float maxLoadFactor; + + private Options() { + } + + /** + * Sets the valueShape option. + * + * @param valueShape The shape of each value. + * @return this Options instance. + */ + public Options valueShape(Shape valueShape) { + this.valueShape = valueShape; + return this; + } + + /** + * Sets the initialNumBuckets option. + * + * @param initialNumBuckets The initial number of hash table buckets. Must be a power + * to 2. + * @return this Options instance. + */ + public Options initialNumBuckets(Long initialNumBuckets) { + this.initialNumBuckets = initialNumBuckets; + return this; + } + + /** + * Sets the maxLoadFactor option. + * + * @param maxLoadFactor The maximum ratio between number of entries and number of + * buckets before growing the table. Must be between 0 and 1. + * @return this Options instance. + */ + public Options maxLoadFactor(Float maxLoadFactor) { + this.maxLoadFactor = maxLoadFactor; + return this; + } + } + + @OpInputsMetadata( + outputsClass = AnonymousMutableDenseHashTable.class + ) + public static class Inputs extends RawOpInputs { + /** + * The key used to represent empty key buckets internally. Must not + * be used in insert or lookup operations. + */ + public final Operand emptyKey; + + /** + * The deletedKey input + */ + public final Operand deletedKey; + + /** + * Type of the table keys. + */ + public final DataType keyDtype; + + /** + * Type of the table values. + */ + public final DataType valueDtype; + + /** + * The shape of each value. + */ + public final Shape valueShape; + + /** + * The initial number of hash table buckets. Must be a power + * to 2. + */ + public final long initialNumBuckets; + + /** + * The maximum ratio between number of entries and number of + * buckets before growing the table. Must be between 0 and 1. + */ + public final float maxLoadFactor; + + public Inputs(GraphOperation op) { + super(new AnonymousMutableDenseHashTable(op), op, Arrays.asList("key_dtype", "value_dtype", "value_shape", "initial_num_buckets", "max_load_factor")); + int inputIndex = 0; + emptyKey = (Operand) op.input(inputIndex++); + deletedKey = (Operand) op.input(inputIndex++); + keyDtype = op.attributes().getAttrType("key_dtype"); + valueDtype = op.attributes().getAttrType("value_dtype"); + valueShape = op.attributes().getAttrShape("value_shape"); + initialNumBuckets = op.attributes().getAttrInt("initial_num_buckets"); + maxLoadFactor = op.attributes().getAttrFloat("max_load_factor"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTable.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTable.java new file mode 100644 index 00000000000..6d9688fcbea --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTable.java @@ -0,0 +1,124 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.core; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.family.TType; + +/** + * Creates an empty anonymous mutable hash table. + * This op creates a new anonymous mutable hash table (as a resource) everytime + * it is executed, with the specified dtype of its keys and values, + * returning the resource handle. Each value must be a scalar. + * Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * The table is anonymous in the sense that it can only be + * accessed by the returned resource handle (e.g. it cannot be looked up + * by a name in a resource manager). The table will be automatically + * deleted when all resource handles pointing to it are gone. + */ +@OpMetadata( + opType = AnonymousMutableHashTable.OP_NAME, + inputsClass = AnonymousMutableHashTable.Inputs.class +) +public final class AnonymousMutableHashTable extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "AnonymousMutableHashTable"; + + private Output tableHandle; + + @SuppressWarnings("unchecked") + public AnonymousMutableHashTable(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + tableHandle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new AnonymousMutableHashTable operation. + * + * @param scope current scope + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param data type for {@code AnonymousMutableHashTable} output and operands + * @param data type for {@code AnonymousMutableHashTable} output and operands + * @return a new instance of AnonymousMutableHashTable + */ + @Endpoint( + describeByClass = true + ) + public static AnonymousMutableHashTable create(Scope scope, + Class keyDtype, Class valueDtype) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "AnonymousMutableHashTable"); + opBuilder.setAttr("key_dtype", Operands.toDataType(keyDtype)); + opBuilder.setAttr("value_dtype", Operands.toDataType(valueDtype)); + return new AnonymousMutableHashTable(opBuilder.build()); + } + + /** + * Gets tableHandle. + * The resource handle to the newly created hash-table resource. + * @return tableHandle. 
+ */ + public Output tableHandle() { + return tableHandle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) tableHandle; + } + + @OpInputsMetadata( + outputsClass = AnonymousMutableHashTable.class + ) + public static class Inputs extends RawOpInputs { + /** + * Type of the table keys. + */ + public final DataType keyDtype; + + /** + * Type of the table values. + */ + public final DataType valueDtype; + + public Inputs(GraphOperation op) { + super(new AnonymousMutableHashTable(op), op, Arrays.asList("key_dtype", "value_dtype")); + int inputIndex = 0; + keyDtype = op.attributes().getAttrType("key_dtype"); + valueDtype = op.attributes().getAttrType("value_dtype"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTableOfTensors.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTableOfTensors.java new file mode 100644 index 00000000000..f2ae78fbcc1 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AnonymousMutableHashTableOfTensors.java @@ -0,0 +1,170 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.core; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.Output; +import org.tensorflow.ndarray.Shape; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.family.TType; + +/** + * Creates an empty anonymous mutable hash table of vector values. + * This op creates a new anonymous mutable hash table (as a resource) everytime + * it is executed, with the specified dtype of its keys and values, + * returning the resource handle. Each value must be a vector. + * Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * The table is anonymous in the sense that it can only be + * accessed by the returned resource handle (e.g. it cannot be looked up + * by a name in a resource manager). The table will be automatically + * deleted when all resource handles pointing to it are gone. 
+ */ +@OpMetadata( + opType = AnonymousMutableHashTableOfTensors.OP_NAME, + inputsClass = AnonymousMutableHashTableOfTensors.Inputs.class +) +public final class AnonymousMutableHashTableOfTensors extends RawOp implements Operand { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "AnonymousMutableHashTableOfTensors"; + + private Output tableHandle; + + @SuppressWarnings("unchecked") + public AnonymousMutableHashTableOfTensors(Operation operation) { + super(operation, OP_NAME); + int outputIdx = 0; + tableHandle = operation.output(outputIdx++); + } + + /** + * Factory method to create a class wrapping a new AnonymousMutableHashTableOfTensors operation. + * + * @param scope current scope + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attribute values + * @param data type for {@code AnonymousMutableHashTableOfTensors} output and operands + * @param data type for {@code AnonymousMutableHashTableOfTensors} output and operands + * @return a new instance of AnonymousMutableHashTableOfTensors + */ + @Endpoint( + describeByClass = true + ) + public static AnonymousMutableHashTableOfTensors create( + Scope scope, Class keyDtype, Class valueDtype, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "AnonymousMutableHashTableOfTensors"); + opBuilder.setAttr("key_dtype", Operands.toDataType(keyDtype)); + opBuilder.setAttr("value_dtype", Operands.toDataType(valueDtype)); + if (options != null) { + for (Options opts : options) { + if (opts.valueShape != null) { + opBuilder.setAttr("value_shape", opts.valueShape); + } + } + } + return new AnonymousMutableHashTableOfTensors(opBuilder.build()); + } + + /** + * Sets the valueShape option. + * + * @param valueShape the valueShape option + * @return this Options instance. + */ + public static Options valueShape(Shape valueShape) { + return new Options().valueShape(valueShape); + } + + /** + * Gets tableHandle. + * The resource handle to the newly created hash-table resource. + * @return tableHandle. + */ + public Output tableHandle() { + return tableHandle; + } + + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) tableHandle; + } + + /** + * Optional attributes for {@link org.tensorflow.op.core.AnonymousMutableHashTableOfTensors} + */ + public static class Options { + private Shape valueShape; + + private Options() { + } + + /** + * Sets the valueShape option. + * + * @param valueShape the valueShape option + * @return this Options instance. + */ + public Options valueShape(Shape valueShape) { + this.valueShape = valueShape; + return this; + } + } + + @OpInputsMetadata( + outputsClass = AnonymousMutableHashTableOfTensors.class + ) + public static class Inputs extends RawOpInputs { + /** + * Type of the table keys. + */ + public final DataType keyDtype; + + /** + * Type of the table values. 
+ */ + public final DataType valueDtype; + + /** + * The valueShape attribute + */ + public final Shape valueShape; + + public Inputs(GraphOperation op) { + super(new AnonymousMutableHashTableOfTensors(op), op, Arrays.asList("key_dtype", "value_dtype", "value_shape")); + int inputIndex = 0; + keyDtype = op.attributes().getAttrType("key_dtype"); + valueDtype = op.attributes().getAttrType("value_dtype"); + valueShape = op.attributes().getAttrShape("value_shape"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java index 8a158a0f014..f37034355af 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/AssignVariableOp.java @@ -58,19 +58,58 @@ public AssignVariableOp(Operation operation) { * @param scope current scope * @param resource handle to the resource in which to store the variable. * @param value the value to set the new tensor to use. + * @param options carries optional attribute values * @return a new instance of AssignVariableOp */ @Endpoint( describeByClass = true ) public static AssignVariableOp create(Scope scope, Operand resource, - Operand value) { + Operand value, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "AssignVariableOp"); opBuilder.addInput(resource.asOutput()); opBuilder.addInput(value.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.validateShape != null) { + opBuilder.setAttr("validate_shape", opts.validateShape); + } + } + } return new AssignVariableOp(opBuilder.build()); } + /** + * Sets the validateShape option. + * + * @param validateShape the validateShape option + * @return this Options instance. + */ + public static Options validateShape(Boolean validateShape) { + return new Options().validateShape(validateShape); + } + + /** + * Optional attributes for {@link org.tensorflow.op.core.AssignVariableOp} + */ + public static class Options { + private Boolean validateShape; + + private Options() { + } + + /** + * Sets the validateShape option. + * + * @param validateShape the validateShape option + * @return this Options instance. 
+ */ + public Options validateShape(Boolean validateShape) { + this.validateShape = validateShape; + return this; + } + } + @OpInputsMetadata( outputsClass = AssignVariableOp.class ) @@ -90,12 +129,18 @@ public static class Inputs extends RawOpInputs { */ public final DataType dtype; + /** + * The validateShape attribute + */ + public final boolean validateShape; + public Inputs(GraphOperation op) { - super(new AssignVariableOp(op), op, Arrays.asList("dtype")); + super(new AssignVariableOp(op), op, Arrays.asList("dtype", "validate_shape")); int inputIndex = 0; resource = (Operand) op.input(inputIndex++); value = (Operand) op.input(inputIndex++); dtype = op.attributes().getAttrType("dtype"); + validateShape = op.attributes().getAttrBool("validate_shape"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java index 7c1818a97eb..a871c46e2a0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/DecodeProto.java @@ -39,7 +39,10 @@ /** * The op extracts fields from a serialized protocol buffers message into tensors. - * The {@code decode_proto} op extracts fields from a serialized protocol buffers + * Note: This API is designed for orthogonality rather than human-friendliness. It + * can be used to parse input protos by hand, but it is intended for use in + * generated code. + *
<p>The {@code decode_proto} op extracts fields from a serialized protocol buffers
 * message into tensors. The fields in {@code field_names} are decoded and converted
 * to the corresponding {@code output_types} if possible.
 * <p>
A {@code message_type} name must be provided to give context for the field names. @@ -69,6 +72,16 @@ * {@code DT_INT64}, or using twos-complement if the caller specifies {@code DT_INT32} in
 * the {@code output_types} attribute.
 * </li>
+ * <li>
+ * <p>{@code map} fields are not directly decoded. They are treated as {@code repeated} fields,
+ * of the appropriate entry type. The proto-compiler defines entry types for each
+ * map field. The type-name is the field name, converted to "CamelCase" with
+ * "Entry" appended. The {@code tf.train.Features.FeatureEntry} message is an example of
+ * one of these implicit {@code Entry} types.
+ * </li>
+ * <li>
+ * <p>{@code enum} fields should be read as int32.
+ * </li>
 * </ul>
 * <p>
    Both binary and text proto serializations are supported, and can be * chosen using the {@code format} attribute. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FileSystemSetConfiguration.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FileSystemSetConfiguration.java new file mode 100644 index 00000000000..ee73c5ee071 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/FileSystemSetConfiguration.java @@ -0,0 +1,98 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.core; + +import java.util.Arrays; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.types.TString; + +/** + * Set configuration of the file system. + */ +@OpMetadata( + opType = FileSystemSetConfiguration.OP_NAME, + inputsClass = FileSystemSetConfiguration.Inputs.class +) +public final class FileSystemSetConfiguration extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "FileSystemSetConfiguration"; + + public FileSystemSetConfiguration(Operation operation) { + super(operation, OP_NAME); + } + + /** + * Factory method to create a class wrapping a new FileSystemSetConfiguration operation. + * + * @param scope current scope + * @param scheme File system scheme. + * @param key The name of the configuration option. + * @param value The value of the configuration option. + * @return a new instance of FileSystemSetConfiguration + */ + @Endpoint( + describeByClass = true + ) + public static FileSystemSetConfiguration create(Scope scope, Operand scheme, + Operand key, Operand value) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "FileSystemSetConfiguration"); + opBuilder.addInput(scheme.asOutput()); + opBuilder.addInput(key.asOutput()); + opBuilder.addInput(value.asOutput()); + return new FileSystemSetConfiguration(opBuilder.build()); + } + + @OpInputsMetadata( + outputsClass = FileSystemSetConfiguration.class + ) + public static class Inputs extends RawOpInputs { + /** + * File system scheme. + */ + public final Operand scheme; + + /** + * The name of the configuration option. + */ + public final Operand key; + + /** + * The value of the configuration option. 
+ */ + public final Operand value; + + public Inputs(GraphOperation op) { + super(new FileSystemSetConfiguration(op), op, Arrays.asList()); + int inputIndex = 0; + scheme = (Operand) op.input(inputIndex++); + key = (Operand) op.input(inputIndex++); + value = (Operand) op.input(inputIndex++); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java index 07de1b38c81..cd6f801f7a9 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/core/TensorScatterNdMax.java @@ -35,7 +35,21 @@ import org.tensorflow.types.family.TType; /** - * The TensorScatterMax operation + * Apply a sparse update to a tensor taking the element-wise maximum. + * Returns a new tensor copied from {@code tensor} whose values are element-wise maximum between + * tensor and updates according to the indices. + *
<blockquote>
+ * <blockquote>
+ * <blockquote>
+ * <pre>
+ * tensor = [0, 0, 0, 0, 0, 0, 0, 0]
+ * indices = [[1], [4], [5]]
+ * updates = [1, -1, 1]
+ * tf.tensor_scatter_nd_max(tensor, indices, updates).numpy()
+ * array([0, 1, 0, 0, 0, 1, 0, 0], dtype=int32)
+ * </pre>
+ * </blockquote>
+ * </blockquote>
+ * </blockquote>
+ * <p>
    Refer to {@code tf.tensor_scatter_nd_update} for more details. * * @param data type for {@code output} output */ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java index 5ecb7359a44..b5b5afeef67 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/AnonymousIterator.java @@ -20,6 +20,7 @@ import java.util.Arrays; import java.util.List; import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; import org.tensorflow.Operation; import org.tensorflow.OperationBuilder; import org.tensorflow.Output; @@ -45,26 +46,23 @@ @Operator( group = "data" ) -public final class AnonymousIterator extends RawOp { +public final class AnonymousIterator extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "AnonymousIteratorV2"; + public static final String OP_NAME = "AnonymousIteratorV3"; private Output handle; - private Output deleter; - @SuppressWarnings("unchecked") public AnonymousIterator(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); - deleter = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new AnonymousIteratorV2 operation. + * Factory method to create a class wrapping a new AnonymousIteratorV3 operation. * * @param scope current scope * @param outputTypes The value of the outputTypes attribute @@ -98,13 +96,10 @@ public Output handle() { return handle; } - /** - * Gets deleter. - * A variant deleter that should be passed into the op that deletes the iterator. - * @return deleter. - */ - public Output deleter() { - return deleter; + @Override + @SuppressWarnings("unchecked") + public Output asOutput() { + return (Output) handle; } @OpInputsMetadata( diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DataServiceDatasetV2.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DataServiceDataset.java similarity index 83% rename from tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DataServiceDatasetV2.java rename to tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DataServiceDataset.java index 0574b406440..1b4392f97bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DataServiceDatasetV2.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/DataServiceDataset.java @@ -19,6 +19,7 @@ import java.util.Arrays; import java.util.List; +import org.tensorflow.ConcreteFunction; import org.tensorflow.GraphOperation; import org.tensorflow.Operand; import org.tensorflow.Operation; @@ -42,29 +43,29 @@ * Creates a dataset that reads data from the tf.data service. 
*/ @OpMetadata( - opType = DataServiceDatasetV2.OP_NAME, - inputsClass = DataServiceDatasetV2.Inputs.class + opType = DataServiceDataset.OP_NAME, + inputsClass = DataServiceDataset.Inputs.class ) @Operator( group = "data" ) -public final class DataServiceDatasetV2 extends RawOp implements Operand { +public final class DataServiceDataset extends RawOp implements Operand { /** * The name of this op, as known by TensorFlow core engine */ - public static final String OP_NAME = "DataServiceDatasetV2"; + public static final String OP_NAME = "DataServiceDatasetV3"; private Output handle; @SuppressWarnings("unchecked") - public DataServiceDatasetV2(Operation operation) { + public DataServiceDataset(Operation operation) { super(operation, OP_NAME); int outputIdx = 0; handle = operation.output(outputIdx++); } /** - * Factory method to create a class wrapping a new DataServiceDatasetV2 operation. + * Factory method to create a class wrapping a new DataServiceDatasetV3 operation. * * @param scope current scope * @param datasetId The datasetId value @@ -78,18 +79,20 @@ public DataServiceDatasetV2(Operation operation) { * @param iterationCounter The iterationCounter value * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param uncompressFn The value of the uncompressFn attribute * @param options carries optional attribute values - * @return a new instance of DataServiceDatasetV2 + * @return a new instance of DataServiceDataset */ @Endpoint( describeByClass = true ) - public static DataServiceDatasetV2 create(Scope scope, Operand datasetId, + public static DataServiceDataset create(Scope scope, Operand datasetId, Operand processingMode, Operand address, Operand protocol, Operand jobName, Operand consumerIndex, Operand numConsumers, Operand maxOutstandingRequests, Operand iterationCounter, - List> outputTypes, List outputShapes, Options... options) { - OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "DataServiceDatasetV2"); + List> outputTypes, List outputShapes, + ConcreteFunction uncompressFn, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "DataServiceDataset"); opBuilder.addInput(datasetId.asOutput()); opBuilder.addInput(processingMode.asOutput()); opBuilder.addInput(address.asOutput()); @@ -105,6 +108,7 @@ public static DataServiceDatasetV2 create(Scope scope, Operand datasetId outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + opBuilder.setAttr("uncompress_fn", uncompressFn); if (options != null) { for (Options opts : options) { if (opts.taskRefreshIntervalHintMs != null) { @@ -116,9 +120,12 @@ public static DataServiceDatasetV2 create(Scope scope, Operand datasetId if (opts.targetWorkers != null) { opBuilder.setAttr("target_workers", opts.targetWorkers); } + if (opts.uncompress != null) { + opBuilder.setAttr("uncompress", opts.uncompress); + } } } - return new DataServiceDatasetV2(opBuilder.build()); + return new DataServiceDataset(opBuilder.build()); } /** @@ -151,6 +158,16 @@ public static Options targetWorkers(String targetWorkers) { return new Options().targetWorkers(targetWorkers); } + /** + * Sets the uncompress option. + * + * @param uncompress the uncompress option + * @return this Options instance. + */ + public static Options uncompress(Boolean uncompress) { + return new Options().uncompress(uncompress); + } + /** * Gets handle. 
* @@ -167,7 +184,7 @@ public Output asOutput() { } /** - * Optional attributes for {@link org.tensorflow.op.data.DataServiceDatasetV2} + * Optional attributes for {@link org.tensorflow.op.data.DataServiceDataset} */ public static class Options { private Long taskRefreshIntervalHintMs; @@ -176,6 +193,8 @@ public static class Options { private String targetWorkers; + private Boolean uncompress; + private Options() { } @@ -211,12 +230,23 @@ public Options targetWorkers(String targetWorkers) { this.targetWorkers = targetWorkers; return this; } + + /** + * Sets the uncompress option. + * + * @param uncompress the uncompress option + * @return this Options instance. + */ + public Options uncompress(Boolean uncompress) { + this.uncompress = uncompress; + return this; + } } @OpInputsMetadata( - outputsClass = DataServiceDatasetV2.class + outputsClass = DataServiceDataset.class ) - public static class Inputs extends RawOpInputs { + public static class Inputs extends RawOpInputs { /** * The datasetId input */ @@ -287,8 +317,13 @@ public static class Inputs extends RawOpInputs { */ public final String targetWorkers; + /** + * The uncompress attribute + */ + public final boolean uncompress; + public Inputs(GraphOperation op) { - super(new DataServiceDatasetV2(op), op, Arrays.asList("task_refresh_interval_hint_ms", "output_types", "output_shapes", "data_transfer_protocol", "target_workers")); + super(new DataServiceDataset(op), op, Arrays.asList("task_refresh_interval_hint_ms", "output_types", "output_shapes", "data_transfer_protocol", "target_workers", "uncompress")); int inputIndex = 0; datasetId = (Operand) op.input(inputIndex++); processingMode = (Operand) op.input(inputIndex++); @@ -304,6 +339,7 @@ public Inputs(GraphOperation op) { outputShapes = op.attributes().getAttrShapeList("output_shapes"); dataTransferProtocol = op.attributes().getAttrString("data_transfer_protocol"); targetWorkers = op.attributes().getAttrString("target_workers"); + uncompress = op.attributes().getAttrBool("uncompress"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java index 466a00ae942..896daf95539 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/RegisterDataset.java @@ -85,6 +85,9 @@ public static RegisterDataset create(Scope scope, Operand datas if (opts.elementSpec != null) { opBuilder.setAttr("element_spec", opts.elementSpec); } + if (opts.metadata != null) { + opBuilder.setAttr("metadata", opts.metadata); + } } } return new RegisterDataset(opBuilder.build()); @@ -100,6 +103,16 @@ public static Options elementSpec(String elementSpec) { return new Options().elementSpec(elementSpec); } + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public static Options metadata(String metadata) { + return new Options().metadata(metadata); + } + /** * Gets datasetId. * @@ -120,6 +133,8 @@ public Output asOutput() { public static class Options { private String elementSpec; + private String metadata; + private Options() { } @@ -133,6 +148,17 @@ public Options elementSpec(String elementSpec) { this.elementSpec = elementSpec; return this; } + + /** + * Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public Options metadata(String metadata) { + this.metadata = metadata; + return this; + } } @OpInputsMetadata( @@ -164,14 +190,20 @@ public static class Inputs extends RawOpInputs { */ public final String elementSpec; + /** + * The metadata attribute + */ + public final String metadata; + public Inputs(GraphOperation op) { - super(new RegisterDataset(op), op, Arrays.asList("external_state_policy", "element_spec")); + super(new RegisterDataset(op), op, Arrays.asList("external_state_policy", "element_spec", "metadata")); int inputIndex = 0; dataset = (Operand) op.input(inputIndex++); address = (Operand) op.input(inputIndex++); protocol = (Operand) op.input(inputIndex++); externalStatePolicy = op.attributes().getAttrInt("external_state_policy"); elementSpec = op.attributes().getAttrString("element_spec"); + metadata = op.attributes().getAttrString("metadata"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java index 673e100adc6..947bd11e669 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/data/SlidingWindowDataset.java @@ -75,6 +75,7 @@ public SlidingWindowDataset(Operation operation) { * It must be positive. * @param outputTypes The value of the outputTypes attribute * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of SlidingWindowDataset */ @Endpoint( @@ -82,7 +83,7 @@ public SlidingWindowDataset(Operation operation) { ) public static SlidingWindowDataset create(Scope scope, Operand inputDataset, Operand windowSize, Operand windowShift, Operand windowStride, - List> outputTypes, List outputShapes) { + List> outputTypes, List outputShapes, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "SlidingWindowDataset"); opBuilder.addInput(inputDataset.asOutput()); opBuilder.addInput(windowSize.asOutput()); @@ -94,9 +95,26 @@ public static SlidingWindowDataset create(Scope scope, Operand outputShapesArray[i] = outputShapes.get(i); } opBuilder.setAttr("output_shapes", outputShapesArray); + if (options != null) { + for (Options opts : options) { + if (opts.dropRemainder != null) { + opBuilder.setAttr("drop_remainder", opts.dropRemainder); + } + } + } return new SlidingWindowDataset(opBuilder.build()); } + /** + * Sets the dropRemainder option. + * + * @param dropRemainder the dropRemainder option + * @return this Options instance. + */ + public static Options dropRemainder(Boolean dropRemainder) { + return new Options().dropRemainder(dropRemainder); + } + /** * Gets handle. * @@ -112,6 +130,27 @@ public Output asOutput() { return (Output) handle; } + /** + * Optional attributes for {@link org.tensorflow.op.data.SlidingWindowDataset} + */ + public static class Options { + private Boolean dropRemainder; + + private Options() { + } + + /** + * Sets the dropRemainder option. + * + * @param dropRemainder the dropRemainder option + * @return this Options instance. 
+ */ + public Options dropRemainder(Boolean dropRemainder) { + this.dropRemainder = dropRemainder; + return this; + } + } + @OpInputsMetadata( outputsClass = SlidingWindowDataset.class ) @@ -139,6 +178,11 @@ public static class Inputs extends RawOpInputs { */ public final Operand windowStride; + /** + * The dropRemainder attribute + */ + public final boolean dropRemainder; + /** * The outputTypes attribute */ @@ -150,12 +194,13 @@ public static class Inputs extends RawOpInputs { public final Shape[] outputShapes; public Inputs(GraphOperation op) { - super(new SlidingWindowDataset(op), op, Arrays.asList("output_types", "output_shapes")); + super(new SlidingWindowDataset(op), op, Arrays.asList("drop_remainder", "output_types", "output_shapes")); int inputIndex = 0; inputDataset = (Operand) op.input(inputIndex++); windowSize = (Operand) op.input(inputIndex++); windowShift = (Operand) op.input(inputIndex++); windowStride = (Operand) op.input(inputIndex++); + dropRemainder = op.attributes().getAttrBool("drop_remainder"); outputTypes = op.attributes().getAttrTypeList("output_types"); outputShapes = op.attributes().getAttrShapeList("output_shapes"); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java index 473246f9290..f99f11dad2a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/DecodeBase64.java @@ -34,8 +34,9 @@ /** * Decode web-safe base64-encoded strings. - * Input may or may not have padding at the end. See EncodeBase64 for padding. - * Web-safe means that input must use - and _ instead of + and /. + * Input may or may not have padding at the end. See + * EncodeBase64 + * for padding. Web-safe means that input must use - and _ instead of + and /. */ @OpMetadata( opType = DecodeBase64.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java index daddfee308a..fddd029f343 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/EncodeBase64.java @@ -34,8 +34,8 @@ /** * Encode strings into web-safe base64 format. - * Refer to the following article for more information on base64 format: - * en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the + * Refer to this article for more information on + * base64 format. Base64 strings may have padding with '=' at the * end so that the encoded has length multiple of 4. See Padding section of the * link above. *

    Web-safe means that the encoder uses - and _ instead of + and /. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java index a2d86a01565..9bfb57ec11e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/io/WriteFile.java @@ -32,8 +32,8 @@ import org.tensorflow.types.TString; /** - * Writes contents to the file at input filename. Creates file and recursively - * creates directory if not existing. + * Writes {@code contents} to the file at input {@code filename}. + * Creates the file and recursively creates directory if it does not exist. */ @OpMetadata( opType = WriteFile.OP_NAME, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java index bfdd2182153..ba3ca3f0af4 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/SegmentSum.java @@ -49,7 +49,7 @@ *

    For example: *

      * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
    - * tf.segment_sum(c, tf.constant([0, 0, 1]))
    + * tf.math.segment_sum(c, tf.constant([0, 0, 1]))
      * # ==> [[5, 5, 5, 5],
      * #      [5, 6, 7, 8]]
      * 
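For readers cross-checking the Javadoc example above against the Java bindings this patch regenerates, here is a minimal eager-mode sketch (not part of the generated sources or of this diff; it assumes the {@code Ops} façade, where this op is exposed as {@code tf.math.segmentSum}, and an eager session created via {@code EagerSession.create()}):

```java
import org.tensorflow.EagerSession;
import org.tensorflow.op.Ops;
import org.tensorflow.op.math.SegmentSum;
import org.tensorflow.types.TInt32;

public class SegmentSumExample {
  public static void main(String[] args) {
    try (EagerSession session = EagerSession.create()) {
      Ops tf = Ops.create(session);
      // Same data as the Javadoc example: rows 0 and 1 fall into segment 0, row 2 into segment 1.
      SegmentSum<TInt32> sum = tf.math.segmentSum(
          tf.constant(new int[][] {{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}}),
          tf.constant(new int[] {0, 0, 1}));
      TInt32 result = sum.asTensor();
      // Per the Javadoc above, the result should hold [[5, 5, 5, 5], [5, 6, 7, 8]].
      System.out.println(result.getInt(0, 0)); // first element of segment 0's sum: 5
    }
  }
}
```

Note that {@code SegmentSum} expects sorted segment IDs covering a prefix of the rows, which is why the unsorted variants further down in this diff take an explicit {@code numSegments} argument instead.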
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java index c9e2f43ca94..70b3b54957b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/Tanh.java @@ -44,8 +44,8 @@ *

    x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) * tf.math.tanh(x) * <tf.Tensor: shape=(8,), dtype=float32, numpy= - * array([-1. , -0.99990916, -0.46211717, 0.7615942 , 0.8336547 , - * 0.9640276 , 0.9950547 , 1. ], dtype=float32)> + * array([-1.0, -0.99990916, -0.46211717, 0.7615942 , 0.8336547 , + * 0.9640276 , 0.9950547 , 1.0], dtype=float32)> * * * diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java index 785503c0af5..6364bae41aa 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMax.java @@ -38,8 +38,7 @@ * Read * the section on segmentation * for an explanation of segments. - *

    This operator is similar to the unsorted segment sum operator found - * (here) . + *

This operator is similar to {@code tf.math.unsorted_segment_sum}. * Instead of computing the sum over segments, it computes the maximum such that: *

    \(output_i = \max_{j...} data[j...]\) where max is over tuples {@code j...} such * that {@code segment_ids[j...] == i}. @@ -48,16 +47,26 @@ * {@code output[i] = numeric_limits::lowest()}. *

    If the given segment ID {@code i} is negative, then the corresponding value is * dropped, and will not be included in the result. + *

Caution: On CPU, values in {@code segment_ids} are always validated to be less than + * {@code num_segments}, and an error is thrown for out-of-bound indices. On GPU, this + * does not throw an error for out-of-bound indices. Instead, out-of-bound indices + * result in safe but unspecified behavior, which may include ignoring + * out-of-bound indices or outputting a tensor with a 0 stored in the first + * dimension of its shape if {@code num_segments} is 0. *

    * *
    *

    For example: - *

    - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
    - * tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
    - * # ==> [[ 4,  3, 3, 4],
    - * #       [5,  6, 7, 8]]
    - * 
    + *
    + *
    + *
    + *

    c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.math.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + * array([[4, 3, 3, 4], + * [5, 6, 7, 8]], dtype=int32) + *

    + *
    + *
    * * @param data type for {@code output} output */ @@ -88,6 +97,9 @@ public UnsortedSegmentMax(Operation operation) { * @param scope current scope * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. + * The values must be less than {@code num_segments}. + *

    Caution: The values are always validated to be in range on CPU, never validated + * on GPU. * @param numSegments The numSegments value * @param data type for {@code UnsortedSegmentMax} output and operands * @return a new instance of UnsortedSegmentMax @@ -131,6 +143,9 @@ public static class Inputs extends RawOpInputsCaution: The values are always validated to be in range on CPU, never validated + * on GPU. */ public final Operand segmentIds; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java index 541a0f15b47..48026c68c0b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentMin.java @@ -38,8 +38,7 @@ * Read * the section on segmentation * for an explanation of segments. - *

    This operator is similar to the unsorted segment sum operator found - * (here) . + *

This operator is similar to {@code tf.math.unsorted_segment_sum}. * Instead of computing the sum over segments, it computes the minimum such that: *

    \(output_i = \min_{j...} data_[j...]\) where min is over tuples {@code j...} such * that {@code segment_ids[j...] == i}. @@ -47,14 +46,24 @@ * possible value for the specific numeric type, * {@code output[i] = numeric_limits::max()}. *

    For example: - *

    - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
    - * tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2)
    - * # ==> [[ 1,  2, 2, 1],
    - * #       [5,  6, 7, 8]]
    - * 
    + *
    + *
    + *
    + *

    c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.math.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + * array([[1, 2, 2, 1], + * [5, 6, 7, 8]], dtype=int32) + *

    + *
    + *
    *

    If the given segment ID {@code i} is negative, then the corresponding value is * dropped, and will not be included in the result. + *

Caution: On CPU, values in {@code segment_ids} are always validated to be less than + * {@code num_segments}, and an error is thrown for out-of-bound indices. On GPU, this + * does not throw an error for out-of-bound indices. Instead, out-of-bound indices + * result in safe but unspecified behavior, which may include ignoring + * out-of-bound indices or outputting a tensor with a 0 stored in the first + * dimension of its shape if {@code num_segments} is 0. * * @param  data type for {@code output} output */ @@ -85,6 +94,9 @@ public UnsortedSegmentMin(Operation operation) { * @param scope current scope * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. + * The values must be less than {@code num_segments}. + *

    Caution: The values are always validated to be in range on CPU, never validated + * on GPU. * @param numSegments The numSegments value * @param data type for {@code UnsortedSegmentMin} output and operands * @return a new instance of UnsortedSegmentMin @@ -128,6 +140,9 @@ public static class Inputs extends RawOpInputsCaution: The values are always validated to be in range on CPU, never validated + * on GPU. */ public final Operand segmentIds; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java index 4d80fde16df..d9827eb6825 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentProd.java @@ -39,22 +39,31 @@ * Read * the section on segmentation * for an explanation of segments. - *

    This operator is similar to the unsorted segment sum operator found - * (here) . + *

This operator is similar to {@code tf.math.unsorted_segment_sum}. * Instead of computing the sum over segments, it computes the product of all * entries belonging to a segment such that: *

    \(output_i = \prod_{j...} data[j...]\) where the product is over tuples * {@code j...} such that {@code segment_ids[j...] == i}. *

    For example: - *

    - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
    - * tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2)
    - * # ==> [[ 4,  6, 6, 4],
    - * #       [5,  6, 7, 8]]
    - * 
    + *
    + *
    + *
    + *

    c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.math.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2).numpy() + * array([[4, 6, 6, 4], + * [5, 6, 7, 8]], dtype=int32) + *

    + *
    + *
    *

    If there is no entry for a given segment ID {@code i}, it outputs 1. *

If the given segment ID {@code i} is negative, then the corresponding value is * dropped, and will not be included in the result. + * Caution: On CPU, values in {@code segment_ids} are always validated to be less than + * {@code num_segments}, and an error is thrown for out-of-bound indices. On GPU, this + * does not throw an error for out-of-bound indices. Instead, out-of-bound indices + * result in safe but unspecified behavior, which may include ignoring + * out-of-bound indices or outputting a tensor with a 0 stored in the first + * dimension of its shape if {@code num_segments} is 0. * * @param  data type for {@code output} output */ @@ -85,6 +94,9 @@ public UnsortedSegmentProd(Operation operation) { * @param scope current scope * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. + * The values must be less than {@code num_segments}. + *

    Caution: The values are always validated to be in range on CPU, never validated + * on GPU. * @param numSegments The numSegments value * @param data type for {@code UnsortedSegmentProd} output and operands * @return a new instance of UnsortedSegmentProd @@ -128,6 +140,9 @@ public static class Inputs extends RawOpInputsCaution: The values are always validated to be in range on CPU, never validated + * on GPU. */ public final Operand segmentIds; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java index 8c271e284e9..0dc30ebf9bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/math/UnsortedSegmentSum.java @@ -48,15 +48,25 @@ * If the given segment ID {@code i} is negative, the value is dropped and will not be * added to the sum of the segment. *

    {@code num_segments} should equal the number of distinct segment IDs. + *

Caution: On CPU, values in {@code segment_ids} are always validated to be less than + * {@code num_segments}, and an error is thrown for out-of-bound indices. On GPU, this + * does not throw an error for out-of-bound indices. Instead, out-of-bound indices + * result in safe but unspecified behavior, which may include ignoring + * out-of-bound indices or outputting a tensor with a 0 stored in the first + * dimension of its shape if {@code num_segments} is 0. *

    * *
    - *
    - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
    - * tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
    - * # ==> [[ 5, 5, 5, 5],
    - * #       [5, 6, 7, 8]]
    - * 
    + *
    + *
    + *
    + *

    c = [[1,2,3,4], [5,6,7,8], [4,3,2,1]] + * tf.math.unsorted_segment_sum(c, [0, 1, 0], num_segments=2).numpy() + * array([[5, 5, 5, 5], + * [5, 6, 7, 8]], dtype=int32) + *

    + *
    + *
    * * @param data type for {@code output} output */ @@ -87,6 +97,9 @@ public UnsortedSegmentSum(Operation operation) { * @param scope current scope * @param data The data value * @param segmentIds A tensor whose shape is a prefix of {@code data.shape}. + * The values must be less than {@code num_segments}. + *

    Caution: The values are always validated to be in range on CPU, never validated + * on GPU. * @param numSegments The numSegments value * @param data type for {@code UnsortedSegmentSum} output and operands * @return a new instance of UnsortedSegmentSum @@ -130,6 +143,9 @@ public static class Inputs extends RawOpInputsCaution: The values are always validated to be in range on CPU, never validated + * on GPU. */ public final Operand segmentIds; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.java new file mode 100644 index 00000000000..04c9f77373d --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.java @@ -0,0 +1,285 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TInt32; +import org.tensorflow.types.TString; +import org.tensorflow.types.family.TNumber; + +/** + * Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). + * embedding_indices[i] and aggregation_weights[i] correspond + * to the ith feature. + *

    The tensors at corresponding positions in the three input lists (sample_indices, + * embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 + * with dim_size() equal to the total number of lookups into the table described by + * the corresponding feature. + */ +@OpMetadata( + opType = DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.OP_NAME, + inputsClass = DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.Inputs.class +) +public final class DynamicEnqueueTPUEmbeddingArbitraryTensorBatch extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch"; + + public DynamicEnqueueTPUEmbeddingArbitraryTensorBatch(Operation operation) { + super(operation, OP_NAME); + } + + /** + * Factory method to create a class wrapping a new DynamicEnqueueTPUEmbeddingArbitraryTensorBatch operation. + * + * @param scope current scope + * @param sampleIndicesOrRowLengths A list of rank 2 Tensors specifying the training example to which the + * corresponding embedding_indices and aggregation_weights values belong. + * If the size of its first dimension is 0, we assume each embedding_indices + * belongs to a different sample. Both int32 and int64 are allowed and will + * be converted to int32 internally. + *

    Or a list of rank 1 Tensors specifying the row lengths for splitting + * embedding_indices and aggregation_weights into rows. It corresponds to + * ids.row_lengths in embedding_lookup(), when ids is a RaggedTensor. When + * enqueuing N-D ragged tensor, only the last dimension is allowed to be ragged. + * the row lengths is 1-D dense tensor. When empty, we assume a dense tensor is + * passed to the op Both int32 and int64 are allowed and will be converted to + * int32 internally. + * @param embeddingIndices A list of rank 1 Tensors, indices into the embedding + * tables. Both int32 and int64 are allowed and will be converted to + * int32 internally. + * @param aggregationWeights A list of rank 1 Tensors containing per training + * example aggregation weights. Both float32 and float64 are allowed and will + * be converted to float32 internally. + * @param modeOverride A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + * @param deviceOrdinal The TPU device to use. Should be >= 0 and less than the number + * of TPU cores in the task on which the node is placed. + * @param options carries optional attribute values + * @return a new instance of DynamicEnqueueTPUEmbeddingArbitraryTensorBatch + */ + @Endpoint( + describeByClass = true + ) + public static DynamicEnqueueTPUEmbeddingArbitraryTensorBatch create(Scope scope, + Iterable> sampleIndicesOrRowLengths, + Iterable> embeddingIndices, + Iterable> aggregationWeights, Operand modeOverride, + Operand deviceOrdinal, Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch"); + opBuilder.addInputList(Operands.asOutputs(sampleIndicesOrRowLengths)); + opBuilder.addInputList(Operands.asOutputs(embeddingIndices)); + opBuilder.addInputList(Operands.asOutputs(aggregationWeights)); + opBuilder.addInput(modeOverride.asOutput()); + opBuilder.addInput(deviceOrdinal.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.combiners != null) { + String[] combinersArray = new String[opts.combiners.size()]; + for (int i = 0 ; i < combinersArray.length ; i++) { + combinersArray[i] = opts.combiners.get(i); + } + opBuilder.setAttr("combiners", combinersArray); + } + } + } + return new DynamicEnqueueTPUEmbeddingArbitraryTensorBatch(opBuilder.build()); + } + + /** + * Sets the combiners option. + * + * @param combiners A list of string scalars, one for each embedding table that specify + * how to normalize the embedding activations after weighted summation. + * Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + * the sum of the weights be 0 for 'mean' or the sum of the squared weights be + * 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + * all tables. + * @return this Options instance. + */ + public static Options combiners(List combiners) { + return new Options().combiners(combiners); + } + + /** + * Sets the combiners option. + * + * @param combiners A list of string scalars, one for each embedding table that specify + * how to normalize the embedding activations after weighted summation. + * Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + * the sum of the weights be 0 for 'mean' or the sum of the squared weights be + * 0 for 'sqrtn'. 
If combiners isn't passed, the default is to use 'sum' for + * all tables. + * @return this Options instance. + */ + public static Options combiners(String... combiners) { + return new Options().combiners(combiners); + } + + /** + * Optional attributes for {@link org.tensorflow.op.tpu.DynamicEnqueueTPUEmbeddingArbitraryTensorBatch} + */ + public static class Options { + private List combiners; + + private Options() { + } + + /** + * Sets the combiners option. + * + * @param combiners A list of string scalars, one for each embedding table that specify + * how to normalize the embedding activations after weighted summation. + * Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + * the sum of the weights be 0 for 'mean' or the sum of the squared weights be + * 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + * all tables. + * @return this Options instance. + */ + public Options combiners(List combiners) { + this.combiners = combiners; + return this; + } + + /** + * Sets the combiners option. + * + * @param combiners A list of string scalars, one for each embedding table that specify + * how to normalize the embedding activations after weighted summation. + * Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + * the sum of the weights be 0 for 'mean' or the sum of the squared weights be + * 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + * all tables. + * @return this Options instance. + */ + public Options combiners(String... combiners) { + this.combiners = Arrays.asList(combiners); + return this; + } + } + + @OpInputsMetadata( + outputsClass = DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.class + ) + public static class Inputs extends RawOpInputs { + /** + * A list of rank 2 Tensors specifying the training example to which the + * corresponding embedding_indices and aggregation_weights values belong. + * If the size of its first dimension is 0, we assume each embedding_indices + * belongs to a different sample. Both int32 and int64 are allowed and will + * be converted to int32 internally. + *

    Or a list of rank 1 Tensors specifying the row lengths for splitting + * embedding_indices and aggregation_weights into rows. It corresponds to + * ids.row_lengths in embedding_lookup(), when ids is a RaggedTensor. When + * enqueuing N-D ragged tensor, only the last dimension is allowed to be ragged. + * the row lengths is 1-D dense tensor. When empty, we assume a dense tensor is + * passed to the op Both int32 and int64 are allowed and will be converted to + * int32 internally. + */ + public final Iterable> sampleIndicesOrRowLengths; + + /** + * A list of rank 1 Tensors, indices into the embedding + * tables. Both int32 and int64 are allowed and will be converted to + * int32 internally. + */ + public final Iterable> embeddingIndices; + + /** + * A list of rank 1 Tensors containing per training + * example aggregation weights. Both float32 and float64 are allowed and will + * be converted to float32 internally. + */ + public final Iterable> aggregationWeights; + + /** + * A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + */ + public final Operand modeOverride; + + /** + * The TPU device to use. Should be >= 0 and less than the number + * of TPU cores in the task on which the node is placed. + */ + public final Operand deviceOrdinal; + + /** + * The T1 attribute + */ + public final DataType T1; + + /** + * The T2 attribute + */ + public final DataType T2; + + /** + * The T3 attribute + */ + public final DataType T3; + + /** + * A list of string scalars, one for each embedding table that specify + * how to normalize the embedding activations after weighted summation. + * Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + * the sum of the weights be 0 for 'mean' or the sum of the squared weights be + * 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + * all tables. 
+ */ + public final String[] combiners; + + public Inputs(GraphOperation op) { + super(new DynamicEnqueueTPUEmbeddingArbitraryTensorBatch(op), op, Arrays.asList("T1", "T2", "T3", "combiners")); + int inputIndex = 0; + int sampleIndicesOrRowLengthsLength = op.inputListLength("sample_indices_or_row_lengths"); + sampleIndicesOrRowLengths = Arrays.asList((Operand[]) op.inputList(inputIndex, sampleIndicesOrRowLengthsLength)); + inputIndex += sampleIndicesOrRowLengthsLength; + int embeddingIndicesLength = op.inputListLength("embedding_indices"); + embeddingIndices = Arrays.asList((Operand[]) op.inputList(inputIndex, embeddingIndicesLength)); + inputIndex += embeddingIndicesLength; + int aggregationWeightsLength = op.inputListLength("aggregation_weights"); + aggregationWeights = Arrays.asList((Operand[]) op.inputList(inputIndex, aggregationWeightsLength)); + inputIndex += aggregationWeightsLength; + modeOverride = (Operand) op.input(inputIndex++); + deviceOrdinal = (Operand) op.input(inputIndex++); + T1 = op.attributes().getAttrType("T1"); + T2 = op.attributes().getAttrType("T2"); + T3 = op.attributes().getAttrType("T3"); + combiners = op.attributes().getAttrStringList("combiners"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingArbitraryTensorBatch.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingArbitraryTensorBatch.java new file mode 100644 index 00000000000..c0ecaa9593c --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/EnqueueTPUEmbeddingArbitraryTensorBatch.java @@ -0,0 +1,309 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +=======================================================================*/ + +// This class has been generated, DO NOT EDIT! + +package org.tensorflow.op.tpu; + +import java.util.Arrays; +import java.util.List; +import org.tensorflow.GraphOperation; +import org.tensorflow.Operand; +import org.tensorflow.Operation; +import org.tensorflow.OperationBuilder; +import org.tensorflow.op.Operands; +import org.tensorflow.op.RawOp; +import org.tensorflow.op.RawOpInputs; +import org.tensorflow.op.Scope; +import org.tensorflow.op.annotation.Endpoint; +import org.tensorflow.op.annotation.OpInputsMetadata; +import org.tensorflow.op.annotation.OpMetadata; +import org.tensorflow.proto.framework.DataType; +import org.tensorflow.types.TString; +import org.tensorflow.types.family.TNumber; + +/** + * Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). + * embedding_indices[i] and aggregation_weights[i] correspond + * to the ith feature. + *

    The tensors at corresponding positions in the three input lists (sample_indices, + * embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 + * with dim_size() equal to the total number of lookups into the table described by + * the corresponding feature. + */ +@OpMetadata( + opType = EnqueueTPUEmbeddingArbitraryTensorBatch.OP_NAME, + inputsClass = EnqueueTPUEmbeddingArbitraryTensorBatch.Inputs.class +) +public final class EnqueueTPUEmbeddingArbitraryTensorBatch extends RawOp { + /** + * The name of this op, as known by TensorFlow core engine + */ + public static final String OP_NAME = "EnqueueTPUEmbeddingArbitraryTensorBatch"; + + public EnqueueTPUEmbeddingArbitraryTensorBatch(Operation operation) { + super(operation, OP_NAME); + } + + /** + * Factory method to create a class wrapping a new EnqueueTPUEmbeddingArbitraryTensorBatch operation. + * + * @param scope current scope + * @param sampleIndicesOrRowLengths A list of rank 2 Tensors specifying the training example to which the + * corresponding embedding_indices and aggregation_weights values belong. + * If the size of its first dimension is 0, we assume each embedding_indices + * belongs to a different sample. Both int32 and int64 are allowed and will + * be converted to int32 internally. + *

    Or a list of rank 1 Tensors specifying the row lengths for splitting + * embedding_indices and aggregation_weights into rows. It corresponds to + * ids.row_lengths in embedding_lookup(), when ids is a RaggedTensor. When + * enqueuing N-D ragged tensor, only the last dimension is allowed to be ragged. + * the row lengths is 1-D dense tensor. When empty, we assume a dense tensor is + * passed to the op Both int32 and int64 are allowed and will be converted to + * int32 internally. + * @param embeddingIndices A list of rank 1 Tensors, indices into the embedding + * tables. Both int32 and int64 are allowed and will be converted to + * int32 internally. + * @param aggregationWeights A list of rank 1 Tensors containing per training + * example aggregation weights. Both float32 and float64 are allowed and will + * be converted to float32 internally. + * @param modeOverride A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + * @param options carries optional attribute values + * @return a new instance of EnqueueTPUEmbeddingArbitraryTensorBatch + */ + @Endpoint( + describeByClass = true + ) + public static EnqueueTPUEmbeddingArbitraryTensorBatch create(Scope scope, + Iterable> sampleIndicesOrRowLengths, + Iterable> embeddingIndices, + Iterable> aggregationWeights, Operand modeOverride, + Options... options) { + OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "EnqueueTPUEmbeddingArbitraryTensorBatch"); + opBuilder.addInputList(Operands.asOutputs(sampleIndicesOrRowLengths)); + opBuilder.addInputList(Operands.asOutputs(embeddingIndices)); + opBuilder.addInputList(Operands.asOutputs(aggregationWeights)); + opBuilder.addInput(modeOverride.asOutput()); + if (options != null) { + for (Options opts : options) { + if (opts.deviceOrdinal != null) { + opBuilder.setAttr("device_ordinal", opts.deviceOrdinal); + } + if (opts.combiners != null) { + String[] combinersArray = new String[opts.combiners.size()]; + for (int i = 0 ; i < combinersArray.length ; i++) { + combinersArray[i] = opts.combiners.get(i); + } + opBuilder.setAttr("combiners", combinersArray); + } + } + } + return new EnqueueTPUEmbeddingArbitraryTensorBatch(opBuilder.build()); + } + + /** + * Sets the deviceOrdinal option. + * + * @param deviceOrdinal The TPU device to use. Should be >= 0 and less than the number + * of TPU cores in the task on which the node is placed. + * @return this Options instance. + */ + public static Options deviceOrdinal(Long deviceOrdinal) { + return new Options().deviceOrdinal(deviceOrdinal); + } + + /** + * Sets the combiners option. + * + * @param combiners A list of string scalars, one for each embedding table that specify + * how to normalize the embedding activations after weighted summation. + * Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + * the sum of the weights be 0 for 'mean' or the sum of the squared weights be + * 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + * all tables. + * @return this Options instance. + */ + public static Options combiners(List combiners) { + return new Options().combiners(combiners); + } + + /** + * Sets the combiners option. 
+ * + * @param combiners A list of string scalars, one for each embedding table that specify + * how to normalize the embedding activations after weighted summation. + * Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + * the sum of the weights be 0 for 'mean' or the sum of the squared weights be + * 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + * all tables. + * @return this Options instance. + */ + public static Options combiners(String... combiners) { + return new Options().combiners(combiners); + } + + /** + * Optional attributes for {@link org.tensorflow.op.tpu.EnqueueTPUEmbeddingArbitraryTensorBatch} + */ + public static class Options { + private Long deviceOrdinal; + + private List combiners; + + private Options() { + } + + /** + * Sets the deviceOrdinal option. + * + * @param deviceOrdinal The TPU device to use. Should be >= 0 and less than the number + * of TPU cores in the task on which the node is placed. + * @return this Options instance. + */ + public Options deviceOrdinal(Long deviceOrdinal) { + this.deviceOrdinal = deviceOrdinal; + return this; + } + + /** + * Sets the combiners option. + * + * @param combiners A list of string scalars, one for each embedding table that specify + * how to normalize the embedding activations after weighted summation. + * Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + * the sum of the weights be 0 for 'mean' or the sum of the squared weights be + * 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + * all tables. + * @return this Options instance. + */ + public Options combiners(List combiners) { + this.combiners = combiners; + return this; + } + + /** + * Sets the combiners option. + * + * @param combiners A list of string scalars, one for each embedding table that specify + * how to normalize the embedding activations after weighted summation. + * Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + * the sum of the weights be 0 for 'mean' or the sum of the squared weights be + * 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + * all tables. + * @return this Options instance. + */ + public Options combiners(String... combiners) { + this.combiners = Arrays.asList(combiners); + return this; + } + } + + @OpInputsMetadata( + outputsClass = EnqueueTPUEmbeddingArbitraryTensorBatch.class + ) + public static class Inputs extends RawOpInputs { + /** + * A list of rank 2 Tensors specifying the training example to which the + * corresponding embedding_indices and aggregation_weights values belong. + * If the size of its first dimension is 0, we assume each embedding_indices + * belongs to a different sample. Both int32 and int64 are allowed and will + * be converted to int32 internally. + *

    Or a list of rank 1 Tensors specifying the row lengths for splitting + * embedding_indices and aggregation_weights into rows. It corresponds to + * ids.row_lengths in embedding_lookup(), when ids is a RaggedTensor. When + * enqueuing N-D ragged tensor, only the last dimension is allowed to be ragged. + * the row lengths is 1-D dense tensor. When empty, we assume a dense tensor is + * passed to the op Both int32 and int64 are allowed and will be converted to + * int32 internally. + */ + public final Iterable> sampleIndicesOrRowLengths; + + /** + * A list of rank 1 Tensors, indices into the embedding + * tables. Both int32 and int64 are allowed and will be converted to + * int32 internally. + */ + public final Iterable> embeddingIndices; + + /** + * A list of rank 1 Tensors containing per training + * example aggregation weights. Both float32 and float64 are allowed and will + * be converted to float32 internally. + */ + public final Iterable> aggregationWeights; + + /** + * A string input that overrides the mode specified in the + * TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + * 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + * in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + */ + public final Operand modeOverride; + + /** + * The T1 attribute + */ + public final DataType T1; + + /** + * The T2 attribute + */ + public final DataType T2; + + /** + * The T3 attribute + */ + public final DataType T3; + + /** + * The TPU device to use. Should be >= 0 and less than the number + * of TPU cores in the task on which the node is placed. + */ + public final long deviceOrdinal; + + /** + * A list of string scalars, one for each embedding table that specify + * how to normalize the embedding activations after weighted summation. + * Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + * the sum of the weights be 0 for 'mean' or the sum of the squared weights be + * 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + * all tables. 
+ */ + public final String[] combiners; + + public Inputs(GraphOperation op) { + super(new EnqueueTPUEmbeddingArbitraryTensorBatch(op), op, Arrays.asList("T1", "T2", "T3", "device_ordinal", "combiners")); + int inputIndex = 0; + int sampleIndicesOrRowLengthsLength = op.inputListLength("sample_indices_or_row_lengths"); + sampleIndicesOrRowLengths = Arrays.asList((Operand[]) op.inputList(inputIndex, sampleIndicesOrRowLengthsLength)); + inputIndex += sampleIndicesOrRowLengthsLength; + int embeddingIndicesLength = op.inputListLength("embedding_indices"); + embeddingIndices = Arrays.asList((Operand[]) op.inputList(inputIndex, embeddingIndicesLength)); + inputIndex += embeddingIndicesLength; + int aggregationWeightsLength = op.inputListLength("aggregation_weights"); + aggregationWeights = Arrays.asList((Operand[]) op.inputList(inputIndex, aggregationWeightsLength)); + inputIndex += aggregationWeightsLength; + modeOverride = (Operand) op.input(inputIndex++); + T1 = op.attributes().getAttrType("T1"); + T2 = op.attributes().getAttrType("T2"); + T3 = op.attributes().getAttrType("T3"); + deviceOrdinal = op.attributes().getAttrInt("device_ordinal"); + combiners = op.attributes().getAttrStringList("combiners"); + } + } +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java index b70ad115492..c94354b555f 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/tpu/IsTPUEmbeddingInitialized.java @@ -56,16 +56,34 @@ public IsTPUEmbeddingInitialized(Operation operation) { * Factory method to create a class wrapping a new IsTPUEmbeddingInitialized operation. * * @param scope current scope + * @param options carries optional attribute values * @return a new instance of IsTPUEmbeddingInitialized */ @Endpoint( describeByClass = true ) - public static IsTPUEmbeddingInitialized create(Scope scope) { + public static IsTPUEmbeddingInitialized create(Scope scope, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "IsTPUEmbeddingInitialized"); + if (options != null) { + for (Options opts : options) { + if (opts.config != null) { + opBuilder.setAttr("config", opts.config); + } + } + } return new IsTPUEmbeddingInitialized(opBuilder.build()); } + /** + * Sets the config option. + * + * @param config the config option + * @return this Options instance. + */ + public static Options config(String config) { + return new Options().config(config); + } + /** * Gets isTpuEmbeddingInitialized. * @@ -80,13 +98,40 @@ public Output asOutput() { return isTpuEmbeddingInitialized; } + /** + * Optional attributes for {@link org.tensorflow.op.tpu.IsTPUEmbeddingInitialized} + */ + public static class Options { + private String config; + + private Options() { + } + + /** + * Sets the config option. + * + * @param config the config option + * @return this Options instance. 
+ */ + public Options config(String config) { + this.config = config; + return this; + } + } + @OpInputsMetadata( outputsClass = IsTPUEmbeddingInitialized.class ) public static class Inputs extends RawOpInputs { + /** + * The config attribute + */ + public final String config; + public Inputs(GraphOperation op) { - super(new IsTPUEmbeddingInitialized(op), op, Arrays.asList()); + super(new IsTPUEmbeddingInitialized(op), op, Arrays.asList("config")); int inputIndex = 0; + config = op.attributes().getAttrString("config"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AllReduce.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AllReduce.java index 3ce3b44888d..acf120281c3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AllReduce.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/AllReduce.java @@ -68,6 +68,11 @@ public AllReduce(Operation operation) { * @param input Array or a non-empty tuple of arrays to reduce across replicas. * @param groupAssignment Groups between which the reductions are performed. * @param reduceOp Reduction computation. + * @param mode group mode. + * CrossReplica: group_assignment contains replica_id. Each group contains the + * replicas for the current partition. + * CrossReplicaAndPartition: group_assignment contains replica_id. Each group + * contains the replicas for all partitions. * @param data type for {@code XlaAllReduce} output and operands * @return a new instance of AllReduce */ @@ -75,11 +80,12 @@ public AllReduce(Operation operation) { describeByClass = true ) public static AllReduce create(Scope scope, Operand input, - Operand groupAssignment, String reduceOp) { + Operand groupAssignment, String reduceOp, String mode) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "AllReduce"); opBuilder.addInput(input.asOutput()); opBuilder.addInput(groupAssignment.asOutput()); opBuilder.setAttr("reduce_op", reduceOp); + opBuilder.setAttr("mode", mode); return new AllReduce<>(opBuilder.build()); } @@ -121,13 +127,23 @@ public static class Inputs extends RawOpInputs> */ public final String reduceOp; + /** + * group mode. + * CrossReplica: group_assignment contains replica_id. Each group contains the + * replicas for the current partition. + * CrossReplicaAndPartition: group_assignment contains replica_id. Each group + * contains the replicas for all partitions. + */ + public final String mode; + public Inputs(GraphOperation op) { - super(new AllReduce<>(op), op, Arrays.asList("T", "reduce_op")); + super(new AllReduce<>(op), op, Arrays.asList("T", "reduce_op", "mode")); int inputIndex = 0; input = (Operand) op.input(inputIndex++); groupAssignment = (Operand) op.input(inputIndex++); T = op.attributes().getAttrType("T"); reduceOp = op.attributes().getAttrString("reduce_op"); + mode = op.attributes().getAttrString("mode"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java index 0c57763cd5c..227c8a36b83 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/op/xla/Conv.java @@ -67,16 +67,17 @@ public Conv(Operation operation) { * Factory method to create a class wrapping a new XlaConvV2 operation. 
* * @param scope current scope - * @param lhs the input tensor - * @param rhs the kernel tensor - * @param windowStrides the inter-window strides - * @param padding the padding to apply at the start and end of each input dimensions + * @param lhs input tensor + * @param rhs kernel tensor + * @param windowStrides inter-window strides + * @param padding padding to apply at the start and end of each input dimensions * @param lhsDilation dilation to apply between input elements * @param rhsDilation dilation to apply between kernel elements * @param featureGroupCount number of feature groups for grouped convolution. - * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. - * @param precisionConfig a serialized xla::PrecisionConfig proto. - * @param preferredElementType The type of the tensor. + * @param dimensionNumbers serialized xla::ConvolutionDimensionNumbers proto. + * @param precisionConfig serialized xla::PrecisionConfig proto. + * @param preferredElementType type of the tensor. + * @param options carries optional attribute values * @param data type for {@code XlaConvV2} output and operands * @param data type for {@code XlaConvV2} output and operands * @return a new instance of Conv @@ -88,7 +89,7 @@ public static Conv create(Scope scope, Operand lhs, Operand rhs, Operand windowStrides, Operand padding, Operand lhsDilation, Operand rhsDilation, Operand featureGroupCount, String dimensionNumbers, String precisionConfig, - Class preferredElementType) { + Class preferredElementType, Options... options) { OperationBuilder opBuilder = scope.opBuilder(OP_NAME, "Conv"); opBuilder.addInput(lhs.asOutput()); opBuilder.addInput(rhs.asOutput()); @@ -100,9 +101,26 @@ public static Conv create(Scope scope, opBuilder.setAttr("dimension_numbers", dimensionNumbers); opBuilder.setAttr("precision_config", precisionConfig); opBuilder.setAttr("preferred_element_type", Operands.toDataType(preferredElementType)); + if (options != null) { + for (Options opts : options) { + if (opts.batchGroupCount != null) { + opBuilder.setAttr("batch_group_count", opts.batchGroupCount); + } + } + } return new Conv<>(opBuilder.build()); } + /** + * Sets the batchGroupCount option. + * + * @param batchGroupCount number of batch groups or grouped filters. + * @return this Options instance. + */ + public static Options batchGroupCount(Long batchGroupCount) { + return new Options().batchGroupCount(batchGroupCount); + } + /** * Gets output. * @@ -117,27 +135,48 @@ public Output asOutput() { return output; } + /** + * Optional attributes for {@link org.tensorflow.op.xla.Conv} + */ + public static class Options { + private Long batchGroupCount; + + private Options() { + } + + /** + * Sets the batchGroupCount option. + * + * @param batchGroupCount number of batch groups or grouped filters. + * @return this Options instance. 
+ */ + public Options batchGroupCount(Long batchGroupCount) { + this.batchGroupCount = batchGroupCount; + return this; + } + } + @OpInputsMetadata( outputsClass = Conv.class ) public static class Inputs extends RawOpInputs> { /** - * the input tensor + * input tensor */ public final Operand lhs; /** - * the kernel tensor + * kernel tensor */ public final Operand rhs; /** - * the inter-window strides + * inter-window strides */ public final Operand windowStrides; /** - * the padding to apply at the start and end of each input dimensions + * padding to apply at the start and end of each input dimensions */ public final Operand padding; @@ -172,22 +211,27 @@ public static class Inputs extends RawOpInputs> { public final DataType Tindices; /** - * a serialized xla::ConvolutionDimensionNumbers proto. + * serialized xla::ConvolutionDimensionNumbers proto. */ public final String dimensionNumbers; /** - * a serialized xla::PrecisionConfig proto. + * serialized xla::PrecisionConfig proto. */ public final String precisionConfig; /** - * The type of the tensor. + * type of the tensor. */ public final DataType preferredElementType; + /** + * number of batch groups or grouped filters. + */ + public final long batchGroupCount; + public Inputs(GraphOperation op) { - super(new Conv<>(op), op, Arrays.asList("LhsT", "RhsT", "Tindices", "dimension_numbers", "precision_config", "preferred_element_type")); + super(new Conv<>(op), op, Arrays.asList("LhsT", "RhsT", "Tindices", "dimension_numbers", "precision_config", "preferred_element_type", "batch_group_count")); int inputIndex = 0; lhs = (Operand) op.input(inputIndex++); rhs = (Operand) op.input(inputIndex++); @@ -202,6 +246,7 @@ public Inputs(GraphOperation op) { dimensionNumbers = op.attributes().getAttrString("dimension_numbers"); precisionConfig = op.attributes().getAttrString("precision_config"); preferredElementType = op.attributes().getAttrType("preferred_element_type"); + batchGroupCount = op.attributes().getAttrInt("batch_group_count"); } } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptions.java index 610ab74e7f2..159bc6d64d3 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptions.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptions.java @@ -5,7 +5,7 @@ /** *

    - * next: 4
    + * next: 5
      * 
    * * Protobuf type {@code tensorflow.data.AutotuneOptions} @@ -67,6 +67,12 @@ private AutotuneOptions( optionalRamBudget_ = input.readInt64(); break; } + case 32: { + int rawValue = input.readEnum(); + optionalAutotuneAlgorithmCase_ = 4; + optionalAutotuneAlgorithm_ = rawValue; + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -207,6 +213,42 @@ public int getNumber() { optionalRamBudgetCase_); } + private int optionalAutotuneAlgorithmCase_ = 0; + private java.lang.Object optionalAutotuneAlgorithm_; + public enum OptionalAutotuneAlgorithmCase + implements com.google.protobuf.Internal.EnumLite { + AUTOTUNE_ALGORITHM(4), + OPTIONALAUTOTUNEALGORITHM_NOT_SET(0); + private final int value; + private OptionalAutotuneAlgorithmCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalAutotuneAlgorithmCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalAutotuneAlgorithmCase forNumber(int value) { + switch (value) { + case 4: return AUTOTUNE_ALGORITHM; + case 0: return OPTIONALAUTOTUNEALGORITHM_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalAutotuneAlgorithmCase + getOptionalAutotuneAlgorithmCase() { + return OptionalAutotuneAlgorithmCase.forNumber( + optionalAutotuneAlgorithmCase_); + } + public static final int ENABLED_FIELD_NUMBER = 1; /** * bool enabled = 1; @@ -240,6 +282,29 @@ public long getRamBudget() { return 0L; } + public static final int AUTOTUNE_ALGORITHM_FIELD_NUMBER = 4; + /** + * .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4; + */ + public int getAutotuneAlgorithmValue() { + if (optionalAutotuneAlgorithmCase_ == 4) { + return (java.lang.Integer) optionalAutotuneAlgorithm_; + } + return 0; + } + /** + * .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4; + */ + public org.tensorflow.proto.data.model.AutotuneAlgorithm getAutotuneAlgorithm() { + if (optionalAutotuneAlgorithmCase_ == 4) { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.model.AutotuneAlgorithm result = org.tensorflow.proto.data.model.AutotuneAlgorithm.valueOf( + (java.lang.Integer) optionalAutotuneAlgorithm_); + return result == null ? 
org.tensorflow.proto.data.model.AutotuneAlgorithm.UNRECOGNIZED : result; + } + return org.tensorflow.proto.data.model.AutotuneAlgorithm.DEFAULT; + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -266,6 +331,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeInt64( 3, (long)((java.lang.Long) optionalRamBudget_)); } + if (optionalAutotuneAlgorithmCase_ == 4) { + output.writeEnum(4, ((java.lang.Integer) optionalAutotuneAlgorithm_)); + } unknownFields.writeTo(output); } @@ -290,6 +358,10 @@ public int getSerializedSize() { .computeInt64Size( 3, (long)((java.lang.Long) optionalRamBudget_)); } + if (optionalAutotuneAlgorithmCase_ == 4) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, ((java.lang.Integer) optionalAutotuneAlgorithm_)); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -332,6 +404,15 @@ public boolean equals(final java.lang.Object obj) { case 0: default: } + if (!getOptionalAutotuneAlgorithmCase().equals(other.getOptionalAutotuneAlgorithmCase())) return false; + switch (optionalAutotuneAlgorithmCase_) { + case 4: + if (getAutotuneAlgorithmValue() + != other.getAutotuneAlgorithmValue()) return false; + break; + case 0: + default: + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -369,6 +450,14 @@ public int hashCode() { case 0: default: } + switch (optionalAutotuneAlgorithmCase_) { + case 4: + hash = (37 * hash) + AUTOTUNE_ALGORITHM_FIELD_NUMBER; + hash = (53 * hash) + getAutotuneAlgorithmValue(); + break; + case 0: + default: + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -466,7 +555,7 @@ protected Builder newBuilderForType( } /** *
    -   * next: 4
    +   * next: 5
        * 
    * * Protobuf type {@code tensorflow.data.AutotuneOptions} @@ -512,6 +601,8 @@ public Builder clear() { optionalCpuBudget_ = null; optionalRamBudgetCase_ = 0; optionalRamBudget_ = null; + optionalAutotuneAlgorithmCase_ = 0; + optionalAutotuneAlgorithm_ = null; return this; } @@ -547,9 +638,13 @@ public org.tensorflow.proto.data.AutotuneOptions buildPartial() { if (optionalRamBudgetCase_ == 3) { result.optionalRamBudget_ = optionalRamBudget_; } + if (optionalAutotuneAlgorithmCase_ == 4) { + result.optionalAutotuneAlgorithm_ = optionalAutotuneAlgorithm_; + } result.optionalEnabledCase_ = optionalEnabledCase_; result.optionalCpuBudgetCase_ = optionalCpuBudgetCase_; result.optionalRamBudgetCase_ = optionalRamBudgetCase_; + result.optionalAutotuneAlgorithmCase_ = optionalAutotuneAlgorithmCase_; onBuilt(); return result; } @@ -625,6 +720,15 @@ public Builder mergeFrom(org.tensorflow.proto.data.AutotuneOptions other) { break; } } + switch (other.getOptionalAutotuneAlgorithmCase()) { + case AUTOTUNE_ALGORITHM: { + setAutotuneAlgorithmValue(other.getAutotuneAlgorithmValue()); + break; + } + case OPTIONALAUTOTUNEALGORITHM_NOT_SET: { + break; + } + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -698,6 +802,21 @@ public Builder clearOptionalRamBudget() { return this; } + private int optionalAutotuneAlgorithmCase_ = 0; + private java.lang.Object optionalAutotuneAlgorithm_; + public OptionalAutotuneAlgorithmCase + getOptionalAutotuneAlgorithmCase() { + return OptionalAutotuneAlgorithmCase.forNumber( + optionalAutotuneAlgorithmCase_); + } + + public Builder clearOptionalAutotuneAlgorithm() { + optionalAutotuneAlgorithmCase_ = 0; + optionalAutotuneAlgorithm_ = null; + onChanged(); + return this; + } + /** * bool enabled = 1; @@ -788,6 +907,60 @@ public Builder clearRamBudget() { } return this; } + + /** + * .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4; + */ + public int getAutotuneAlgorithmValue() { + if (optionalAutotuneAlgorithmCase_ == 4) { + return ((java.lang.Integer) optionalAutotuneAlgorithm_).intValue(); + } + return 0; + } + /** + * .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4; + */ + public Builder setAutotuneAlgorithmValue(int value) { + optionalAutotuneAlgorithmCase_ = 4; + optionalAutotuneAlgorithm_ = value; + onChanged(); + return this; + } + /** + * .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4; + */ + public org.tensorflow.proto.data.model.AutotuneAlgorithm getAutotuneAlgorithm() { + if (optionalAutotuneAlgorithmCase_ == 4) { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.model.AutotuneAlgorithm result = org.tensorflow.proto.data.model.AutotuneAlgorithm.valueOf( + (java.lang.Integer) optionalAutotuneAlgorithm_); + return result == null ? 
org.tensorflow.proto.data.model.AutotuneAlgorithm.UNRECOGNIZED : result; + } + return org.tensorflow.proto.data.model.AutotuneAlgorithm.DEFAULT; + } + /** + * .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4; + */ + public Builder setAutotuneAlgorithm(org.tensorflow.proto.data.model.AutotuneAlgorithm value) { + if (value == null) { + throw new NullPointerException(); + } + optionalAutotuneAlgorithmCase_ = 4; + optionalAutotuneAlgorithm_ = value.getNumber(); + onChanged(); + return this; + } + /** + * .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4; + */ + public Builder clearAutotuneAlgorithm() { + if (optionalAutotuneAlgorithmCase_ == 4) { + optionalAutotuneAlgorithmCase_ = 0; + optionalAutotuneAlgorithm_ = null; + onChanged(); + } + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptionsOrBuilder.java index fefc95fb410..0346bcf57a1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptionsOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/AutotuneOptionsOrBuilder.java @@ -22,9 +22,20 @@ public interface AutotuneOptionsOrBuilder extends */ long getRamBudget(); + /** + * .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4; + */ + int getAutotuneAlgorithmValue(); + /** + * .tensorflow.data.model.AutotuneAlgorithm autotune_algorithm = 4; + */ + org.tensorflow.proto.data.model.AutotuneAlgorithm getAutotuneAlgorithm(); + public org.tensorflow.proto.data.AutotuneOptions.OptionalEnabledCase getOptionalEnabledCase(); public org.tensorflow.proto.data.AutotuneOptions.OptionalCpuBudgetCase getOptionalCpuBudgetCase(); public org.tensorflow.proto.data.AutotuneOptions.OptionalRamBudgetCase getOptionalRamBudgetCase(); + + public org.tensorflow.proto.data.AutotuneOptions.OptionalAutotuneAlgorithmCase getOptionalAutotuneAlgorithmCase(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/CardinalityOptions.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/CardinalityOptions.java new file mode 100644 index 00000000000..811a53d1182 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/CardinalityOptions.java @@ -0,0 +1,645 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +/** + *
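The AutotuneOptions changes above add autotune_algorithm as a oneof-wrapped enum field (tag 4) with the usual generated accessors. A small sketch of setting and reading it; AutotuneAlgorithm.HILL_CLIMB and the setEnabled call are assumed from the upstream proto rather than shown in this hunk.

    import org.tensorflow.proto.data.AutotuneOptions;
    import org.tensorflow.proto.data.model.AutotuneAlgorithm;

    public final class AutotuneOptionsSketch {
      public static void main(String[] args) {
        AutotuneOptions opts = AutotuneOptions.newBuilder()
            .setEnabled(true)                                    // existing field, tag 1
            .setAutotuneAlgorithm(AutotuneAlgorithm.HILL_CLIMB)  // new field, tag 4
            .build();

        // The oneof case distinguishes "explicitly set" from "left unset".
        if (opts.getOptionalAutotuneAlgorithmCase()
            == AutotuneOptions.OptionalAutotuneAlgorithmCase.AUTOTUNE_ALGORITHM) {
          System.out.println(opts.getAutotuneAlgorithm());
        }
      }
    }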
    + * next: 2
    + * 
    + * + * Protobuf type {@code tensorflow.data.CardinalityOptions} + */ +public final class CardinalityOptions extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.CardinalityOptions) + CardinalityOptionsOrBuilder { +private static final long serialVersionUID = 0L; + // Use CardinalityOptions.newBuilder() to construct. + private CardinalityOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private CardinalityOptions() { + computeLevel_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new CardinalityOptions(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CardinalityOptions( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + int rawValue = input.readEnum(); + + computeLevel_ = rawValue; + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_CardinalityOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_CardinalityOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.CardinalityOptions.class, org.tensorflow.proto.data.CardinalityOptions.Builder.class); + } + + /** + * Protobuf enum {@code tensorflow.data.CardinalityOptions.ComputeLevel} + */ + public enum ComputeLevel + implements com.google.protobuf.ProtocolMessageEnum { + /** + * CARDINALITY_COMPUTE_UNSPECIFIED = 0; + */ + CARDINALITY_COMPUTE_UNSPECIFIED(0), + /** + *
    +     * Cardinality will only be computed if it can be determined in a cheap
    +     * manner (ie. without reading from file sources). If the cardinality would
    +     * be nontrivial to compute, Cardinality() will return UNKNOWN_CARDINALITY.
    +     * 
    + * + * CARDINALITY_COMPUTE_LOW = 1; + */ + CARDINALITY_COMPUTE_LOW(1), + /** + *
    +     * Moderate effort will be made to determine cardinality, such as reading
    +     * index data from source files. If significant work is needed to compute
    +     * cardinality (e.g. reading entire source file contents or executing user
    +     * defined functions), Cardinality() will return UNKNOWN_CARDINALITY.
    +     * 
    + * + * CARDINALITY_COMPUTE_MODERATE = 2; + */ + CARDINALITY_COMPUTE_MODERATE(2), + UNRECOGNIZED(-1), + ; + + /** + * CARDINALITY_COMPUTE_UNSPECIFIED = 0; + */ + public static final int CARDINALITY_COMPUTE_UNSPECIFIED_VALUE = 0; + /** + *
    +     * Cardinality will only be computed if it can be determined in a cheap
    +     * manner (ie. without reading from file sources). If the cardinality would
    +     * be nontrivial to compute, Cardinality() will return UNKNOWN_CARDINALITY.
    +     * 
    + * + * CARDINALITY_COMPUTE_LOW = 1; + */ + public static final int CARDINALITY_COMPUTE_LOW_VALUE = 1; + /** + *
    +     * Moderate effort will be made to determine cardinality, such as reading
    +     * index data from source files. If significant work is needed to compute
    +     * cardinality (e.g. reading entire source file contents or executing user
    +     * defined functions), Cardinality() will return UNKNOWN_CARDINALITY.
    +     * 
    + * + * CARDINALITY_COMPUTE_MODERATE = 2; + */ + public static final int CARDINALITY_COMPUTE_MODERATE_VALUE = 2; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ComputeLevel valueOf(int value) { + return forNumber(value); + } + + public static ComputeLevel forNumber(int value) { + switch (value) { + case 0: return CARDINALITY_COMPUTE_UNSPECIFIED; + case 1: return CARDINALITY_COMPUTE_LOW; + case 2: return CARDINALITY_COMPUTE_MODERATE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + ComputeLevel> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ComputeLevel findValueByNumber(int number) { + return ComputeLevel.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.tensorflow.proto.data.CardinalityOptions.getDescriptor().getEnumTypes().get(0); + } + + private static final ComputeLevel[] VALUES = values(); + + public static ComputeLevel valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ComputeLevel(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:tensorflow.data.CardinalityOptions.ComputeLevel) + } + + public static final int COMPUTE_LEVEL_FIELD_NUMBER = 1; + private int computeLevel_; + /** + * .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1; + */ + public int getComputeLevelValue() { + return computeLevel_; + } + /** + * .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1; + */ + public org.tensorflow.proto.data.CardinalityOptions.ComputeLevel getComputeLevel() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.CardinalityOptions.ComputeLevel result = org.tensorflow.proto.data.CardinalityOptions.ComputeLevel.valueOf(computeLevel_); + return result == null ? 
org.tensorflow.proto.data.CardinalityOptions.ComputeLevel.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (computeLevel_ != org.tensorflow.proto.data.CardinalityOptions.ComputeLevel.CARDINALITY_COMPUTE_UNSPECIFIED.getNumber()) { + output.writeEnum(1, computeLevel_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (computeLevel_ != org.tensorflow.proto.data.CardinalityOptions.ComputeLevel.CARDINALITY_COMPUTE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, computeLevel_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.CardinalityOptions)) { + return super.equals(obj); + } + org.tensorflow.proto.data.CardinalityOptions other = (org.tensorflow.proto.data.CardinalityOptions) obj; + + if (computeLevel_ != other.computeLevel_) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + COMPUTE_LEVEL_FIELD_NUMBER; + hash = (53 * hash) + computeLevel_; + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.CardinalityOptions parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.CardinalityOptions parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.CardinalityOptions parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.CardinalityOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.CardinalityOptions parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.CardinalityOptions parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.CardinalityOptions parseFrom(java.io.InputStream input) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.CardinalityOptions parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.CardinalityOptions parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.CardinalityOptions parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.CardinalityOptions parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.CardinalityOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.CardinalityOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +   * next: 2
    +   * 
    + * + * Protobuf type {@code tensorflow.data.CardinalityOptions} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.CardinalityOptions) + org.tensorflow.proto.data.CardinalityOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_CardinalityOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_CardinalityOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.CardinalityOptions.class, org.tensorflow.proto.data.CardinalityOptions.Builder.class); + } + + // Construct using org.tensorflow.proto.data.CardinalityOptions.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + computeLevel_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DatasetOptionsProtos.internal_static_tensorflow_data_CardinalityOptions_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.CardinalityOptions getDefaultInstanceForType() { + return org.tensorflow.proto.data.CardinalityOptions.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.CardinalityOptions build() { + org.tensorflow.proto.data.CardinalityOptions result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.CardinalityOptions buildPartial() { + org.tensorflow.proto.data.CardinalityOptions result = new org.tensorflow.proto.data.CardinalityOptions(this); + result.computeLevel_ = computeLevel_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.CardinalityOptions) { + return 
mergeFrom((org.tensorflow.proto.data.CardinalityOptions)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.CardinalityOptions other) { + if (other == org.tensorflow.proto.data.CardinalityOptions.getDefaultInstance()) return this; + if (other.computeLevel_ != 0) { + setComputeLevelValue(other.getComputeLevelValue()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.CardinalityOptions parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.CardinalityOptions) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int computeLevel_ = 0; + /** + * .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1; + */ + public int getComputeLevelValue() { + return computeLevel_; + } + /** + * .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1; + */ + public Builder setComputeLevelValue(int value) { + computeLevel_ = value; + onChanged(); + return this; + } + /** + * .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1; + */ + public org.tensorflow.proto.data.CardinalityOptions.ComputeLevel getComputeLevel() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.CardinalityOptions.ComputeLevel result = org.tensorflow.proto.data.CardinalityOptions.ComputeLevel.valueOf(computeLevel_); + return result == null ? 
org.tensorflow.proto.data.CardinalityOptions.ComputeLevel.UNRECOGNIZED : result; + } + /** + * .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1; + */ + public Builder setComputeLevel(org.tensorflow.proto.data.CardinalityOptions.ComputeLevel value) { + if (value == null) { + throw new NullPointerException(); + } + + computeLevel_ = value.getNumber(); + onChanged(); + return this; + } + /** + * .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1; + */ + public Builder clearComputeLevel() { + + computeLevel_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.CardinalityOptions) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.CardinalityOptions) + private static final org.tensorflow.proto.data.CardinalityOptions DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.CardinalityOptions(); + } + + public static org.tensorflow.proto.data.CardinalityOptions getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CardinalityOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CardinalityOptions(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.CardinalityOptions getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/CardinalityOptionsOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/CardinalityOptionsOrBuilder.java new file mode 100644 index 00000000000..6e666b632bd --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/CardinalityOptionsOrBuilder.java @@ -0,0 +1,18 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
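CardinalityOptions is a new message whose single compute_level field selects how much work cardinality computation may spend, per the enum comments above. A round-trip sketch using only the generated builder and parser methods shown in this file:

    import org.tensorflow.proto.data.CardinalityOptions;
    import org.tensorflow.proto.data.CardinalityOptions.ComputeLevel;

    public final class CardinalityOptionsSketch {
      public static void main(String[] args) throws Exception {
        // Request cheap computation only; anything more expensive should
        // report UNKNOWN_CARDINALITY according to the comments above.
        CardinalityOptions options = CardinalityOptions.newBuilder()
            .setComputeLevel(ComputeLevel.CARDINALITY_COMPUTE_LOW)
            .build();

        byte[] wire = options.toByteArray();
        CardinalityOptions parsed = CardinalityOptions.parseFrom(wire);
        System.out.println(parsed.getComputeLevel());  // CARDINALITY_COMPUTE_LOW
      }
    }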
+// source: tensorflow/core/framework/dataset_options.proto + +package org.tensorflow.proto.data; + +public interface CardinalityOptionsOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.CardinalityOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1; + */ + int getComputeLevelValue(); + /** + * .tensorflow.data.CardinalityOptions.ComputeLevel compute_level = 1; + */ + org.tensorflow.proto.data.CardinalityOptions.ComputeLevel getComputeLevel(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DataService.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DataService.java index 207a8af5191..973055e60c1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DataService.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DataService.java @@ -14,6 +14,152 @@ public static void registerAllExtensions( registerAllExtensions( (com.google.protobuf.ExtensionRegistryLite) registry); } + /** + *
    +   * tf.data service deployment mode.
    +   * 
    + * + * Protobuf enum {@code tensorflow.data.DeploymentMode} + */ + public enum DeploymentMode + implements com.google.protobuf.ProtocolMessageEnum { + /** + * DEPLOYMENT_MODE_UNSPECIFIED = 0; + */ + DEPLOYMENT_MODE_UNSPECIFIED(0), + /** + *
    +     * tf.data service workers colocate with TF workers.
    +     * 
    + * + * DEPLOYMENT_MODE_COLOCATED = 1; + */ + DEPLOYMENT_MODE_COLOCATED(1), + /** + *
    +     * tf.data service workers run in dedicated tf.data hosts.
    +     * 
    + * + * DEPLOYMENT_MODE_REMOTE = 2; + */ + DEPLOYMENT_MODE_REMOTE(2), + /** + *
    +     * tf.data service workers run in colocated TF hosts and dedicated tf.data
    +     * hosts.
    +     * 
    + * + * DEPLOYMENT_MODE_HYBRID = 3; + */ + DEPLOYMENT_MODE_HYBRID(3), + UNRECOGNIZED(-1), + ; + + /** + * DEPLOYMENT_MODE_UNSPECIFIED = 0; + */ + public static final int DEPLOYMENT_MODE_UNSPECIFIED_VALUE = 0; + /** + *
    +     * tf.data service workers colocate with TF workers.
    +     * 
    + * + * DEPLOYMENT_MODE_COLOCATED = 1; + */ + public static final int DEPLOYMENT_MODE_COLOCATED_VALUE = 1; + /** + *
    +     * tf.data service workers run in dedicated tf.data hosts.
    +     * 
    + * + * DEPLOYMENT_MODE_REMOTE = 2; + */ + public static final int DEPLOYMENT_MODE_REMOTE_VALUE = 2; + /** + *
    +     * tf.data service workers run in colocated TF hosts and dedicated tf.data
    +     * hosts.
    +     * 
    + * + * DEPLOYMENT_MODE_HYBRID = 3; + */ + public static final int DEPLOYMENT_MODE_HYBRID_VALUE = 3; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DeploymentMode valueOf(int value) { + return forNumber(value); + } + + public static DeploymentMode forNumber(int value) { + switch (value) { + case 0: return DEPLOYMENT_MODE_UNSPECIFIED; + case 1: return DEPLOYMENT_MODE_COLOCATED; + case 2: return DEPLOYMENT_MODE_REMOTE; + case 3: return DEPLOYMENT_MODE_HYBRID; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + DeploymentMode> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DeploymentMode findValueByNumber(int number) { + return DeploymentMode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.tensorflow.proto.data.DataService.getDescriptor().getEnumTypes().get(0); + } + + private static final DeploymentMode[] VALUES = values(); + + public static DeploymentMode valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private DeploymentMode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:tensorflow.data.DeploymentMode) + } + public interface ProcessingModeDefOrBuilder extends // @@protoc_insertion_point(interface_extends:tensorflow.data.ProcessingModeDef) com.google.protobuf.MessageOrBuilder { @@ -28,6 +174,10 @@ public interface ProcessingModeDefOrBuilder extends org.tensorflow.proto.data.DataService.ProcessingModeDef.ShardingPolicy getShardingPolicy(); } /** + *
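The new DeploymentMode enum only carries the deployment topology (colocated, remote, or hybrid tf.data workers). The sketch below shows the generated number/enum mapping, which is how the value travels on the wire:

    import org.tensorflow.proto.data.DataService.DeploymentMode;

    public final class DeploymentModeSketch {
      public static void main(String[] args) {
        DeploymentMode mode = DeploymentMode.forNumber(3);  // DEPLOYMENT_MODE_HYBRID
        System.out.println(mode + " = " + mode.getNumber());
        System.out.println(DeploymentMode.forNumber(42));   // null for unknown numbers
      }
    }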
    +   * Next tag: 2
    +   * 
    + * * Protobuf type {@code tensorflow.data.ProcessingModeDef} */ public static final class ProcessingModeDef extends @@ -505,6 +655,10 @@ protected Builder newBuilderForType( return builder; } /** + *
    +     * Next tag: 2
    +     * 
    + * * Protobuf type {@code tensorflow.data.ProcessingModeDef} */ public static final class Builder extends @@ -748,41 +902,957 @@ public org.tensorflow.proto.data.DataService.ProcessingModeDef getDefaultInstanc } - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_tensorflow_data_ProcessingModeDef_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_tensorflow_data_ProcessingModeDef_fieldAccessorTable; + public interface DataServiceMetadataOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.data.DataServiceMetadata) + com.google.protobuf.MessageOrBuilder { - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; + /** + *
    +     * Serialized element spec.
    +     * 
    + * + * bytes element_spec = 1; + */ + com.google.protobuf.ByteString getElementSpec(); + + /** + * .tensorflow.data.DataServiceMetadata.Compression compression = 2; + */ + int getCompressionValue(); + /** + * .tensorflow.data.DataServiceMetadata.Compression compression = 2; + */ + org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression getCompression(); + + /** + *
    +     * Cardinality of the dataset.
    +     * 
    + * + * int64 cardinality = 3; + */ + long getCardinality(); + + public org.tensorflow.proto.data.DataService.DataServiceMetadata.OptionalElementSpecCase getOptionalElementSpecCase(); } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n+tensorflow/core/protobuf/data_service." + - "proto\022\017tensorflow.data\"\267\001\n\021ProcessingMod" + - "eDef\022J\n\017sharding_policy\030\001 \001(\01621.tensorfl" + - "ow.data.ProcessingModeDef.ShardingPolicy" + - "\"V\n\016ShardingPolicy\022\007\n\003OFF\020\000\022\013\n\007DYNAMIC\020\001" + - "\022\010\n\004FILE\020\002\022\010\n\004DATA\020\003\022\020\n\014FILE_OR_DATA\020\004\022\010" + - "\n\004HINT\020\005Br\n\031org.tensorflow.proto.dataZUg" + - "ithub.com/tensorflow/tensorflow/tensorfl" + - "ow/go/core/protobuf/for_core_protos_go_p" + - "rotob\006proto3" - }; - descriptor = com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - }); - internal_static_tensorflow_data_ProcessingModeDef_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_tensorflow_data_ProcessingModeDef_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_tensorflow_data_ProcessingModeDef_descriptor, - new java.lang.String[] { "ShardingPolicy", }); + /** + *
    +   * Metadata related to tf.data service datasets.
    +   * Next tag: 4
    +   * 
    + * + * Protobuf type {@code tensorflow.data.DataServiceMetadata} + */ + public static final class DataServiceMetadata extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.data.DataServiceMetadata) + DataServiceMetadataOrBuilder { + private static final long serialVersionUID = 0L; + // Use DataServiceMetadata.newBuilder() to construct. + private DataServiceMetadata(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private DataServiceMetadata() { + compression_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new DataServiceMetadata(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DataServiceMetadata( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + optionalElementSpecCase_ = 1; + optionalElementSpec_ = input.readBytes(); + break; + } + case 16: { + int rawValue = input.readEnum(); + + compression_ = rawValue; + break; + } + case 24: { + + cardinality_ = input.readInt64(); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DataService.internal_static_tensorflow_data_DataServiceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DataService.internal_static_tensorflow_data_DataServiceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.DataService.DataServiceMetadata.class, org.tensorflow.proto.data.DataService.DataServiceMetadata.Builder.class); + } + + /** + * Protobuf enum {@code tensorflow.data.DataServiceMetadata.Compression} + */ + public enum Compression + implements com.google.protobuf.ProtocolMessageEnum { + /** + * COMPRESSION_UNSPECIFIED = 0; + */ + COMPRESSION_UNSPECIFIED(0), + /** + *
    +       * No compression.
    +       * 
    + * + * OFF = 1; + */ + OFF(1), + /** + *
    +       * Snappy compression as defined in tensorflow/core/platform/snappy.h.
    +       * 
    + * + * SNAPPY = 2; + */ + SNAPPY(2), + UNRECOGNIZED(-1), + ; + + /** + * COMPRESSION_UNSPECIFIED = 0; + */ + public static final int COMPRESSION_UNSPECIFIED_VALUE = 0; + /** + *
    +       * No compression.
    +       * 
    + * + * OFF = 1; + */ + public static final int OFF_VALUE = 1; + /** + *
    +       * Snappy compression as defined in tensorflow/core/platform/snappy.h.
    +       * 
    + * + * SNAPPY = 2; + */ + public static final int SNAPPY_VALUE = 2; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Compression valueOf(int value) { + return forNumber(value); + } + + public static Compression forNumber(int value) { + switch (value) { + case 0: return COMPRESSION_UNSPECIFIED; + case 1: return OFF; + case 2: return SNAPPY; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + Compression> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Compression findValueByNumber(int number) { + return Compression.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.tensorflow.proto.data.DataService.DataServiceMetadata.getDescriptor().getEnumTypes().get(0); + } + + private static final Compression[] VALUES = values(); + + public static Compression valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Compression(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:tensorflow.data.DataServiceMetadata.Compression) + } + + private int optionalElementSpecCase_ = 0; + private java.lang.Object optionalElementSpec_; + public enum OptionalElementSpecCase + implements com.google.protobuf.Internal.EnumLite { + ELEMENT_SPEC(1), + OPTIONALELEMENTSPEC_NOT_SET(0); + private final int value; + private OptionalElementSpecCase(int value) { + this.value = value; + } + /** + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static OptionalElementSpecCase valueOf(int value) { + return forNumber(value); + } + + public static OptionalElementSpecCase forNumber(int value) { + switch (value) { + case 1: return ELEMENT_SPEC; + case 0: return OPTIONALELEMENTSPEC_NOT_SET; + default: return null; + } + } + public int getNumber() { + return this.value; + } + }; + + public OptionalElementSpecCase + getOptionalElementSpecCase() { + return OptionalElementSpecCase.forNumber( + optionalElementSpecCase_); + } + + public static final int ELEMENT_SPEC_FIELD_NUMBER = 1; + /** + *
    +     * Serialized element spec.
    +     * 
    + * + * bytes element_spec = 1; + */ + public com.google.protobuf.ByteString getElementSpec() { + if (optionalElementSpecCase_ == 1) { + return (com.google.protobuf.ByteString) optionalElementSpec_; + } + return com.google.protobuf.ByteString.EMPTY; + } + + public static final int COMPRESSION_FIELD_NUMBER = 2; + private int compression_; + /** + * .tensorflow.data.DataServiceMetadata.Compression compression = 2; + */ + public int getCompressionValue() { + return compression_; + } + /** + * .tensorflow.data.DataServiceMetadata.Compression compression = 2; + */ + public org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression getCompression() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression result = org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression.valueOf(compression_); + return result == null ? org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression.UNRECOGNIZED : result; + } + + public static final int CARDINALITY_FIELD_NUMBER = 3; + private long cardinality_; + /** + *
    +     * Cardinality of the dataset.
    +     * 
    + * + * int64 cardinality = 3; + */ + public long getCardinality() { + return cardinality_; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (optionalElementSpecCase_ == 1) { + output.writeBytes( + 1, (com.google.protobuf.ByteString) optionalElementSpec_); + } + if (compression_ != org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression.COMPRESSION_UNSPECIFIED.getNumber()) { + output.writeEnum(2, compression_); + } + if (cardinality_ != 0L) { + output.writeInt64(3, cardinality_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (optionalElementSpecCase_ == 1) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize( + 1, (com.google.protobuf.ByteString) optionalElementSpec_); + } + if (compression_ != org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression.COMPRESSION_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, compression_); + } + if (cardinality_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, cardinality_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.data.DataService.DataServiceMetadata)) { + return super.equals(obj); + } + org.tensorflow.proto.data.DataService.DataServiceMetadata other = (org.tensorflow.proto.data.DataService.DataServiceMetadata) obj; + + if (compression_ != other.compression_) return false; + if (getCardinality() + != other.getCardinality()) return false; + if (!getOptionalElementSpecCase().equals(other.getOptionalElementSpecCase())) return false; + switch (optionalElementSpecCase_) { + case 1: + if (!getElementSpec() + .equals(other.getElementSpec())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + COMPRESSION_FIELD_NUMBER; + hash = (53 * hash) + compression_; + hash = (37 * hash) + CARDINALITY_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getCardinality()); + switch (optionalElementSpecCase_) { + case 1: + hash = (37 * hash) + ELEMENT_SPEC_FIELD_NUMBER; + hash = (53 * hash) + getElementSpec().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.data.DataService.DataServiceMetadata parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.data.DataService.DataServiceMetadata prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +     * Metadata related to tf.data service datasets.
    +     * Next tag: 4
    +     * 
    + * + * Protobuf type {@code tensorflow.data.DataServiceMetadata} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.data.DataServiceMetadata) + org.tensorflow.proto.data.DataService.DataServiceMetadataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.data.DataService.internal_static_tensorflow_data_DataServiceMetadata_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.data.DataService.internal_static_tensorflow_data_DataServiceMetadata_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.data.DataService.DataServiceMetadata.class, org.tensorflow.proto.data.DataService.DataServiceMetadata.Builder.class); + } + + // Construct using org.tensorflow.proto.data.DataService.DataServiceMetadata.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + compression_ = 0; + + cardinality_ = 0L; + + optionalElementSpecCase_ = 0; + optionalElementSpec_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.data.DataService.internal_static_tensorflow_data_DataServiceMetadata_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.data.DataService.DataServiceMetadata getDefaultInstanceForType() { + return org.tensorflow.proto.data.DataService.DataServiceMetadata.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.data.DataService.DataServiceMetadata build() { + org.tensorflow.proto.data.DataService.DataServiceMetadata result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.data.DataService.DataServiceMetadata buildPartial() { + org.tensorflow.proto.data.DataService.DataServiceMetadata result = new org.tensorflow.proto.data.DataService.DataServiceMetadata(this); + if (optionalElementSpecCase_ == 1) { + result.optionalElementSpec_ = optionalElementSpec_; + } + result.compression_ = compression_; + result.cardinality_ = cardinality_; + result.optionalElementSpecCase_ = optionalElementSpecCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.data.DataService.DataServiceMetadata) { + return mergeFrom((org.tensorflow.proto.data.DataService.DataServiceMetadata)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.data.DataService.DataServiceMetadata other) { + if (other == org.tensorflow.proto.data.DataService.DataServiceMetadata.getDefaultInstance()) return this; + if (other.compression_ != 0) { + setCompressionValue(other.getCompressionValue()); + } + if (other.getCardinality() != 0L) { + setCardinality(other.getCardinality()); + } + switch (other.getOptionalElementSpecCase()) { + case ELEMENT_SPEC: { + setElementSpec(other.getElementSpec()); + break; + } + case OPTIONALELEMENTSPEC_NOT_SET: { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.data.DataService.DataServiceMetadata parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.data.DataService.DataServiceMetadata) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int optionalElementSpecCase_ = 0; + private java.lang.Object optionalElementSpec_; + public OptionalElementSpecCase + getOptionalElementSpecCase() { + return OptionalElementSpecCase.forNumber( + optionalElementSpecCase_); + } + + public Builder clearOptionalElementSpec() { + optionalElementSpecCase_ = 0; + optionalElementSpec_ = null; + onChanged(); + return this; + } + + + /** + *
    +       * Serialized element spec.
    +       * 
    + * + * bytes element_spec = 1; + */ + public com.google.protobuf.ByteString getElementSpec() { + if (optionalElementSpecCase_ == 1) { + return (com.google.protobuf.ByteString) optionalElementSpec_; + } + return com.google.protobuf.ByteString.EMPTY; + } + /** + *
    +       * Serialized element spec.
    +       * 
    + * + * bytes element_spec = 1; + */ + public Builder setElementSpec(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + optionalElementSpecCase_ = 1; + optionalElementSpec_ = value; + onChanged(); + return this; + } + /** + *
    +       * Serialized element spec.
    +       * 
    + * + * bytes element_spec = 1; + */ + public Builder clearElementSpec() { + if (optionalElementSpecCase_ == 1) { + optionalElementSpecCase_ = 0; + optionalElementSpec_ = null; + onChanged(); + } + return this; + } + + private int compression_ = 0; + /** + * .tensorflow.data.DataServiceMetadata.Compression compression = 2; + */ + public int getCompressionValue() { + return compression_; + } + /** + * .tensorflow.data.DataServiceMetadata.Compression compression = 2; + */ + public Builder setCompressionValue(int value) { + compression_ = value; + onChanged(); + return this; + } + /** + * .tensorflow.data.DataServiceMetadata.Compression compression = 2; + */ + public org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression getCompression() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression result = org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression.valueOf(compression_); + return result == null ? org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression.UNRECOGNIZED : result; + } + /** + * .tensorflow.data.DataServiceMetadata.Compression compression = 2; + */ + public Builder setCompression(org.tensorflow.proto.data.DataService.DataServiceMetadata.Compression value) { + if (value == null) { + throw new NullPointerException(); + } + + compression_ = value.getNumber(); + onChanged(); + return this; + } + /** + * .tensorflow.data.DataServiceMetadata.Compression compression = 2; + */ + public Builder clearCompression() { + + compression_ = 0; + onChanged(); + return this; + } + + private long cardinality_ ; + /** + *
    +       * Cardinality of the dataset.
    +       * 
    + * + * int64 cardinality = 3; + */ + public long getCardinality() { + return cardinality_; + } + /** + *
    +       * Cardinality of the dataset.
    +       * 
    + * + * int64 cardinality = 3; + */ + public Builder setCardinality(long value) { + + cardinality_ = value; + onChanged(); + return this; + } + /** + *
    +       * Cardinality of the dataset.
    +       * 
    + * + * int64 cardinality = 3; + */ + public Builder clearCardinality() { + + cardinality_ = 0L; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.data.DataServiceMetadata) + } + + // @@protoc_insertion_point(class_scope:tensorflow.data.DataServiceMetadata) + private static final org.tensorflow.proto.data.DataService.DataServiceMetadata DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.data.DataService.DataServiceMetadata(); + } + + public static org.tensorflow.proto.data.DataService.DataServiceMetadata getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DataServiceMetadata parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DataServiceMetadata(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.data.DataService.DataServiceMetadata getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_ProcessingModeDef_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_ProcessingModeDef_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_DataServiceMetadata_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_DataServiceMetadata_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n+tensorflow/core/protobuf/data_service." + + "proto\022\017tensorflow.data\"\267\001\n\021ProcessingMod" + + "eDef\022J\n\017sharding_policy\030\001 \001(\01621.tensorfl" + + "ow.data.ProcessingModeDef.ShardingPolicy" + + "\"V\n\016ShardingPolicy\022\007\n\003OFF\020\000\022\013\n\007DYNAMIC\020\001" + + "\022\010\n\004FILE\020\002\022\010\n\004DATA\020\003\022\020\n\014FILE_OR_DATA\020\004\022\010" + + "\n\004HINT\020\005\"\343\001\n\023DataServiceMetadata\022\026\n\014elem" + + "ent_spec\030\001 \001(\014H\000\022E\n\013compression\030\002 \001(\01620." 
+ + "tensorflow.data.DataServiceMetadata.Comp" + + "ression\022\023\n\013cardinality\030\003 \001(\003\"?\n\013Compress" + + "ion\022\033\n\027COMPRESSION_UNSPECIFIED\020\000\022\007\n\003OFF\020" + + "\001\022\n\n\006SNAPPY\020\002B\027\n\025optional_element_spec*\210" + + "\001\n\016DeploymentMode\022\037\n\033DEPLOYMENT_MODE_UNS" + + "PECIFIED\020\000\022\035\n\031DEPLOYMENT_MODE_COLOCATED\020" + + "\001\022\032\n\026DEPLOYMENT_MODE_REMOTE\020\002\022\032\n\026DEPLOYM" + + "ENT_MODE_HYBRID\020\003Br\n\031org.tensorflow.prot" + + "o.dataZUgithub.com/tensorflow/tensorflow" + + "/tensorflow/go/core/protobuf/for_core_pr" + + "otos_go_protob\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }); + internal_static_tensorflow_data_ProcessingModeDef_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_data_ProcessingModeDef_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_ProcessingModeDef_descriptor, + new java.lang.String[] { "ShardingPolicy", }); + internal_static_tensorflow_data_DataServiceMetadata_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_tensorflow_data_DataServiceMetadata_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_DataServiceMetadata_descriptor, + new java.lang.String[] { "ElementSpec", "Compression", "Cardinality", "OptionalElementSpec", }); } // @@protoc_insertion_point(outer_class_scope) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java index 78ec0c7a58f..33a72c8d536 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/DatasetOptionsProtos.java @@ -19,6 +19,11 @@ public static void registerAllExtensions( static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_tensorflow_data_AutotuneOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_data_CardinalityOptions_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_data_CardinalityOptions_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_tensorflow_data_DistributeOptions_descriptor; static final @@ -49,88 +54,105 @@ public static void registerAllExtensions( static { java.lang.String[] descriptorData = { "\n/tensorflow/core/framework/dataset_opti" + - "ons.proto\022\017tensorflow.data\"\222\001\n\017AutotuneO" + - "ptions\022\021\n\007enabled\030\001 \001(\010H\000\022\024\n\ncpu_budget\030" + - "\002 \001(\005H\001\022\024\n\nram_budget\030\003 \001(\003H\002B\022\n\020optiona" + - "l_enabledB\025\n\023optional_cpu_budgetB\025\n\023opti" + - "onal_ram_budget\"\177\n\021DistributeOptions\022;\n\021" + - "auto_shard_policy\030\001 \001(\0162 .tensorflow.dat" + - "a.AutoShardPolicy\022\025\n\013num_devices\030\002 \001(\005H\000" + - "B\026\n\024optional_num_devices\"\360\004\n\023Optimizatio" + - "nOptions\022%\n\033apply_default_optimizations\030" + - "\001 
\001(\010H\000\022\027\n\rfilter_fusion\030\006 \001(\010H\001\022\036\n\024map_" + - "and_batch_fusion\030\t \001(\010H\002\022\037\n\025map_and_filt" + - "er_fusion\030\n \001(\010H\003\022\024\n\nmap_fusion\030\013 \001(\010H\004\022" + - "\035\n\023map_parallelization\030\014 \001(\010H\005\022\032\n\020noop_e" + - "limination\030\016 \001(\010H\006\022\030\n\016parallel_batch\030\017 \001" + - "(\010H\007\022#\n\031shuffle_and_repeat_fusion\030\021 \001(\010H" + - "\010B&\n$optional_apply_default_optimization" + - "sB\030\n\026optional_filter_fusionB\037\n\035optional_" + - "map_and_batch_fusionB \n\036optional_map_and" + - "_filter_fusionB\025\n\023optional_map_fusionB\036\n" + - "\034optional_map_parallelizationB\033\n\031optiona" + - "l_noop_eliminationB\031\n\027optional_parallel_" + - "batchB$\n\"optional_shuffle_and_repeat_fus" + - "ionJ\004\010\002\020\003J\004\010\003\020\004J\004\010\004\020\005J\004\010\005\020\006J\004\010\007\020\010J\004\010\010\020\tJ" + - "\004\010\r\020\016J\004\010\020\020\021\"\242\001\n\020ThreadingOptions\022\"\n\030max_" + - "intra_op_parallelism\030\001 \001(\005H\000\022!\n\027private_" + - "threadpool_size\030\002 \001(\005H\001B#\n!optional_max_" + - "intra_op_parallelismB\"\n optional_private" + - "_threadpool_size\"\306\003\n\007Options\022\027\n\rdetermin" + - "istic\030\001 \001(\010H\000\022:\n\020autotune_options\030\007 \001(\0132" + - " .tensorflow.data.AutotuneOptions\022>\n\022dis" + - "tribute_options\030\002 \001(\0132\".tensorflow.data." + - "DistributeOptions\022B\n\024optimization_option" + - "s\030\003 \001(\0132$.tensorflow.data.OptimizationOp" + - "tions\022\017\n\005slack\030\004 \001(\010H\001\022<\n\021threading_opti" + - "ons\030\005 \001(\0132!.tensorflow.data.ThreadingOpt" + - "ions\022E\n\025external_state_policy\030\006 \001(\0162$.te" + - "nsorflow.data.ExternalStatePolicyH\002B\030\n\026o" + - "ptional_deterministicB\020\n\016optional_slackB" + - " \n\036optional_external_state_policy*K\n\017Aut" + - "oShardPolicy\022\010\n\004AUTO\020\000\022\010\n\004FILE\020\001\022\010\n\004DATA" + - "\020\002\022\010\n\004HINT\020\003\022\020\n\003OFF\020\377\377\377\377\377\377\377\377\377\001*J\n\023Extern" + - "alStatePolicy\022\017\n\013POLICY_WARN\020\000\022\021\n\rPOLICY" + - "_IGNORE\020\001\022\017\n\013POLICY_FAIL\020\002B\213\001\n\031org.tenso" + - "rflow.proto.dataB\024DatasetOptionsProtosP\001" + - "ZVgithub.com/tensorflow/tensorflow/tenso" + - "rflow/go/core/framework/dataset_options_" + - "go_protob\006proto3" + "ons.proto\022\017tensorflow.data\032%tensorflow/c" + + "ore/framework/model.proto\"\371\001\n\017AutotuneOp" + + "tions\022\021\n\007enabled\030\001 \001(\010H\000\022\024\n\ncpu_budget\030\002" + + " \001(\005H\001\022\024\n\nram_budget\030\003 \001(\003H\002\022F\n\022autotune" + + "_algorithm\030\004 \001(\0162(.tensorflow.data.model" + + ".AutotuneAlgorithmH\003B\022\n\020optional_enabled" + + "B\025\n\023optional_cpu_budgetB\025\n\023optional_ram_" + + "budgetB\035\n\033optional_autotune_algorithm\"\321\001" + + "\n\022CardinalityOptions\022G\n\rcompute_level\030\001 " + + "\001(\01620.tensorflow.data.CardinalityOptions" + + ".ComputeLevel\"r\n\014ComputeLevel\022#\n\037CARDINA" + + "LITY_COMPUTE_UNSPECIFIED\020\000\022\033\n\027CARDINALIT" + + "Y_COMPUTE_LOW\020\001\022 \n\034CARDINALITY_COMPUTE_M" + + "ODERATE\020\002\"\177\n\021DistributeOptions\022;\n\021auto_s" + + "hard_policy\030\001 \001(\0162 .tensorflow.data.Auto" + + "ShardPolicy\022\025\n\013num_devices\030\002 
\001(\005H\000B\026\n\024op" + + "tional_num_devices\"\360\004\n\023OptimizationOptio" + + "ns\022%\n\033apply_default_optimizations\030\001 \001(\010H" + + "\000\022\027\n\rfilter_fusion\030\006 \001(\010H\001\022\036\n\024map_and_ba" + + "tch_fusion\030\t \001(\010H\002\022\037\n\025map_and_filter_fus" + + "ion\030\n \001(\010H\003\022\024\n\nmap_fusion\030\013 \001(\010H\004\022\035\n\023map" + + "_parallelization\030\014 \001(\010H\005\022\032\n\020noop_elimina" + + "tion\030\016 \001(\010H\006\022\030\n\016parallel_batch\030\017 \001(\010H\007\022#" + + "\n\031shuffle_and_repeat_fusion\030\021 \001(\010H\010B&\n$o" + + "ptional_apply_default_optimizationsB\030\n\026o" + + "ptional_filter_fusionB\037\n\035optional_map_an" + + "d_batch_fusionB \n\036optional_map_and_filte" + + "r_fusionB\025\n\023optional_map_fusionB\036\n\034optio" + + "nal_map_parallelizationB\033\n\031optional_noop" + + "_eliminationB\031\n\027optional_parallel_batchB" + + "$\n\"optional_shuffle_and_repeat_fusionJ\004\010" + + "\002\020\003J\004\010\003\020\004J\004\010\004\020\005J\004\010\005\020\006J\004\010\007\020\010J\004\010\010\020\tJ\004\010\r\020\016J" + + "\004\010\020\020\021\"\242\001\n\020ThreadingOptions\022\"\n\030max_intra_" + + "op_parallelism\030\001 \001(\005H\000\022!\n\027private_thread" + + "pool_size\030\002 \001(\005H\001B#\n!optional_max_intra_" + + "op_parallelismB\"\n optional_private_threa" + + "dpool_size\"\306\003\n\007Options\022\027\n\rdeterministic\030" + + "\001 \001(\010H\000\022:\n\020autotune_options\030\007 \001(\0132 .tens" + + "orflow.data.AutotuneOptions\022>\n\022distribut" + + "e_options\030\002 \001(\0132\".tensorflow.data.Distri" + + "buteOptions\022B\n\024optimization_options\030\003 \001(" + + "\0132$.tensorflow.data.OptimizationOptions\022" + + "\017\n\005slack\030\004 \001(\010H\001\022<\n\021threading_options\030\005 " + + "\001(\0132!.tensorflow.data.ThreadingOptions\022E" + + "\n\025external_state_policy\030\006 \001(\0162$.tensorfl" + + "ow.data.ExternalStatePolicyH\002B\030\n\026optiona" + + "l_deterministicB\020\n\016optional_slackB \n\036opt" + + "ional_external_state_policy*K\n\017AutoShard" + + "Policy\022\010\n\004AUTO\020\000\022\010\n\004FILE\020\001\022\010\n\004DATA\020\002\022\010\n\004" + + "HINT\020\003\022\020\n\003OFF\020\377\377\377\377\377\377\377\377\377\001*J\n\023ExternalStat" + + "ePolicy\022\017\n\013POLICY_WARN\020\000\022\021\n\rPOLICY_IGNOR" + + "E\020\001\022\017\n\013POLICY_FAIL\020\002B\213\001\n\031org.tensorflow." 
+ + "proto.dataB\024DatasetOptionsProtosP\001ZVgith" + + "ub.com/tensorflow/tensorflow/tensorflow/" + + "go/core/framework/dataset_options_go_pro" + + "tob\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + org.tensorflow.proto.data.model.ModelProtos.getDescriptor(), }); internal_static_tensorflow_data_AutotuneOptions_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_tensorflow_data_AutotuneOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_AutotuneOptions_descriptor, - new java.lang.String[] { "Enabled", "CpuBudget", "RamBudget", "OptionalEnabled", "OptionalCpuBudget", "OptionalRamBudget", }); - internal_static_tensorflow_data_DistributeOptions_descriptor = + new java.lang.String[] { "Enabled", "CpuBudget", "RamBudget", "AutotuneAlgorithm", "OptionalEnabled", "OptionalCpuBudget", "OptionalRamBudget", "OptionalAutotuneAlgorithm", }); + internal_static_tensorflow_data_CardinalityOptions_descriptor = getDescriptor().getMessageTypes().get(1); + internal_static_tensorflow_data_CardinalityOptions_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_data_CardinalityOptions_descriptor, + new java.lang.String[] { "ComputeLevel", }); + internal_static_tensorflow_data_DistributeOptions_descriptor = + getDescriptor().getMessageTypes().get(2); internal_static_tensorflow_data_DistributeOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_DistributeOptions_descriptor, new java.lang.String[] { "AutoShardPolicy", "NumDevices", "OptionalNumDevices", }); internal_static_tensorflow_data_OptimizationOptions_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(3); internal_static_tensorflow_data_OptimizationOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_OptimizationOptions_descriptor, new java.lang.String[] { "ApplyDefaultOptimizations", "FilterFusion", "MapAndBatchFusion", "MapAndFilterFusion", "MapFusion", "MapParallelization", "NoopElimination", "ParallelBatch", "ShuffleAndRepeatFusion", "OptionalApplyDefaultOptimizations", "OptionalFilterFusion", "OptionalMapAndBatchFusion", "OptionalMapAndFilterFusion", "OptionalMapFusion", "OptionalMapParallelization", "OptionalNoopElimination", "OptionalParallelBatch", "OptionalShuffleAndRepeatFusion", }); internal_static_tensorflow_data_ThreadingOptions_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(4); internal_static_tensorflow_data_ThreadingOptions_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_ThreadingOptions_descriptor, new java.lang.String[] { "MaxIntraOpParallelism", "PrivateThreadpoolSize", "OptionalMaxIntraOpParallelism", "OptionalPrivateThreadpoolSize", }); internal_static_tensorflow_data_Options_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_tensorflow_data_Options_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_data_Options_descriptor, new java.lang.String[] { "Deterministic", "AutotuneOptions", "DistributeOptions", "OptimizationOptions", 
"Slack", "ThreadingOptions", "ExternalStatePolicy", "OptionalDeterministic", "OptionalSlack", "OptionalExternalStatePolicy", }); + org.tensorflow.proto.data.model.ModelProtos.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java index 06ab63a0693..ff21d54a354 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/experimental/ServiceConfig.java @@ -82,8 +82,6 @@ public interface DispatcherConfigOrBuilder extends * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -96,8 +94,6 @@ public interface DispatcherConfigOrBuilder extends * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -109,8 +105,6 @@ public interface DispatcherConfigOrBuilder extends * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -122,8 +116,6 @@ public interface DispatcherConfigOrBuilder extends * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -131,6 +123,25 @@ public interface DispatcherConfigOrBuilder extends com.google.protobuf.ByteString getWorkerAddressesBytes(int index); + /** + *
    +     * (Optional.) tf.data service deployment mode. Supported values are "REMOTE",
    +     * "COLOCATED", and "HYBRID". If unspecified, it is assumed to be "REMOTE".
    +     * 
    + * + * .tensorflow.data.DeploymentMode deployment_mode = 9; + */ + int getDeploymentModeValue(); + /** + *
    +     * (Optional.) tf.data service deployment mode. Supported values are "REMOTE",
    +     * "COLOCATED", and "HYBRID". If unspecified, it is assumed to be "REMOTE".
    +     * 
    + * + * .tensorflow.data.DeploymentMode deployment_mode = 9; + */ + org.tensorflow.proto.data.DataService.DeploymentMode getDeploymentMode(); + /** *
          * How often the dispatcher should scan through to delete old and unused
    @@ -168,7 +179,7 @@ public interface DispatcherConfigOrBuilder extends
       /**
        * 
        * Configuration for a tf.data service DispatchServer.
    -   * Next id: 9
    +   * Next id: 10
        * 
    * * Protobuf type {@code tensorflow.data.experimental.DispatcherConfig} @@ -186,6 +197,7 @@ private DispatcherConfig() { protocol_ = ""; workDir_ = ""; workerAddresses_ = com.google.protobuf.LazyStringArrayList.EMPTY; + deploymentMode_ = 0; } @java.lang.Override @@ -265,6 +277,12 @@ private DispatcherConfig( clientTimeoutMs_ = input.readInt64(); break; } + case 72: { + int rawValue = input.readEnum(); + + deploymentMode_ = rawValue; + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -422,8 +440,6 @@ public boolean getFaultTolerantMode() { * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. *
    * * repeated string worker_addresses = 7; @@ -438,8 +454,6 @@ public boolean getFaultTolerantMode() { * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -453,8 +467,6 @@ public int getWorkerAddressesCount() { * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -468,8 +480,6 @@ public java.lang.String getWorkerAddresses(int index) { * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -479,6 +489,33 @@ public java.lang.String getWorkerAddresses(int index) { return workerAddresses_.getByteString(index); } + public static final int DEPLOYMENT_MODE_FIELD_NUMBER = 9; + private int deploymentMode_; + /** + *
    +     * (Optional.) tf.data service deployment mode. Supported values are "REMOTE",
    +     * "COLOCATED", and "HYBRID". If unspecified, it is assumed to be "REMOTE".
    +     * 
    + * + * .tensorflow.data.DeploymentMode deployment_mode = 9; + */ + public int getDeploymentModeValue() { + return deploymentMode_; + } + /** + *
    +     * (Optional.) tf.data service deployment mode. Supported values are "REMOTE",
    +     * "COLOCATED", and "HYBRID". If unspecified, it is assumed to be "REMOTE".
    +     * 
    + * + * .tensorflow.data.DeploymentMode deployment_mode = 9; + */ + public org.tensorflow.proto.data.DataService.DeploymentMode getDeploymentMode() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.DataService.DeploymentMode result = org.tensorflow.proto.data.DataService.DeploymentMode.valueOf(deploymentMode_); + return result == null ? org.tensorflow.proto.data.DataService.DeploymentMode.UNRECOGNIZED : result; + } + public static final int JOB_GC_CHECK_INTERVAL_MS_FIELD_NUMBER = 5; private long jobGcCheckIntervalMs_; /** @@ -563,6 +600,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (clientTimeoutMs_ != 0L) { output.writeInt64(8, clientTimeoutMs_); } + if (deploymentMode_ != org.tensorflow.proto.data.DataService.DeploymentMode.DEPLOYMENT_MODE_UNSPECIFIED.getNumber()) { + output.writeEnum(9, deploymentMode_); + } unknownFields.writeTo(output); } @@ -606,6 +646,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt64Size(8, clientTimeoutMs_); } + if (deploymentMode_ != org.tensorflow.proto.data.DataService.DeploymentMode.DEPLOYMENT_MODE_UNSPECIFIED.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(9, deploymentMode_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -631,6 +675,7 @@ public boolean equals(final java.lang.Object obj) { != other.getFaultTolerantMode()) return false; if (!getWorkerAddressesList() .equals(other.getWorkerAddressesList())) return false; + if (deploymentMode_ != other.deploymentMode_) return false; if (getJobGcCheckIntervalMs() != other.getJobGcCheckIntervalMs()) return false; if (getJobGcTimeoutMs() @@ -662,6 +707,8 @@ public int hashCode() { hash = (37 * hash) + WORKER_ADDRESSES_FIELD_NUMBER; hash = (53 * hash) + getWorkerAddressesList().hashCode(); } + hash = (37 * hash) + DEPLOYMENT_MODE_FIELD_NUMBER; + hash = (53 * hash) + deploymentMode_; hash = (37 * hash) + JOB_GC_CHECK_INTERVAL_MS_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getJobGcCheckIntervalMs()); @@ -769,7 +816,7 @@ protected Builder newBuilderForType( /** *
          * Configuration for a tf.data service DispatchServer.
    -     * Next id: 9
    +     * Next id: 10
          * 
    * * Protobuf type {@code tensorflow.data.experimental.DispatcherConfig} @@ -819,6 +866,8 @@ public Builder clear() { workerAddresses_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); + deploymentMode_ = 0; + jobGcCheckIntervalMs_ = 0L; jobGcTimeoutMs_ = 0L; @@ -861,6 +910,7 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.DispatcherConfig bui bitField0_ = (bitField0_ & ~0x00000001); } result.workerAddresses_ = workerAddresses_; + result.deploymentMode_ = deploymentMode_; result.jobGcCheckIntervalMs_ = jobGcCheckIntervalMs_; result.jobGcTimeoutMs_ = jobGcTimeoutMs_; result.clientTimeoutMs_ = clientTimeoutMs_; @@ -936,6 +986,9 @@ public Builder mergeFrom(org.tensorflow.proto.data.experimental.ServiceConfig.Di } onChanged(); } + if (other.deploymentMode_ != 0) { + setDeploymentModeValue(other.getDeploymentModeValue()); + } if (other.getJobGcCheckIntervalMs() != 0L) { setJobGcCheckIntervalMs(other.getJobGcCheckIntervalMs()); } @@ -1253,8 +1306,6 @@ private void ensureWorkerAddressesIsMutable() { * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -1269,8 +1320,6 @@ private void ensureWorkerAddressesIsMutable() { * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -1284,8 +1333,6 @@ public int getWorkerAddressesCount() { * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -1299,8 +1346,6 @@ public java.lang.String getWorkerAddresses(int index) { * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -1315,8 +1360,6 @@ public java.lang.String getWorkerAddresses(int index) { * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -1337,8 +1380,6 @@ public Builder setWorkerAddresses( * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. 
* * * repeated string worker_addresses = 7; @@ -1359,8 +1400,6 @@ public Builder addWorkerAddresses( * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -1379,8 +1418,6 @@ public Builder addAllWorkerAddresses( * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -1397,8 +1434,6 @@ public Builder clearWorkerAddresses() { * of worker addresses that will register with the dispatcher. The worker * addresses should be in the format "host" or "host:port", where "port" is an * integer, named port, or %port% to match any port. - * TODO(yangchen): Also add a deployment mode flag. Auto-sharding will only be - * allowed in the "COLOCATED" mode. * * * repeated string worker_addresses = 7; @@ -1415,6 +1450,76 @@ public Builder addWorkerAddressesBytes( return this; } + private int deploymentMode_ = 0; + /** + *
    +       * (Optional.) tf.data service deployment mode. Supported values are "REMOTE",
    +       * "COLOCATED", and "HYBRID". If unspecified, it is assumed to be "REMOTE".
    +       * 
    + * + * .tensorflow.data.DeploymentMode deployment_mode = 9; + */ + public int getDeploymentModeValue() { + return deploymentMode_; + } + /** + *
    +       * (Optional.) tf.data service deployment mode. Supported values are "REMOTE",
    +       * "COLOCATED", and "HYBRID". If unspecified, it is assumed to be "REMOTE".
    +       * 
    + * + * .tensorflow.data.DeploymentMode deployment_mode = 9; + */ + public Builder setDeploymentModeValue(int value) { + deploymentMode_ = value; + onChanged(); + return this; + } + /** + *
    +       * (Optional.) tf.data service deployment mode. Supported values are "REMOTE",
    +       * "COLOCATED", and "HYBRID". If unspecified, it is assumed to be "REMOTE".
    +       * 
    + * + * .tensorflow.data.DeploymentMode deployment_mode = 9; + */ + public org.tensorflow.proto.data.DataService.DeploymentMode getDeploymentMode() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.data.DataService.DeploymentMode result = org.tensorflow.proto.data.DataService.DeploymentMode.valueOf(deploymentMode_); + return result == null ? org.tensorflow.proto.data.DataService.DeploymentMode.UNRECOGNIZED : result; + } + /** + *
    +       * (Optional.) tf.data service deployment mode. Supported values are "REMOTE",
    +       * "COLOCATED", and "HYBRID". If unspecified, it is assumed to be "REMOTE".
    +       * 
    + * + * .tensorflow.data.DeploymentMode deployment_mode = 9; + */ + public Builder setDeploymentMode(org.tensorflow.proto.data.DataService.DeploymentMode value) { + if (value == null) { + throw new NullPointerException(); + } + + deploymentMode_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
    +       * (Optional.) tf.data service deployment mode. Supported values are "REMOTE",
    +       * "COLOCATED", and "HYBRID". If unspecified, it is assumed to be "REMOTE".
    +       * 
    + * + * .tensorflow.data.DeploymentMode deployment_mode = 9; + */ + public Builder clearDeploymentMode() { + + deploymentMode_ = 0; + onChanged(); + return this; + } + private long jobGcCheckIntervalMs_ ; /** *
    @@ -3624,40 +3729,45 @@ public org.tensorflow.proto.data.experimental.ServiceConfig.WorkerConfig getDefa
       static {
         java.lang.String[] descriptorData = {
           "\n-tensorflow/core/protobuf/service_confi" +
    -      "g.proto\022\034tensorflow.data.experimental\"\323\001" +
    -      "\n\020DispatcherConfig\022\014\n\004port\030\001 \001(\003\022\020\n\010prot" +
    -      "ocol\030\002 \001(\t\022\020\n\010work_dir\030\003 \001(\t\022\033\n\023fault_to" +
    -      "lerant_mode\030\004 \001(\010\022\030\n\020worker_addresses\030\007 " +
    -      "\003(\t\022 \n\030job_gc_check_interval_ms\030\005 \001(\003\022\031\n" +
    -      "\021job_gc_timeout_ms\030\006 \001(\003\022\031\n\021client_timeo" +
    -      "ut_ms\030\010 \001(\003\"\226\002\n\014WorkerConfig\022\014\n\004port\030\001 \001" +
    -      "(\003\022\020\n\010protocol\030\002 \001(\t\022\032\n\022dispatcher_addre" +
    -      "ss\030\003 \001(\t\022\026\n\016worker_address\030\004 \001(\t\022\023\n\013work" +
    -      "er_tags\030\n \003(\t\022\035\n\025heartbeat_interval_ms\030\005" +
    -      " \001(\003\022\035\n\025dispatcher_timeout_ms\030\006 \001(\003\022\036\n\026d" +
    -      "ata_transfer_protocol\030\007 \001(\t\022\035\n\025data_tran" +
    -      "sfer_address\030\010 \001(\t\022 \n\030shutdown_quiet_per" +
    -      "iod_ms\030\t \001(\003B\177\n&org.tensorflow.proto.dat" +
    -      "a.experimentalZUgithub.com/tensorflow/te" +
    -      "nsorflow/tensorflow/go/core/protobuf/for" +
    -      "_core_protos_go_protob\006proto3"
    +      "g.proto\022\034tensorflow.data.experimental\032+t" +
    +      "ensorflow/core/protobuf/data_service.pro" +
    +      "to\"\215\002\n\020DispatcherConfig\022\014\n\004port\030\001 \001(\003\022\020\n" +
    +      "\010protocol\030\002 \001(\t\022\020\n\010work_dir\030\003 \001(\t\022\033\n\023fau" +
    +      "lt_tolerant_mode\030\004 \001(\010\022\030\n\020worker_address" +
    +      "es\030\007 \003(\t\0228\n\017deployment_mode\030\t \001(\0162\037.tens" +
    +      "orflow.data.DeploymentMode\022 \n\030job_gc_che" +
    +      "ck_interval_ms\030\005 \001(\003\022\031\n\021job_gc_timeout_m" +
    +      "s\030\006 \001(\003\022\031\n\021client_timeout_ms\030\010 \001(\003\"\226\002\n\014W" +
    +      "orkerConfig\022\014\n\004port\030\001 \001(\003\022\020\n\010protocol\030\002 " +
    +      "\001(\t\022\032\n\022dispatcher_address\030\003 \001(\t\022\026\n\016worke" +
    +      "r_address\030\004 \001(\t\022\023\n\013worker_tags\030\n \003(\t\022\035\n\025" +
    +      "heartbeat_interval_ms\030\005 \001(\003\022\035\n\025dispatche" +
    +      "r_timeout_ms\030\006 \001(\003\022\036\n\026data_transfer_prot" +
    +      "ocol\030\007 \001(\t\022\035\n\025data_transfer_address\030\010 \001(" +
    +      "\t\022 \n\030shutdown_quiet_period_ms\030\t \001(\003B\177\n&o" +
    +      "rg.tensorflow.proto.data.experimentalZUg" +
    +      "ithub.com/tensorflow/tensorflow/tensorfl" +
    +      "ow/go/core/protobuf/for_core_protos_go_p" +
    +      "rotob\006proto3"
         };
         descriptor = com.google.protobuf.Descriptors.FileDescriptor
           .internalBuildGeneratedFileFrom(descriptorData,
             new com.google.protobuf.Descriptors.FileDescriptor[] {
    +          org.tensorflow.proto.data.DataService.getDescriptor(),
             });
         internal_static_tensorflow_data_experimental_DispatcherConfig_descriptor =
           getDescriptor().getMessageTypes().get(0);
         internal_static_tensorflow_data_experimental_DispatcherConfig_fieldAccessorTable = new
           com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
             internal_static_tensorflow_data_experimental_DispatcherConfig_descriptor,
    -        new java.lang.String[] { "Port", "Protocol", "WorkDir", "FaultTolerantMode", "WorkerAddresses", "JobGcCheckIntervalMs", "JobGcTimeoutMs", "ClientTimeoutMs", });
    +        new java.lang.String[] { "Port", "Protocol", "WorkDir", "FaultTolerantMode", "WorkerAddresses", "DeploymentMode", "JobGcCheckIntervalMs", "JobGcTimeoutMs", "ClientTimeoutMs", });
         internal_static_tensorflow_data_experimental_WorkerConfig_descriptor =
           getDescriptor().getMessageTypes().get(1);
         internal_static_tensorflow_data_experimental_WorkerConfig_fieldAccessorTable = new
           com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
             internal_static_tensorflow_data_experimental_WorkerConfig_descriptor,
             new java.lang.String[] { "Port", "Protocol", "DispatcherAddress", "WorkerAddress", "WorkerTags", "HeartbeatIntervalMs", "DispatcherTimeoutMs", "DataTransferProtocol", "DataTransferAddress", "ShutdownQuietPeriodMs", });
    +    org.tensorflow.proto.data.DataService.getDescriptor();
       }
     
       // @@protoc_insertion_point(outer_class_scope)
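For reference, a minimal sketch of driving the new deployment_mode field through the regenerated bindings above, assuming they are on the classpath; the port value and the wrapper class name are illustrative only and not part of this change.

import org.tensorflow.proto.data.DataService.DeploymentMode;
import org.tensorflow.proto.data.experimental.ServiceConfig.DispatcherConfig;

public class DispatcherConfigExample {
  public static void main(String[] args) {
    // Build a dispatcher config that sets the new field 9 (deployment_mode).
    DispatcherConfig config = DispatcherConfig.newBuilder()
        .setPort(5050) // illustrative value, not taken from this change
        .setDeploymentMode(DeploymentMode.DEPLOYMENT_MODE_COLOCATED)
        .build();
    // An unset deployment_mode reads back as DEPLOYMENT_MODE_UNSPECIFIED (value 0).
    System.out.println(config.getDeploymentMode());
  }
}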
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/AutotuneAlgorithm.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/AutotuneAlgorithm.java
    index 773c76aff89..8b63eb05383 100644
    --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/AutotuneAlgorithm.java
    +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/AutotuneAlgorithm.java
    @@ -13,24 +13,40 @@
     public enum AutotuneAlgorithm
         implements com.google.protobuf.ProtocolMessageEnum {
       /**
    -   * HILL_CLIMB = 0;
    +   * DEFAULT = 0;
        */
    -  HILL_CLIMB(0),
    +  DEFAULT(0),
       /**
    -   * GRADIENT_DESCENT = 1;
    +   * HILL_CLIMB = 1;
        */
    -  GRADIENT_DESCENT(1),
    +  HILL_CLIMB(1),
    +  /**
    +   * GRADIENT_DESCENT = 2;
    +   */
    +  GRADIENT_DESCENT(2),
    +  /**
    +   * MAX_PARALLELISM = 3;
    +   */
    +  MAX_PARALLELISM(3),
       UNRECOGNIZED(-1),
       ;
     
       /**
    -   * HILL_CLIMB = 0;
    +   * DEFAULT = 0;
    +   */
    +  public static final int DEFAULT_VALUE = 0;
    +  /**
    +   * HILL_CLIMB = 1;
    +   */
    +  public static final int HILL_CLIMB_VALUE = 1;
    +  /**
    +   * GRADIENT_DESCENT = 2;
        */
    -  public static final int HILL_CLIMB_VALUE = 0;
    +  public static final int GRADIENT_DESCENT_VALUE = 2;
       /**
    -   * GRADIENT_DESCENT = 1;
    +   * MAX_PARALLELISM = 3;
        */
    -  public static final int GRADIENT_DESCENT_VALUE = 1;
    +  public static final int MAX_PARALLELISM_VALUE = 3;
     
     
       public final int getNumber() {
    @@ -51,8 +67,10 @@ public static AutotuneAlgorithm valueOf(int value) {
     
       public static AutotuneAlgorithm forNumber(int value) {
         switch (value) {
    -      case 0: return HILL_CLIMB;
    -      case 1: return GRADIENT_DESCENT;
    +      case 0: return DEFAULT;
    +      case 1: return HILL_CLIMB;
    +      case 2: return GRADIENT_DESCENT;
    +      case 3: return MAX_PARALLELISM;
           default: return null;
         }
       }
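To make the renumbering above concrete, a short sketch of what the regenerated constants resolve to, assuming only the updated AutotuneAlgorithm class from this change; the wrapper class name is illustrative.

import org.tensorflow.proto.data.model.AutotuneAlgorithm;

public class AutotuneAlgorithmNumbering {
  public static void main(String[] args) {
    // Wire value 0 now maps to DEFAULT; HILL_CLIMB moved from 0 to 1.
    System.out.println(AutotuneAlgorithm.forNumber(0));                // DEFAULT
    System.out.println(AutotuneAlgorithm.forNumber(1));                // HILL_CLIMB
    System.out.println(AutotuneAlgorithm.HILL_CLIMB_VALUE);            // 1
    System.out.println(AutotuneAlgorithm.MAX_PARALLELISM.getNumber()); // 3
  }
}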
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java
    index 9648902d4e4..4faaa9e9e3e 100644
    --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java
    +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProto.java
    @@ -3901,7 +3901,7 @@ public final boolean isInitialized() {
         @java.lang.Override
         public void writeTo(com.google.protobuf.CodedOutputStream output)
                             throws java.io.IOException {
    -      if (algorithm_ != org.tensorflow.proto.data.model.AutotuneAlgorithm.HILL_CLIMB.getNumber()) {
    +      if (algorithm_ != org.tensorflow.proto.data.model.AutotuneAlgorithm.DEFAULT.getNumber()) {
             output.writeEnum(1, algorithm_);
           }
           if (cpuBudget_ != 0L) {
    @@ -3922,7 +3922,7 @@ public int getSerializedSize() {
           if (size != -1) return size;
     
           size = 0;
    -      if (algorithm_ != org.tensorflow.proto.data.model.AutotuneAlgorithm.HILL_CLIMB.getNumber()) {
    +      if (algorithm_ != org.tensorflow.proto.data.model.AutotuneAlgorithm.DEFAULT.getNumber()) {
             size += com.google.protobuf.CodedOutputStream
               .computeEnumSize(1, algorithm_);
           }
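The writeTo/getSerializedSize change above reflects proto3's rule that an enum field holding the zero value is never written to the wire, and with DEFAULT now at 0 it is that value which gets skipped. A small sketch of the same rule, shown with DataServiceMetadata from this change and assuming standard proto3 encoding; the wrapper class name is illustrative.

import org.tensorflow.proto.data.DataService.DataServiceMetadata;

public class DefaultEnumSerialization {
  public static void main(String[] args) {
    // compression left at COMPRESSION_UNSPECIFIED (0): the field is not serialized at all.
    DataServiceMetadata unspecified = DataServiceMetadata.newBuilder().build();
    // compression set to SNAPPY (2): one tag byte plus one value byte on the wire.
    DataServiceMetadata snappy = DataServiceMetadata.newBuilder()
        .setCompression(DataServiceMetadata.Compression.SNAPPY)
        .build();
    System.out.println(unspecified.toByteArray().length); // 0
    System.out.println(snappy.toByteArray().length);      // 2
  }
}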
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java
    index aa3343dfac6..15a0acab86a 100644
    --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java
    +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/data/model/ModelProtos.java
    @@ -78,12 +78,13 @@ public static void registerAllExtensions(
           "\n\tNodeClass\022\013\n\007UNKNOWN\020\000\022\023\n\017INTERLEAVE_M" +
           "ANY\020\001\022\031\n\025ASYNC_INTERLEAVE_MANY\020\002\022\017\n\013KNOW" +
           "N_RATIO\020\003\022\025\n\021ASYNC_KNOWN_RATIO\020\004\022\021\n\rUNKN" +
    -      "OWN_RATIO\020\005*9\n\021AutotuneAlgorithm\022\016\n\nHILL" +
    -      "_CLIMB\020\000\022\024\n\020GRADIENT_DESCENT\020\001B\201\001\n\037org.t" +
    -      "ensorflow.proto.data.modelB\013ModelProtosP" +
    -      "\001ZLgithub.com/tensorflow/tensorflow/tens" +
    -      "orflow/go/core/framework/model_go_proto\370" +
    -      "\001\001b\006proto3"
    +      "OWN_RATIO\020\005*[\n\021AutotuneAlgorithm\022\013\n\007DEFA" +
    +      "ULT\020\000\022\016\n\nHILL_CLIMB\020\001\022\024\n\020GRADIENT_DESCEN" +
    +      "T\020\002\022\023\n\017MAX_PARALLELISM\020\003B\201\001\n\037org.tensorf" +
    +      "low.proto.data.modelB\013ModelProtosP\001ZLgit" +
    +      "hub.com/tensorflow/tensorflow/tensorflow" +
    +      "/go/core/framework/model_go_proto\370\001\001b\006pr" +
    +      "oto3"
         };
         descriptor = com.google.protobuf.Descriptors.FileDescriptor
           .internalBuildGeneratedFileFrom(descriptorData,
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/distruntime/CoordinationConfig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/distruntime/CoordinationConfig.java
    new file mode 100644
    index 00000000000..be963dcf2ff
    --- /dev/null
    +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/distruntime/CoordinationConfig.java
    @@ -0,0 +1,1387 @@
    +// Generated by the protocol buffer compiler.  DO NOT EDIT!
    +// source: tensorflow/core/protobuf/coordination_config.proto
    +
    +package org.tensorflow.proto.distruntime;
    +
    +public final class CoordinationConfig {
    +  private CoordinationConfig() {}
    +  public static void registerAllExtensions(
    +      com.google.protobuf.ExtensionRegistryLite registry) {
    +  }
    +
    +  public static void registerAllExtensions(
    +      com.google.protobuf.ExtensionRegistry registry) {
    +    registerAllExtensions(
    +        (com.google.protobuf.ExtensionRegistryLite) registry);
    +  }
    +  public interface CoordinationServiceConfigOrBuilder extends
    +      // @@protoc_insertion_point(interface_extends:tensorflow.CoordinationServiceConfig)
    +      com.google.protobuf.MessageOrBuilder {
    +
    +    /**
    +     * 
    +     * Type of coordination service implementation to enable.
    +     * For example, setting the service type as "standalone" starts a service
    +     * instance on the leader task to provide the coordination services such as
    +     * heartbeats and consistent key-value store.
    +     * 
    + * + * string service_type = 1; + */ + java.lang.String getServiceType(); + /** + *
    +     * Type of coordination service implementation to enable.
    +     * For example, setting the service type as "standalone" starts a service
    +     * instance on the leader task to provide the coordination services such as
    +     * heartbeats and consistent key-value store.
    +     * 
    + * + * string service_type = 1; + */ + com.google.protobuf.ByteString + getServiceTypeBytes(); + + /** + *
    +     * Address where the coordination service instance is hosted.
    +     * 
    + * + * string service_leader = 2; + */ + java.lang.String getServiceLeader(); + /** + *
    +     * Address where the coordination service instance is hosted.
    +     * 
    + * + * string service_leader = 2; + */ + com.google.protobuf.ByteString + getServiceLeaderBytes(); + + /** + *
    +     * Whether to enable the health check mechanism.
    +     * 
    + * + * bool enable_health_check = 3; + */ + boolean getEnableHealthCheck(); + + /** + *
    +     * Maximum wait time for all members in the cluster to be registered.
    +     * 
    + * + * int64 cluster_register_timeout_in_ms = 4; + */ + long getClusterRegisterTimeoutInMs(); + + /** + *
+     * Heartbeat timeout. If a worker does not record a heartbeat within this
+     * time window, it will be considered disconnected.
    +     * 
    + * + * int64 heartbeat_timeout_in_ms = 5; + */ + long getHeartbeatTimeoutInMs(); + + /** + *
+     * The list of jobs that participate in the coordination service. If empty, all
    +     * jobs will be included in the coordination service by default.
    +     * 
    + * + * repeated string coordinated_jobs = 6; + */ + java.util.List + getCoordinatedJobsList(); + /** + *
+     * The list of jobs that participate in the coordination service. If empty, all
    +     * jobs will be included in the coordination service by default.
    +     * 
    + * + * repeated string coordinated_jobs = 6; + */ + int getCoordinatedJobsCount(); + /** + *
+     * The list of jobs that participate in the coordination service. If empty, all
    +     * jobs will be included in the coordination service by default.
    +     * 
    + * + * repeated string coordinated_jobs = 6; + */ + java.lang.String getCoordinatedJobs(int index); + /** + *
+     * The list of jobs that participate in the coordination service. If empty, all
    +     * jobs will be included in the coordination service by default.
    +     * 
    + * + * repeated string coordinated_jobs = 6; + */ + com.google.protobuf.ByteString + getCoordinatedJobsBytes(int index); + } + /** + *
    +   * Coordination service configuration parameters.
    +   * The system picks appropriate values for fields that are not set.
    +   * 
    + * + * Protobuf type {@code tensorflow.CoordinationServiceConfig} + */ + public static final class CoordinationServiceConfig extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.CoordinationServiceConfig) + CoordinationServiceConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use CoordinationServiceConfig.newBuilder() to construct. + private CoordinationServiceConfig(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private CoordinationServiceConfig() { + serviceType_ = ""; + serviceLeader_ = ""; + coordinatedJobs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new CoordinationServiceConfig(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CoordinationServiceConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + serviceType_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + serviceLeader_ = s; + break; + } + case 24: { + + enableHealthCheck_ = input.readBool(); + break; + } + case 32: { + + clusterRegisterTimeoutInMs_ = input.readInt64(); + break; + } + case 40: { + + heartbeatTimeoutInMs_ = input.readInt64(); + break; + } + case 50: { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + coordinatedJobs_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + coordinatedJobs_.add(s); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + coordinatedJobs_ = coordinatedJobs_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.distruntime.CoordinationConfig.internal_static_tensorflow_CoordinationServiceConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.distruntime.CoordinationConfig.internal_static_tensorflow_CoordinationServiceConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.class, 
org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.Builder.class); + } + + public static final int SERVICE_TYPE_FIELD_NUMBER = 1; + private volatile java.lang.Object serviceType_; + /** + *
    +     * Type of coordination service implementation to enable.
    +     * For example, setting the service type as "standalone" starts a service
    +     * instance on the leader task to provide the coordination services such as
    +     * heartbeats and consistent key-value store.
    +     * 
    + * + * string service_type = 1; + */ + public java.lang.String getServiceType() { + java.lang.Object ref = serviceType_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serviceType_ = s; + return s; + } + } + /** + *
    +     * Type of coordination service implementation to enable.
    +     * For example, setting the service type as "standalone" starts a service
    +     * instance on the leader task to provide the coordination services such as
    +     * heartbeats and consistent key-value store.
    +     * 
    + * + * string service_type = 1; + */ + public com.google.protobuf.ByteString + getServiceTypeBytes() { + java.lang.Object ref = serviceType_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serviceType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int SERVICE_LEADER_FIELD_NUMBER = 2; + private volatile java.lang.Object serviceLeader_; + /** + *
    +     * Address where the coordination service instance is hosted.
    +     * 
    + * + * string service_leader = 2; + */ + public java.lang.String getServiceLeader() { + java.lang.Object ref = serviceLeader_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serviceLeader_ = s; + return s; + } + } + /** + *
    +     * Address where the coordination service instance is hosted.
    +     * 
    + * + * string service_leader = 2; + */ + public com.google.protobuf.ByteString + getServiceLeaderBytes() { + java.lang.Object ref = serviceLeader_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serviceLeader_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int ENABLE_HEALTH_CHECK_FIELD_NUMBER = 3; + private boolean enableHealthCheck_; + /** + *
    +     * Whether to enable the health check mechanism.
    +     * 
    + * + * bool enable_health_check = 3; + */ + public boolean getEnableHealthCheck() { + return enableHealthCheck_; + } + + public static final int CLUSTER_REGISTER_TIMEOUT_IN_MS_FIELD_NUMBER = 4; + private long clusterRegisterTimeoutInMs_; + /** + *
    +     * Maximum wait time for all members in the cluster to be registered.
    +     * 
    + * + * int64 cluster_register_timeout_in_ms = 4; + */ + public long getClusterRegisterTimeoutInMs() { + return clusterRegisterTimeoutInMs_; + } + + public static final int HEARTBEAT_TIMEOUT_IN_MS_FIELD_NUMBER = 5; + private long heartbeatTimeoutInMs_; + /** + *
+     * Heartbeat timeout: if a worker does not record a heartbeat in this time
    +     * window, it will be considered disconnected.
    +     * 
    + * + * int64 heartbeat_timeout_in_ms = 5; + */ + public long getHeartbeatTimeoutInMs() { + return heartbeatTimeoutInMs_; + } + + public static final int COORDINATED_JOBS_FIELD_NUMBER = 6; + private com.google.protobuf.LazyStringList coordinatedJobs_; + /** + *
+     * The list of jobs that participate in the coordination service. If empty, all
    +     * jobs will be included in the coordination service by default.
    +     * 
    + * + * repeated string coordinated_jobs = 6; + */ + public com.google.protobuf.ProtocolStringList + getCoordinatedJobsList() { + return coordinatedJobs_; + } + /** + *
+     * The list of jobs that participate in the coordination service. If empty, all
    +     * jobs will be included in the coordination service by default.
    +     * 
    + * + * repeated string coordinated_jobs = 6; + */ + public int getCoordinatedJobsCount() { + return coordinatedJobs_.size(); + } + /** + *
+     * The list of jobs that participate in the coordination service. If empty, all
    +     * jobs will be included in the coordination service by default.
    +     * 
    + * + * repeated string coordinated_jobs = 6; + */ + public java.lang.String getCoordinatedJobs(int index) { + return coordinatedJobs_.get(index); + } + /** + *
+     * The list of jobs that participate in the coordination service. If empty, all
    +     * jobs will be included in the coordination service by default.
    +     * 
    + * + * repeated string coordinated_jobs = 6; + */ + public com.google.protobuf.ByteString + getCoordinatedJobsBytes(int index) { + return coordinatedJobs_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getServiceTypeBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, serviceType_); + } + if (!getServiceLeaderBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, serviceLeader_); + } + if (enableHealthCheck_ != false) { + output.writeBool(3, enableHealthCheck_); + } + if (clusterRegisterTimeoutInMs_ != 0L) { + output.writeInt64(4, clusterRegisterTimeoutInMs_); + } + if (heartbeatTimeoutInMs_ != 0L) { + output.writeInt64(5, heartbeatTimeoutInMs_); + } + for (int i = 0; i < coordinatedJobs_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, coordinatedJobs_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getServiceTypeBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, serviceType_); + } + if (!getServiceLeaderBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, serviceLeader_); + } + if (enableHealthCheck_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, enableHealthCheck_); + } + if (clusterRegisterTimeoutInMs_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, clusterRegisterTimeoutInMs_); + } + if (heartbeatTimeoutInMs_ != 0L) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(5, heartbeatTimeoutInMs_); + } + { + int dataSize = 0; + for (int i = 0; i < coordinatedJobs_.size(); i++) { + dataSize += computeStringSizeNoTag(coordinatedJobs_.getRaw(i)); + } + size += dataSize; + size += 1 * getCoordinatedJobsList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig)) { + return super.equals(obj); + } + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig other = (org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig) obj; + + if (!getServiceType() + .equals(other.getServiceType())) return false; + if (!getServiceLeader() + .equals(other.getServiceLeader())) return false; + if (getEnableHealthCheck() + != other.getEnableHealthCheck()) return false; + if (getClusterRegisterTimeoutInMs() + != other.getClusterRegisterTimeoutInMs()) return false; + if (getHeartbeatTimeoutInMs() + != other.getHeartbeatTimeoutInMs()) return false; + if (!getCoordinatedJobsList() + .equals(other.getCoordinatedJobsList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int 
hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERVICE_TYPE_FIELD_NUMBER; + hash = (53 * hash) + getServiceType().hashCode(); + hash = (37 * hash) + SERVICE_LEADER_FIELD_NUMBER; + hash = (53 * hash) + getServiceLeader().hashCode(); + hash = (37 * hash) + ENABLE_HEALTH_CHECK_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getEnableHealthCheck()); + hash = (37 * hash) + CLUSTER_REGISTER_TIMEOUT_IN_MS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getClusterRegisterTimeoutInMs()); + hash = (37 * hash) + HEARTBEAT_TIMEOUT_IN_MS_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getHeartbeatTimeoutInMs()); + if (getCoordinatedJobsCount() > 0) { + hash = (37 * hash) + COORDINATED_JOBS_FIELD_NUMBER; + hash = (53 * hash) + getCoordinatedJobsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static 
org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +     * Coordination service configuration parameters.
    +     * The system picks appropriate values for fields that are not set.
    +     * 
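For illustration, a minimal sketch of how the generated CoordinationServiceConfig builder above might be used; the class and setter names come from the generated code in this patch, while the concrete values (leader address, timeouts, job name) are placeholder assumptions, not taken from the patch.

import org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig;

public class CoordinationConfigExample {
  public static void main(String[] args) throws Exception {
    // Build a config via the generated builder; "standalone" matches the
    // example mentioned in the service_type field comment above.
    CoordinationServiceConfig config =
        CoordinationServiceConfig.newBuilder()
            .setServiceType("standalone")
            .setServiceLeader("/job:worker/replica:0/task:0") // placeholder leader address
            .setEnableHealthCheck(true)
            .setClusterRegisterTimeoutInMs(60_000)            // placeholder timeouts
            .setHeartbeatTimeoutInMs(10_000)
            .addCoordinatedJobs("worker")                     // empty list would mean "all jobs"
            .build();

    // The message round-trips through the standard protobuf wire format.
    byte[] bytes = config.toByteArray();
    CoordinationServiceConfig parsed = CoordinationServiceConfig.parseFrom(bytes);
    System.out.println(parsed.getServiceType() + " leader=" + parsed.getServiceLeader());
  }
}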
    + * + * Protobuf type {@code tensorflow.CoordinationServiceConfig} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.CoordinationServiceConfig) + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.distruntime.CoordinationConfig.internal_static_tensorflow_CoordinationServiceConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.distruntime.CoordinationConfig.internal_static_tensorflow_CoordinationServiceConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.class, org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.Builder.class); + } + + // Construct using org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + serviceType_ = ""; + + serviceLeader_ = ""; + + enableHealthCheck_ = false; + + clusterRegisterTimeoutInMs_ = 0L; + + heartbeatTimeoutInMs_ = 0L; + + coordinatedJobs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.distruntime.CoordinationConfig.internal_static_tensorflow_CoordinationServiceConfig_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig getDefaultInstanceForType() { + return org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig build() { + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig buildPartial() { + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig result = new org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig(this); + int from_bitField0_ = bitField0_; + result.serviceType_ = serviceType_; + result.serviceLeader_ = serviceLeader_; + result.enableHealthCheck_ = enableHealthCheck_; + result.clusterRegisterTimeoutInMs_ = clusterRegisterTimeoutInMs_; + result.heartbeatTimeoutInMs_ = heartbeatTimeoutInMs_; + if (((bitField0_ & 0x00000001) != 0)) { + coordinatedJobs_ = coordinatedJobs_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.coordinatedJobs_ = coordinatedJobs_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { 
+ return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig) { + return mergeFrom((org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig other) { + if (other == org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.getDefaultInstance()) return this; + if (!other.getServiceType().isEmpty()) { + serviceType_ = other.serviceType_; + onChanged(); + } + if (!other.getServiceLeader().isEmpty()) { + serviceLeader_ = other.serviceLeader_; + onChanged(); + } + if (other.getEnableHealthCheck() != false) { + setEnableHealthCheck(other.getEnableHealthCheck()); + } + if (other.getClusterRegisterTimeoutInMs() != 0L) { + setClusterRegisterTimeoutInMs(other.getClusterRegisterTimeoutInMs()); + } + if (other.getHeartbeatTimeoutInMs() != 0L) { + setHeartbeatTimeoutInMs(other.getHeartbeatTimeoutInMs()); + } + if (!other.coordinatedJobs_.isEmpty()) { + if (coordinatedJobs_.isEmpty()) { + coordinatedJobs_ = other.coordinatedJobs_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureCoordinatedJobsIsMutable(); + coordinatedJobs_.addAll(other.coordinatedJobs_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.lang.Object serviceType_ = ""; + /** + *
    +       * Type of coordination service implementation to enable.
    +       * For example, setting the service type as "standalone" starts a service
    +       * instance on the leader task to provide the coordination services such as
    +       * heartbeats and consistent key-value store.
    +       * 
    + * + * string service_type = 1; + */ + public java.lang.String getServiceType() { + java.lang.Object ref = serviceType_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serviceType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
    +       * Type of coordination service implementation to enable.
    +       * For example, setting the service type as "standalone" starts a service
    +       * instance on the leader task to provide the coordination services such as
    +       * heartbeats and consistent key-value store.
    +       * 
    + * + * string service_type = 1; + */ + public com.google.protobuf.ByteString + getServiceTypeBytes() { + java.lang.Object ref = serviceType_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serviceType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
    +       * Type of coordination service implementation to enable.
    +       * For example, setting the service type as "standalone" starts a service
    +       * instance on the leader task to provide the coordination services such as
    +       * heartbeats and consistent key-value store.
    +       * 
    + * + * string service_type = 1; + */ + public Builder setServiceType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + serviceType_ = value; + onChanged(); + return this; + } + /** + *
    +       * Type of coordination service implementation to enable.
    +       * For example, setting the service type as "standalone" starts a service
    +       * instance on the leader task to provide the coordination services such as
    +       * heartbeats and consistent key-value store.
    +       * 
    + * + * string service_type = 1; + */ + public Builder clearServiceType() { + + serviceType_ = getDefaultInstance().getServiceType(); + onChanged(); + return this; + } + /** + *
    +       * Type of coordination service implementation to enable.
    +       * For example, setting the service type as "standalone" starts a service
    +       * instance on the leader task to provide the coordination services such as
    +       * heartbeats and consistent key-value store.
    +       * 
    + * + * string service_type = 1; + */ + public Builder setServiceTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + serviceType_ = value; + onChanged(); + return this; + } + + private java.lang.Object serviceLeader_ = ""; + /** + *
    +       * Address where the coordination service instance is hosted.
    +       * 
    + * + * string service_leader = 2; + */ + public java.lang.String getServiceLeader() { + java.lang.Object ref = serviceLeader_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + serviceLeader_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
    +       * Address where the coordination service instance is hosted.
    +       * 
    + * + * string service_leader = 2; + */ + public com.google.protobuf.ByteString + getServiceLeaderBytes() { + java.lang.Object ref = serviceLeader_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + serviceLeader_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
    +       * Address where the coordination service instance is hosted.
    +       * 
    + * + * string service_leader = 2; + */ + public Builder setServiceLeader( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + serviceLeader_ = value; + onChanged(); + return this; + } + /** + *
    +       * Address where the coordination service instance is hosted.
    +       * 
    + * + * string service_leader = 2; + */ + public Builder clearServiceLeader() { + + serviceLeader_ = getDefaultInstance().getServiceLeader(); + onChanged(); + return this; + } + /** + *
    +       * Address where the coordination service instance is hosted.
    +       * 
    + * + * string service_leader = 2; + */ + public Builder setServiceLeaderBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + serviceLeader_ = value; + onChanged(); + return this; + } + + private boolean enableHealthCheck_ ; + /** + *
    +       * Whether to enable the health check mechanism.
    +       * 
    + * + * bool enable_health_check = 3; + */ + public boolean getEnableHealthCheck() { + return enableHealthCheck_; + } + /** + *
    +       * Whether to enable the health check mechanism.
    +       * 
    + * + * bool enable_health_check = 3; + */ + public Builder setEnableHealthCheck(boolean value) { + + enableHealthCheck_ = value; + onChanged(); + return this; + } + /** + *
    +       * Whether to enable the health check mechanism.
    +       * 
    + * + * bool enable_health_check = 3; + */ + public Builder clearEnableHealthCheck() { + + enableHealthCheck_ = false; + onChanged(); + return this; + } + + private long clusterRegisterTimeoutInMs_ ; + /** + *
    +       * Maximum wait time for all members in the cluster to be registered.
    +       * 
    + * + * int64 cluster_register_timeout_in_ms = 4; + */ + public long getClusterRegisterTimeoutInMs() { + return clusterRegisterTimeoutInMs_; + } + /** + *
    +       * Maximum wait time for all members in the cluster to be registered.
    +       * 
    + * + * int64 cluster_register_timeout_in_ms = 4; + */ + public Builder setClusterRegisterTimeoutInMs(long value) { + + clusterRegisterTimeoutInMs_ = value; + onChanged(); + return this; + } + /** + *
    +       * Maximum wait time for all members in the cluster to be registered.
    +       * 
    + * + * int64 cluster_register_timeout_in_ms = 4; + */ + public Builder clearClusterRegisterTimeoutInMs() { + + clusterRegisterTimeoutInMs_ = 0L; + onChanged(); + return this; + } + + private long heartbeatTimeoutInMs_ ; + /** + *
+       * Heartbeat timeout: if a worker does not record a heartbeat in this time
    +       * window, it will be considered disconnected.
    +       * 
    + * + * int64 heartbeat_timeout_in_ms = 5; + */ + public long getHeartbeatTimeoutInMs() { + return heartbeatTimeoutInMs_; + } + /** + *
+       * Heartbeat timeout: if a worker does not record a heartbeat in this time
    +       * window, it will be considered disconnected.
    +       * 
    + * + * int64 heartbeat_timeout_in_ms = 5; + */ + public Builder setHeartbeatTimeoutInMs(long value) { + + heartbeatTimeoutInMs_ = value; + onChanged(); + return this; + } + /** + *
+       * Heartbeat timeout: if a worker does not record a heartbeat in this time
    +       * window, it will be considered disconnected.
    +       * 
    + * + * int64 heartbeat_timeout_in_ms = 5; + */ + public Builder clearHeartbeatTimeoutInMs() { + + heartbeatTimeoutInMs_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList coordinatedJobs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureCoordinatedJobsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + coordinatedJobs_ = new com.google.protobuf.LazyStringArrayList(coordinatedJobs_); + bitField0_ |= 0x00000001; + } + } + /** + *
+       * The list of jobs that participate in the coordination service. If empty, all
    +       * jobs will be included in the coordination service by default.
    +       * 
    + * + * repeated string coordinated_jobs = 6; + */ + public com.google.protobuf.ProtocolStringList + getCoordinatedJobsList() { + return coordinatedJobs_.getUnmodifiableView(); + } + /** + *
+       * The list of jobs that participate in the coordination service. If empty, all
    +       * jobs will be included in the coordination service by default.
    +       * 
    + * + * repeated string coordinated_jobs = 6; + */ + public int getCoordinatedJobsCount() { + return coordinatedJobs_.size(); + } + /** + *
+       * The list of jobs that participate in the coordination service. If empty, all
    +       * jobs will be included in the coordination service by default.
    +       * 
    + * + * repeated string coordinated_jobs = 6; + */ + public java.lang.String getCoordinatedJobs(int index) { + return coordinatedJobs_.get(index); + } + /** + *
+       * The list of jobs that participate in the coordination service. If empty, all
    +       * jobs will be included in the coordination service by default.
    +       * 
    + * + * repeated string coordinated_jobs = 6; + */ + public com.google.protobuf.ByteString + getCoordinatedJobsBytes(int index) { + return coordinatedJobs_.getByteString(index); + } + /** + *
+       * The list of jobs that participate in the coordination service. If empty, all
    +       * jobs will be included in the coordination service by default.
    +       * 
    + * + * repeated string coordinated_jobs = 6; + */ + public Builder setCoordinatedJobs( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoordinatedJobsIsMutable(); + coordinatedJobs_.set(index, value); + onChanged(); + return this; + } + /** + *
+       * The list of jobs that participate in the coordination service. If empty, all
    +       * jobs will be included in the coordination service by default.
    +       * 
    + * + * repeated string coordinated_jobs = 6; + */ + public Builder addCoordinatedJobs( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCoordinatedJobsIsMutable(); + coordinatedJobs_.add(value); + onChanged(); + return this; + } + /** + *
+       * The list of jobs that participate in the coordination service. If empty, all
    +       * jobs will be included in the coordination service by default.
    +       * 
    + * + * repeated string coordinated_jobs = 6; + */ + public Builder addAllCoordinatedJobs( + java.lang.Iterable values) { + ensureCoordinatedJobsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, coordinatedJobs_); + onChanged(); + return this; + } + /** + *
+       * The list of jobs that participate in the coordination service. If empty, all
    +       * jobs will be included in the coordination service by default.
    +       * 
    + * + * repeated string coordinated_jobs = 6; + */ + public Builder clearCoordinatedJobs() { + coordinatedJobs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + *
+       * The list of jobs that participate in the coordination service. If empty, all
    +       * jobs will be included in the coordination service by default.
    +       * 
    + * + * repeated string coordinated_jobs = 6; + */ + public Builder addCoordinatedJobsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureCoordinatedJobsIsMutable(); + coordinatedJobs_.add(value); + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.CoordinationServiceConfig) + } + + // @@protoc_insertion_point(class_scope:tensorflow.CoordinationServiceConfig) + private static final org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig(); + } + + public static org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CoordinationServiceConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CoordinationServiceConfig(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_CoordinationServiceConfig_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_CoordinationServiceConfig_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n2tensorflow/core/protobuf/coordination_" + + "config.proto\022\ntensorflow\"\311\001\n\031Coordinatio" + + "nServiceConfig\022\024\n\014service_type\030\001 \001(\t\022\026\n\016" + + "service_leader\030\002 \001(\t\022\033\n\023enable_health_ch" + + "eck\030\003 \001(\010\022&\n\036cluster_register_timeout_in" + + "_ms\030\004 \001(\003\022\037\n\027heartbeat_timeout_in_ms\030\005 \001" + + "(\003\022\030\n\020coordinated_jobs\030\006 \003(\tBy\n org.tens" + + "orflow.proto.distruntimeZUgithub.com/ten" + + "sorflow/tensorflow/tensorflow/go/core/pr" + + "otobuf/for_core_protos_go_protob\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }); + internal_static_tensorflow_CoordinationServiceConfig_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_CoordinationServiceConfig_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_CoordinationServiceConfig_descriptor, + new java.lang.String[] { "ServiceType", "ServiceLeader", "EnableHealthCheck", "ClusterRegisterTimeoutInMs", "HeartbeatTimeoutInMs", "CoordinatedJobs", }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/distruntime/DistributedRuntimePayloads.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/distruntime/DistributedRuntimePayloads.java new file mode 100644 index 00000000000..1e9db2e1b7a --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/distruntime/DistributedRuntimePayloads.java @@ -0,0 +1,1691 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/protobuf/distributed_runtime_payloads.proto + +package org.tensorflow.proto.distruntime; + +public final class DistributedRuntimePayloads { + private DistributedRuntimePayloads() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + public interface GrpcPayloadContainerOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.distributed_runtime.GrpcPayloadContainer) + com.google.protobuf.MessageOrBuilder { + + /** + * map<string, bytes> payloads = 1; + */ + int getPayloadsCount(); + /** + * map<string, bytes> payloads = 1; + */ + boolean containsPayloads( + java.lang.String key); + /** + * Use {@link #getPayloadsMap()} instead. + */ + @java.lang.Deprecated + java.util.Map + getPayloads(); + /** + * map<string, bytes> payloads = 1; + */ + java.util.Map + getPayloadsMap(); + /** + * map<string, bytes> payloads = 1; + */ + + com.google.protobuf.ByteString getPayloadsOrDefault( + java.lang.String key, + com.google.protobuf.ByteString defaultValue); + /** + * map<string, bytes> payloads = 1; + */ + + com.google.protobuf.ByteString getPayloadsOrThrow( + java.lang.String key); + } + /** + *
    +   * Used to serialize and transmit tensorflow::Status payloads through
    +   * grpc::Status `error_details` since grpc::Status lacks payload API.
    +   * TODO(b/204231601): Use GRPC API once supported.
    +   * 
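For illustration, a minimal sketch of populating and reading the GrpcPayloadContainer described above; it is a plain map<string, bytes>, so the generated map accessors are all that is needed. The type-URL key and payload bytes are placeholder assumptions, not taken from this patch.

import com.google.protobuf.ByteString;
import org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer;

public class GrpcPayloadContainerExample {
  public static void main(String[] args) throws Exception {
    // Pack one payload keyed by a type URL (placeholder key and bytes).
    GrpcPayloadContainer container =
        GrpcPayloadContainer.newBuilder()
            .putPayloads("type.googleapis.com/example.Payload",
                ByteString.copyFromUtf8("payload-bytes"))
            .build();

    // Serialize as it would travel inside grpc::Status error_details,
    // then parse it back and look the payload up by key.
    GrpcPayloadContainer parsed = GrpcPayloadContainer.parseFrom(container.toByteArray());
    ByteString payload = parsed.getPayloadsOrDefault(
        "type.googleapis.com/example.Payload", ByteString.EMPTY);
    System.out.println(payload.toStringUtf8());
  }
}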
    + * + * Protobuf type {@code tensorflow.distributed_runtime.GrpcPayloadContainer} + */ + public static final class GrpcPayloadContainer extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.distributed_runtime.GrpcPayloadContainer) + GrpcPayloadContainerOrBuilder { + private static final long serialVersionUID = 0L; + // Use GrpcPayloadContainer.newBuilder() to construct. + private GrpcPayloadContainer(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GrpcPayloadContainer() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GrpcPayloadContainer(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GrpcPayloadContainer( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + payloads_ = com.google.protobuf.MapField.newMapField( + PayloadsDefaultEntryHolder.defaultEntry); + mutable_bitField0_ |= 0x00000001; + } + com.google.protobuf.MapEntry + payloads__ = input.readMessage( + PayloadsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); + payloads_.getMutableMap().put( + payloads__.getKey(), payloads__.getValue()); + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + @java.lang.Override + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 1: + return internalGetPayloads(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer.class, org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer.Builder.class); + } + + public static final int PAYLOADS_FIELD_NUMBER = 1; + private static final class PayloadsDefaultEntryHolder { + static final 
com.google.protobuf.MapEntry< + java.lang.String, com.google.protobuf.ByteString> defaultEntry = + com.google.protobuf.MapEntry + .newDefaultInstance( + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_PayloadsEntry_descriptor, + com.google.protobuf.WireFormat.FieldType.STRING, + "", + com.google.protobuf.WireFormat.FieldType.BYTES, + com.google.protobuf.ByteString.EMPTY); + } + private com.google.protobuf.MapField< + java.lang.String, com.google.protobuf.ByteString> payloads_; + private com.google.protobuf.MapField + internalGetPayloads() { + if (payloads_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PayloadsDefaultEntryHolder.defaultEntry); + } + return payloads_; + } + + public int getPayloadsCount() { + return internalGetPayloads().getMap().size(); + } + /** + * map<string, bytes> payloads = 1; + */ + + public boolean containsPayloads( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetPayloads().getMap().containsKey(key); + } + /** + * Use {@link #getPayloadsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getPayloads() { + return getPayloadsMap(); + } + /** + * map<string, bytes> payloads = 1; + */ + + public java.util.Map getPayloadsMap() { + return internalGetPayloads().getMap(); + } + /** + * map<string, bytes> payloads = 1; + */ + + public com.google.protobuf.ByteString getPayloadsOrDefault( + java.lang.String key, + com.google.protobuf.ByteString defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetPayloads().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * map<string, bytes> payloads = 1; + */ + + public com.google.protobuf.ByteString getPayloadsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetPayloads().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + com.google.protobuf.GeneratedMessageV3 + .serializeStringMapTo( + output, + internalGetPayloads(), + PayloadsDefaultEntryHolder.defaultEntry, + 1); + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (java.util.Map.Entry entry + : internalGetPayloads().getMap().entrySet()) { + com.google.protobuf.MapEntry + payloads__ = PayloadsDefaultEntryHolder.defaultEntry.newBuilderForType() + .setKey(entry.getKey()) + .setValue(entry.getValue()) + .build(); + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, payloads__); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer)) { + return super.equals(obj); + } + 
org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer other = (org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer) obj; + + if (!internalGetPayloads().equals( + other.internalGetPayloads())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (!internalGetPayloads().getMap().isEmpty()) { + hash = (37 * hash) + PAYLOADS_FIELD_NUMBER; + hash = (53 * hash) + internalGetPayloads().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +     * Used to serialize and transmit tensorflow::Status payloads through
    +     * grpc::Status `error_details` since grpc::Status lacks payload API.
    +     * TODO(b/204231601): Use GRPC API once supported.
    +     * 
    + * + * Protobuf type {@code tensorflow.distributed_runtime.GrpcPayloadContainer} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.distributed_runtime.GrpcPayloadContainer) + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainerOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_descriptor; + } + + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMapField( + int number) { + switch (number) { + case 1: + return internalGetPayloads(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @SuppressWarnings({"rawtypes"}) + protected com.google.protobuf.MapField internalGetMutableMapField( + int number) { + switch (number) { + case 1: + return internalGetMutablePayloads(); + default: + throw new RuntimeException( + "Invalid map field number: " + number); + } + } + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer.class, org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer.Builder.class); + } + + // Construct using org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + internalGetMutablePayloads().clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer getDefaultInstanceForType() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer build() { + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer buildPartial() { + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer result = new org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer(this); + int from_bitField0_ = bitField0_; + result.payloads_ = internalGetPayloads(); + result.payloads_.makeImmutable(); 
+ onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer) { + return mergeFrom((org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer other) { + if (other == org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer.getDefaultInstance()) return this; + internalGetMutablePayloads().mergeFrom( + other.internalGetPayloads()); + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private com.google.protobuf.MapField< + java.lang.String, com.google.protobuf.ByteString> payloads_; + private com.google.protobuf.MapField + internalGetPayloads() { + if (payloads_ == null) { + return com.google.protobuf.MapField.emptyMapField( + PayloadsDefaultEntryHolder.defaultEntry); + } + return payloads_; + } + private com.google.protobuf.MapField + internalGetMutablePayloads() { + onChanged();; + if (payloads_ == null) { + payloads_ = com.google.protobuf.MapField.newMapField( + PayloadsDefaultEntryHolder.defaultEntry); + } + if (!payloads_.isMutable()) { + payloads_ = payloads_.copy(); + } + return payloads_; + } + + public int getPayloadsCount() { + return internalGetPayloads().getMap().size(); + } + /** + * map<string, bytes> payloads = 1; + */ + + public boolean containsPayloads( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + return internalGetPayloads().getMap().containsKey(key); + } + /** + * Use {@link 
#getPayloadsMap()} instead. + */ + @java.lang.Deprecated + public java.util.Map getPayloads() { + return getPayloadsMap(); + } + /** + * map<string, bytes> payloads = 1; + */ + + public java.util.Map getPayloadsMap() { + return internalGetPayloads().getMap(); + } + /** + * map<string, bytes> payloads = 1; + */ + + public com.google.protobuf.ByteString getPayloadsOrDefault( + java.lang.String key, + com.google.protobuf.ByteString defaultValue) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetPayloads().getMap(); + return map.containsKey(key) ? map.get(key) : defaultValue; + } + /** + * map<string, bytes> payloads = 1; + */ + + public com.google.protobuf.ByteString getPayloadsOrThrow( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + java.util.Map map = + internalGetPayloads().getMap(); + if (!map.containsKey(key)) { + throw new java.lang.IllegalArgumentException(); + } + return map.get(key); + } + + public Builder clearPayloads() { + internalGetMutablePayloads().getMutableMap() + .clear(); + return this; + } + /** + * map<string, bytes> payloads = 1; + */ + + public Builder removePayloads( + java.lang.String key) { + if (key == null) { throw new java.lang.NullPointerException(); } + internalGetMutablePayloads().getMutableMap() + .remove(key); + return this; + } + /** + * Use alternate mutation accessors instead. + */ + @java.lang.Deprecated + public java.util.Map + getMutablePayloads() { + return internalGetMutablePayloads().getMutableMap(); + } + /** + * map<string, bytes> payloads = 1; + */ + public Builder putPayloads( + java.lang.String key, + com.google.protobuf.ByteString value) { + if (key == null) { throw new java.lang.NullPointerException(); } + if (value == null) { throw new java.lang.NullPointerException(); } + internalGetMutablePayloads().getMutableMap() + .put(key, value); + return this; + } + /** + * map<string, bytes> payloads = 1; + */ + + public Builder putAllPayloads( + java.util.Map values) { + internalGetMutablePayloads().getMutableMap() + .putAll(values); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.distributed_runtime.GrpcPayloadContainer) + } + + // @@protoc_insertion_point(class_scope:tensorflow.distributed_runtime.GrpcPayloadContainer) + private static final org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer(); + } + + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GrpcPayloadContainer parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GrpcPayloadContainer(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser 
parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadContainer getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface GrpcPayloadsLostOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.distributed_runtime.GrpcPayloadsLost) + com.google.protobuf.MessageOrBuilder { + } + /** + *
    +   * If included as a payload, this message flags the Status to have lost payloads
    +   * during the GRPC transmission.
    +   * URI: "type.googleapis.com/tensorflow.distributed_runtime.GrpcPayloadsLost"
    +   * 
    + * + * Protobuf type {@code tensorflow.distributed_runtime.GrpcPayloadsLost} + */ + public static final class GrpcPayloadsLost extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.distributed_runtime.GrpcPayloadsLost) + GrpcPayloadsLostOrBuilder { + private static final long serialVersionUID = 0L; + // Use GrpcPayloadsLost.newBuilder() to construct. + private GrpcPayloadsLost(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private GrpcPayloadsLost() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new GrpcPayloadsLost(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GrpcPayloadsLost( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadsLost_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadsLost_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost.class, org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost.Builder.class); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost)) { + return super.equals(obj); + } + 
org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost other = (org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost) obj; + + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { 
+ return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +     * If included as a payload, this message flags the Status to have lost payloads
    +     * during the GRPC transmission.
    +     * URI: "type.googleapis.com/tensorflow.distributed_runtime.GrpcPayloadsLost"
    +     * 
    + * + * Protobuf type {@code tensorflow.distributed_runtime.GrpcPayloadsLost} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.distributed_runtime.GrpcPayloadsLost) + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLostOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadsLost_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadsLost_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost.class, org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost.Builder.class); + } + + // Construct using org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_GrpcPayloadsLost_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost getDefaultInstanceForType() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost build() { + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost buildPartial() { + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost result = new org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost) { + return mergeFrom((org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost other) { + if (other == org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost.getDefaultInstance()) return this; + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.distributed_runtime.GrpcPayloadsLost) + } + + // @@protoc_insertion_point(class_scope:tensorflow.distributed_runtime.GrpcPayloadsLost) + private static final org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost(); + } + + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GrpcPayloadsLost parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GrpcPayloadsLost(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.GrpcPayloadsLost getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface WorkerPossiblyRestartedOrBuilder extends + // 
@@protoc_insertion_point(interface_extends:tensorflow.distributed_runtime.WorkerPossiblyRestarted) + com.google.protobuf.MessageOrBuilder { + } + /** + *
    +   * If included as a payload, this message flags the Status to be a possible
    +   * outcome of a worker restart.
    +   * URI:
    +   * "type.googleapis.com/tensorflow.distributed_runtime.WorkerPossiblyRestarted"
    +   * 
    + * + * Protobuf type {@code tensorflow.distributed_runtime.WorkerPossiblyRestarted} + */ + public static final class WorkerPossiblyRestarted extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.distributed_runtime.WorkerPossiblyRestarted) + WorkerPossiblyRestartedOrBuilder { + private static final long serialVersionUID = 0L; + // Use WorkerPossiblyRestarted.newBuilder() to construct. + private WorkerPossiblyRestarted(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private WorkerPossiblyRestarted() { + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new WorkerPossiblyRestarted(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private WorkerPossiblyRestarted( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_WorkerPossiblyRestarted_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_WorkerPossiblyRestarted_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted.class, org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted.Builder.class); + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted)) { + return super.equals(obj); + } + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted other = (org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted) obj; + + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, 
input, extensionRegistry); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + *
    +     * If included as a payload, this message flags the Status to be a possible
    +     * outcome of a worker restart.
    +     * URI:
    +     * "type.googleapis.com/tensorflow.distributed_runtime.WorkerPossiblyRestarted"
    +     * 
    + * + * Protobuf type {@code tensorflow.distributed_runtime.WorkerPossiblyRestarted} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.distributed_runtime.WorkerPossiblyRestarted) + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestartedOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_WorkerPossiblyRestarted_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_WorkerPossiblyRestarted_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted.class, org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted.Builder.class); + } + + // Construct using org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.internal_static_tensorflow_distributed_runtime_WorkerPossiblyRestarted_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted getDefaultInstanceForType() { + return org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted build() { + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted buildPartial() { + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted result = new org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted) { + return mergeFrom((org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted other) { + if (other == org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted.getDefaultInstance()) return this; + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.distributed_runtime.WorkerPossiblyRestarted) + } + + // @@protoc_insertion_point(class_scope:tensorflow.distributed_runtime.WorkerPossiblyRestarted) + private static final org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted(); + } + + public static org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WorkerPossiblyRestarted parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new WorkerPossiblyRestarted(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.distruntime.DistributedRuntimePayloads.WorkerPossiblyRestarted 
getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_PayloadsEntry_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_PayloadsEntry_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_distributed_runtime_GrpcPayloadsLost_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_distributed_runtime_GrpcPayloadsLost_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_distributed_runtime_WorkerPossiblyRestarted_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_distributed_runtime_WorkerPossiblyRestarted_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n;tensorflow/core/protobuf/distributed_r" + + "untime_payloads.proto\022\036tensorflow.distri" + + "buted_runtime\"\235\001\n\024GrpcPayloadContainer\022T" + + "\n\010payloads\030\001 \003(\0132B.tensorflow.distribute" + + "d_runtime.GrpcPayloadContainer.PayloadsE" + + "ntry\032/\n\rPayloadsEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005va" + + "lue\030\002 \001(\014:\0028\001\"\022\n\020GrpcPayloadsLost\"\031\n\027Wor" + + "kerPossiblyRestartedB|\n org.tensorflow.p" + + "roto.distruntimeZUgithub.com/tensorflow/" + + "tensorflow/tensorflow/go/core/protobuf/f" + + "or_core_protos_go_proto\370\001\001b\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }); + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_descriptor, + new java.lang.String[] { "Payloads", }); + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_PayloadsEntry_descriptor = + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_descriptor.getNestedTypes().get(0); + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_PayloadsEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_distributed_runtime_GrpcPayloadContainer_PayloadsEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_tensorflow_distributed_runtime_GrpcPayloadsLost_descriptor = + getDescriptor().getMessageTypes().get(1); + 
internal_static_tensorflow_distributed_runtime_GrpcPayloadsLost_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_distributed_runtime_GrpcPayloadsLost_descriptor, + new java.lang.String[] { }); + internal_static_tensorflow_distributed_runtime_WorkerPossiblyRestarted_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_tensorflow_distributed_runtime_WorkerPossiblyRestarted_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_distributed_runtime_WorkerPossiblyRestarted_descriptor, + new java.lang.String[] { }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java index 31be1799f01..26d02e5342b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProto.java @@ -513,34 +513,49 @@ public interface ExperimentalOrBuilder extends /** *
    -     * Distributed coordination service to be enabled if set.
    -     * Currently only effective in multi-client setup.
    +     * Whether functional control flow op lowering should be disabled. This is
    +     * useful when executing within a portable runtime where control flow op
    +     * kernels may not be loaded due to selective registration.
          * 
    * - * string coordination_service = 19; + * bool disable_functional_ops_lowering = 21; */ - java.lang.String getCoordinationService(); + boolean getDisableFunctionalOpsLowering(); + /** *
    -     * Distributed coordination service to be enabled if set.
    -     * Currently only effective in multi-client setup.
    +     * Provides a hint to XLA auto clustering to prefer forming a single large
+     * cluster that encompasses most of the graph.
          * 
    * - * string coordination_service = 19; + * bool xla_prefer_single_graph_cluster = 22; */ - com.google.protobuf.ByteString - getCoordinationServiceBytes(); + boolean getXlaPreferSingleGraphCluster(); /** *
    -     * Whether functional control flow op lowering should be disabled. This is
    -     * useful when executing within a portable runtime where control flow op
    -     * kernels may not be loaded due to selective registration.
    +     * Distributed coordination service configurations.
          * 
    * - * bool disable_functional_ops_lowering = 21; + * .tensorflow.CoordinationServiceConfig coordination_config = 23; */ - boolean getDisableFunctionalOpsLowering(); + boolean hasCoordinationConfig(); + /** + *
    +     * Distributed coordination service configurations.
    +     * 
    + * + * .tensorflow.CoordinationServiceConfig coordination_config = 23; + */ + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig getCoordinationConfig(); + /** + *
    +     * Distributed coordination service configurations.
    +     * 
    + * + * .tensorflow.CoordinationServiceConfig coordination_config = 23; + */ + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfigOrBuilder getCoordinationConfigOrBuilder(); } /** *
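[Editor's note] For readers updating application code against the hunk above: the string field `coordination_service = 19` is replaced by the `CoordinationServiceConfig` message (field 23), alongside two new booleans. The sketch below is illustrative only and uses just the accessors visible in this diff; the wrapper class name is ours, `setExperimental` comes from the enclosing ConfigProto builder rather than this hunk, and `CoordinationServiceConfig`'s own fields are not part of this patch, so it is left at its defaults.

import org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig;
import org.tensorflow.proto.framework.ConfigProto;

final class CoordinationConfigExample {
  // Minimal sketch: populate the Experimental fields introduced by this change.
  static ConfigProto sessionConfig() {
    return ConfigProto.newBuilder()
        .setExperimental(
            ConfigProto.Experimental.newBuilder()
                // field 23: replaces the removed `string coordination_service = 19`
                .setCoordinationConfig(CoordinationServiceConfig.getDefaultInstance())
                // field 21: keep functional control flow ops unlowered
                .setDisableFunctionalOpsLowering(true)
                // field 22: hint XLA auto clustering toward a single large cluster
                .setXlaPreferSingleGraphCluster(true))
        .build();
  }
}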
    @@ -564,7 +579,6 @@ private Experimental() {
           collectiveGroupLeader_ = "";
           executorType_ = "";
           mlirBridgeRollout_ = 0;
    -      coordinationService_ = "";
         }
     
         @java.lang.Override
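[Editor's note] The removal of `coordinationService_ = "";` above follows from the field's new type: a proto3 message field has no string default to initialize, it is simply absent until set, and absence is observable through the generated has-method. A minimal sketch of the resulting default behaviour, using only accessors that appear in this diff plus the standard generated `getDefaultInstance()`; the wrapper class name is ours:

import org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig;
import org.tensorflow.proto.framework.ConfigProto;

final class CoordinationConfigDefaults {
  static void inspectDefaults() {
    ConfigProto.Experimental defaults = ConfigProto.Experimental.getDefaultInstance();
    // Message fields carry explicit presence, unlike the removed string field:
    assert !defaults.hasCoordinationConfig();
    // The getter never returns null; it falls back to the default instance:
    assert defaults.getCoordinationConfig()
        .equals(CoordinationServiceConfig.getDefaultInstance());
  }
}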
    @@ -693,15 +707,27 @@ private Experimental(
                   useTfrt_ = input.readBool();
                   break;
                 }
    -            case 154: {
    -              java.lang.String s = input.readStringRequireUtf8();
    +            case 168: {
     
    -              coordinationService_ = s;
    +              disableFunctionalOpsLowering_ = input.readBool();
                   break;
                 }
    -            case 168: {
    +            case 176: {
    +
    +              xlaPreferSingleGraphCluster_ = input.readBool();
    +              break;
    +            }
    +            case 186: {
    +              org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.Builder subBuilder = null;
    +              if (coordinationConfig_ != null) {
    +                subBuilder = coordinationConfig_.toBuilder();
    +              }
    +              coordinationConfig_ = input.readMessage(org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.parser(), extensionRegistry);
    +              if (subBuilder != null) {
    +                subBuilder.mergeFrom(coordinationConfig_);
    +                coordinationConfig_ = subBuilder.buildPartial();
    +              }
     
    -              disableFunctionalOpsLowering_ = input.readBool();
                   break;
                 }
                 default: {
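[Editor's note] A quick way to sanity-check the new switch cases in the parsing constructor above: a protobuf wire tag is (field_number << 3) | wire_type, where wire type 0 is a varint (bool) and wire type 2 is length-delimited (string, message). The case labels in this hunk follow directly from the new field numbers; the scratch class below just spells out that arithmetic.

final class WireTags {
  // tag = (field_number << 3) | wire_type
  static final int DISABLE_FUNCTIONAL_OPS_LOWERING = (21 << 3) | 0; // 168, varint bool
  static final int XLA_PREFER_SINGLE_GRAPH_CLUSTER = (22 << 3) | 0; // 176, varint bool
  static final int COORDINATION_CONFIG             = (23 << 3) | 2; // 186, embedded message
  static final int COORDINATION_SERVICE_REMOVED    = (19 << 3) | 2; // 154, the dropped string case
}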
    @@ -1299,63 +1325,66 @@ public boolean getUseTfrt() {
           return useTfrt_;
         }
     
    -    public static final int COORDINATION_SERVICE_FIELD_NUMBER = 19;
    -    private volatile java.lang.Object coordinationService_;
    +    public static final int DISABLE_FUNCTIONAL_OPS_LOWERING_FIELD_NUMBER = 21;
    +    private boolean disableFunctionalOpsLowering_;
         /**
          * 
    -     * Distributed coordination service to be enabled if set.
    -     * Currently only effective in multi-client setup.
    +     * Whether functional control flow op lowering should be disabled. This is
    +     * useful when executing within a portable runtime where control flow op
    +     * kernels may not be loaded due to selective registration.
          * 
    * - * string coordination_service = 19; + * bool disable_functional_ops_lowering = 21; */ - public java.lang.String getCoordinationService() { - java.lang.Object ref = coordinationService_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - coordinationService_ = s; - return s; - } + public boolean getDisableFunctionalOpsLowering() { + return disableFunctionalOpsLowering_; } + + public static final int XLA_PREFER_SINGLE_GRAPH_CLUSTER_FIELD_NUMBER = 22; + private boolean xlaPreferSingleGraphCluster_; /** *
    -     * Distributed coordination service to be enabled if set.
    -     * Currently only effective in multi-client setup.
    +     * Provides a hint to XLA auto clustering to prefer forming a single large
+     * cluster that encompasses most of the graph.
          * 
    * - * string coordination_service = 19; + * bool xla_prefer_single_graph_cluster = 22; */ - public com.google.protobuf.ByteString - getCoordinationServiceBytes() { - java.lang.Object ref = coordinationService_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - coordinationService_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public boolean getXlaPreferSingleGraphCluster() { + return xlaPreferSingleGraphCluster_; } - public static final int DISABLE_FUNCTIONAL_OPS_LOWERING_FIELD_NUMBER = 21; - private boolean disableFunctionalOpsLowering_; + public static final int COORDINATION_CONFIG_FIELD_NUMBER = 23; + private org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig coordinationConfig_; /** *
    -     * Whether functional control flow op lowering should be disabled. This is
    -     * useful when executing within a portable runtime where control flow op
    -     * kernels may not be loaded due to selective registration.
    +     * Distributed coordination service configurations.
          * 
    * - * bool disable_functional_ops_lowering = 21; + * .tensorflow.CoordinationServiceConfig coordination_config = 23; */ - public boolean getDisableFunctionalOpsLowering() { - return disableFunctionalOpsLowering_; + public boolean hasCoordinationConfig() { + return coordinationConfig_ != null; + } + /** + *
    +     * Distributed coordination service configurations.
    +     * 
    + * + * .tensorflow.CoordinationServiceConfig coordination_config = 23; + */ + public org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig getCoordinationConfig() { + return coordinationConfig_ == null ? org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.getDefaultInstance() : coordinationConfig_; + } + /** + *
    +     * Distributed coordination service configurations.
    +     * 
    + * + * .tensorflow.CoordinationServiceConfig coordination_config = 23; + */ + public org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfigOrBuilder getCoordinationConfigOrBuilder() { + return getCoordinationConfig(); } private byte memoizedIsInitialized = -1; @@ -1423,12 +1452,15 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (useTfrt_ != false) { output.writeBool(18, useTfrt_); } - if (!getCoordinationServiceBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 19, coordinationService_); - } if (disableFunctionalOpsLowering_ != false) { output.writeBool(21, disableFunctionalOpsLowering_); } + if (xlaPreferSingleGraphCluster_ != false) { + output.writeBool(22, xlaPreferSingleGraphCluster_); + } + if (coordinationConfig_ != null) { + output.writeMessage(23, getCoordinationConfig()); + } unknownFields.writeTo(output); } @@ -1504,13 +1536,18 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeBoolSize(18, useTfrt_); } - if (!getCoordinationServiceBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(19, coordinationService_); - } if (disableFunctionalOpsLowering_ != false) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(21, disableFunctionalOpsLowering_); } + if (xlaPreferSingleGraphCluster_ != false) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(22, xlaPreferSingleGraphCluster_); + } + if (coordinationConfig_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(23, getCoordinationConfig()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -1562,10 +1599,15 @@ public boolean equals(final java.lang.Object obj) { != other.getXlaFusionAutotunerThresh()) return false; if (getUseTfrt() != other.getUseTfrt()) return false; - if (!getCoordinationService() - .equals(other.getCoordinationService())) return false; if (getDisableFunctionalOpsLowering() != other.getDisableFunctionalOpsLowering()) return false; + if (getXlaPreferSingleGraphCluster() + != other.getXlaPreferSingleGraphCluster()) return false; + if (hasCoordinationConfig() != other.hasCoordinationConfig()) return false; + if (hasCoordinationConfig()) { + if (!getCoordinationConfig() + .equals(other.getCoordinationConfig())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -1625,11 +1667,16 @@ public int hashCode() { hash = (37 * hash) + USE_TFRT_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getUseTfrt()); - hash = (37 * hash) + COORDINATION_SERVICE_FIELD_NUMBER; - hash = (53 * hash) + getCoordinationService().hashCode(); hash = (37 * hash) + DISABLE_FUNCTIONAL_OPS_LOWERING_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getDisableFunctionalOpsLowering()); + hash = (37 * hash) + XLA_PREFER_SINGLE_GRAPH_CLUSTER_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( + getXlaPreferSingleGraphCluster()); + if (hasCoordinationConfig()) { + hash = (37 * hash) + COORDINATION_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getCoordinationConfig().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -1807,10 +1854,16 @@ public Builder clear() { useTfrt_ = false; - coordinationService_ = ""; - disableFunctionalOpsLowering_ = false; + xlaPreferSingleGraphCluster_ = false; + + if (coordinationConfigBuilder_ == null) { + 
coordinationConfig_ = null; + } else { + coordinationConfig_ = null; + coordinationConfigBuilder_ = null; + } return this; } @@ -1858,8 +1911,13 @@ public org.tensorflow.proto.framework.ConfigProto.Experimental buildPartial() { result.disableOutputPartitionGraphs_ = disableOutputPartitionGraphs_; result.xlaFusionAutotunerThresh_ = xlaFusionAutotunerThresh_; result.useTfrt_ = useTfrt_; - result.coordinationService_ = coordinationService_; result.disableFunctionalOpsLowering_ = disableFunctionalOpsLowering_; + result.xlaPreferSingleGraphCluster_ = xlaPreferSingleGraphCluster_; + if (coordinationConfigBuilder_ == null) { + result.coordinationConfig_ = coordinationConfig_; + } else { + result.coordinationConfig_ = coordinationConfigBuilder_.build(); + } onBuilt(); return result; } @@ -1961,13 +2019,15 @@ public Builder mergeFrom(org.tensorflow.proto.framework.ConfigProto.Experimental if (other.getUseTfrt() != false) { setUseTfrt(other.getUseTfrt()); } - if (!other.getCoordinationService().isEmpty()) { - coordinationService_ = other.coordinationService_; - onChanged(); - } if (other.getDisableFunctionalOpsLowering() != false) { setDisableFunctionalOpsLowering(other.getDisableFunctionalOpsLowering()); } + if (other.getXlaPreferSingleGraphCluster() != false) { + setXlaPreferSingleGraphCluster(other.getXlaPreferSingleGraphCluster()); + } + if (other.hasCoordinationConfig()) { + mergeCoordinationConfig(other.getCoordinationConfig()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -3079,142 +3139,242 @@ public Builder clearUseTfrt() { return this; } - private java.lang.Object coordinationService_ = ""; + private boolean disableFunctionalOpsLowering_ ; /** *
    -       * Distributed coordination service to be enabled if set.
    -       * Currently only effective in multi-client setup.
    +       * Whether functional control flow op lowering should be disabled. This is
    +       * useful when executing within a portable runtime where control flow op
    +       * kernels may not be loaded due to selective registration.
            * 
    * - * string coordination_service = 19; + * bool disable_functional_ops_lowering = 21; */ - public java.lang.String getCoordinationService() { - java.lang.Object ref = coordinationService_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - coordinationService_ = s; - return s; - } else { - return (java.lang.String) ref; - } + public boolean getDisableFunctionalOpsLowering() { + return disableFunctionalOpsLowering_; } /** *
    -       * Distributed coordination service to be enabled if set.
    -       * Currently only effective in multi-client setup.
    +       * Whether functional control flow op lowering should be disabled. This is
    +       * useful when executing within a portable runtime where control flow op
    +       * kernels may not be loaded due to selective registration.
            * 
    * - * string coordination_service = 19; + * bool disable_functional_ops_lowering = 21; */ - public com.google.protobuf.ByteString - getCoordinationServiceBytes() { - java.lang.Object ref = coordinationService_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - coordinationService_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public Builder setDisableFunctionalOpsLowering(boolean value) { + + disableFunctionalOpsLowering_ = value; + onChanged(); + return this; } /** *
    -       * Distributed coordination service to be enabled if set.
    -       * Currently only effective in multi-client setup.
    +       * Whether functional control flow op lowering should be disabled. This is
    +       * useful when executing within a portable runtime where control flow op
    +       * kernels may not be loaded due to selective registration.
            * 
    * - * string coordination_service = 19; + * bool disable_functional_ops_lowering = 21; */ - public Builder setCoordinationService( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - coordinationService_ = value; + public Builder clearDisableFunctionalOpsLowering() { + + disableFunctionalOpsLowering_ = false; onChanged(); return this; } + + private boolean xlaPreferSingleGraphCluster_ ; /** *
    -       * Distributed coordination service to be enabled if set.
    -       * Currently only effective in multi-client setup.
    +       * Provides a hint to XLA auto clustering to prefer forming a single large
+       * cluster that encompasses most of the graph.
            * 
    * - * string coordination_service = 19; + * bool xla_prefer_single_graph_cluster = 22; */ - public Builder clearCoordinationService() { + public boolean getXlaPreferSingleGraphCluster() { + return xlaPreferSingleGraphCluster_; + } + /** + *
    +       * Provides a hint to XLA auto clustering to prefer forming a single large
+       * cluster that encompasses most of the graph.
    +       * 
    + * + * bool xla_prefer_single_graph_cluster = 22; + */ + public Builder setXlaPreferSingleGraphCluster(boolean value) { - coordinationService_ = getDefaultInstance().getCoordinationService(); + xlaPreferSingleGraphCluster_ = value; onChanged(); return this; } /** *
    -       * Distributed coordination service to be enabled if set.
    -       * Currently only effective in multi-client setup.
    +       * Provides a hint to XLA auto clustering to prefer forming a single large
+       * cluster that encompasses most of the graph.
            * 
    * - * string coordination_service = 19; + * bool xla_prefer_single_graph_cluster = 22; */ - public Builder setCoordinationServiceBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); + public Builder clearXlaPreferSingleGraphCluster() { - coordinationService_ = value; + xlaPreferSingleGraphCluster_ = false; onChanged(); return this; } - private boolean disableFunctionalOpsLowering_ ; + private org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig coordinationConfig_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig, org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.Builder, org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfigOrBuilder> coordinationConfigBuilder_; /** *
    -       * Whether functional control flow op lowering should be disabled. This is
    -       * useful when executing within a portable runtime where control flow op
    -       * kernels may not be loaded due to selective registration.
    +       * Distributed coordination service configurations.
            * 
    * - * bool disable_functional_ops_lowering = 21; + * .tensorflow.CoordinationServiceConfig coordination_config = 23; */ - public boolean getDisableFunctionalOpsLowering() { - return disableFunctionalOpsLowering_; + public boolean hasCoordinationConfig() { + return coordinationConfigBuilder_ != null || coordinationConfig_ != null; } /** *
    -       * Whether functional control flow op lowering should be disabled. This is
    -       * useful when executing within a portable runtime where control flow op
    -       * kernels may not be loaded due to selective registration.
    +       * Distributed coordination service configurations.
            * 
    * - * bool disable_functional_ops_lowering = 21; + * .tensorflow.CoordinationServiceConfig coordination_config = 23; */ - public Builder setDisableFunctionalOpsLowering(boolean value) { - - disableFunctionalOpsLowering_ = value; - onChanged(); + public org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig getCoordinationConfig() { + if (coordinationConfigBuilder_ == null) { + return coordinationConfig_ == null ? org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.getDefaultInstance() : coordinationConfig_; + } else { + return coordinationConfigBuilder_.getMessage(); + } + } + /** + *
    +       * Distributed coordination service configurations.
    +       * 
    + * + * .tensorflow.CoordinationServiceConfig coordination_config = 23; + */ + public Builder setCoordinationConfig(org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig value) { + if (coordinationConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + coordinationConfig_ = value; + onChanged(); + } else { + coordinationConfigBuilder_.setMessage(value); + } + return this; } /** *
    -       * Whether functional control flow op lowering should be disabled. This is
    -       * useful when executing within a portable runtime where control flow op
    -       * kernels may not be loaded due to selective registration.
    +       * Distributed coordination service configurations.
            * 
    * - * bool disable_functional_ops_lowering = 21; + * .tensorflow.CoordinationServiceConfig coordination_config = 23; */ - public Builder clearDisableFunctionalOpsLowering() { + public Builder setCoordinationConfig( + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.Builder builderForValue) { + if (coordinationConfigBuilder_ == null) { + coordinationConfig_ = builderForValue.build(); + onChanged(); + } else { + coordinationConfigBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
    +       * Distributed coordination service configurations.
    +       * 
    + * + * .tensorflow.CoordinationServiceConfig coordination_config = 23; + */ + public Builder mergeCoordinationConfig(org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig value) { + if (coordinationConfigBuilder_ == null) { + if (coordinationConfig_ != null) { + coordinationConfig_ = + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.newBuilder(coordinationConfig_).mergeFrom(value).buildPartial(); + } else { + coordinationConfig_ = value; + } + onChanged(); + } else { + coordinationConfigBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
    +       * Distributed coordination service configurations.
    +       * 
    + * + * .tensorflow.CoordinationServiceConfig coordination_config = 23; + */ + public Builder clearCoordinationConfig() { + if (coordinationConfigBuilder_ == null) { + coordinationConfig_ = null; + onChanged(); + } else { + coordinationConfig_ = null; + coordinationConfigBuilder_ = null; + } + + return this; + } + /** + *
    +       * Distributed coordination service configurations.
    +       * 
    + * + * .tensorflow.CoordinationServiceConfig coordination_config = 23; + */ + public org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.Builder getCoordinationConfigBuilder() { - disableFunctionalOpsLowering_ = false; onChanged(); - return this; + return getCoordinationConfigFieldBuilder().getBuilder(); + } + /** + *
    +       * Distributed coordination service configurations.
    +       * 
    + * + * .tensorflow.CoordinationServiceConfig coordination_config = 23; + */ + public org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfigOrBuilder getCoordinationConfigOrBuilder() { + if (coordinationConfigBuilder_ != null) { + return coordinationConfigBuilder_.getMessageOrBuilder(); + } else { + return coordinationConfig_ == null ? + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.getDefaultInstance() : coordinationConfig_; + } + } + /** + *
    +       * Distributed coordination service configurations.
    +       * 
    + * + * .tensorflow.CoordinationServiceConfig coordination_config = 23; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig, org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.Builder, org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfigOrBuilder> + getCoordinationConfigFieldBuilder() { + if (coordinationConfigBuilder_ == null) { + coordinationConfigBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig, org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfig.Builder, org.tensorflow.proto.distruntime.CoordinationConfig.CoordinationServiceConfigOrBuilder>( + getCoordinationConfig(), + getParentForChildren(), + isClean()); + coordinationConfig_ = null; + } + return coordinationConfigBuilder_; } @java.lang.Override public final Builder setUnknownFields( diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java index 9ddfa2e80ff..8ad97e1fb9c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/ConfigProtos.java @@ -128,142 +128,145 @@ public static void registerAllExtensions( "ost_graph.proto\032%tensorflow/core/framewo" + "rk/graph.proto\032*tensorflow/core/framewor" + "k/step_stats.proto\032&tensorflow/core/prot" + - "obuf/cluster.proto\032$tensorflow/core/prot" + - "obuf/debug.proto\032.tensorflow/core/protob" + - "uf/rewriter_config.proto\"\221\006\n\nGPUOptions\022" + - "\'\n\037per_process_gpu_memory_fraction\030\001 \001(\001" + - "\022\024\n\014allow_growth\030\004 \001(\010\022\026\n\016allocator_type" + - "\030\002 \001(\t\022\037\n\027deferred_deletion_bytes\030\003 \001(\003\022" + - "\033\n\023visible_device_list\030\005 \001(\t\022\"\n\032polling_" + - "active_delay_usecs\030\006 \001(\005\022$\n\034polling_inac" + - "tive_delay_msecs\030\007 \001(\005\022\034\n\024force_gpu_comp" + - "atible\030\010 \001(\010\0229\n\014experimental\030\t \001(\0132#.ten" + - "sorflow.GPUOptions.Experimental\032\312\003\n\014Expe" + - "rimental\022K\n\017virtual_devices\030\001 \003(\01322.tens" + - "orflow.GPUOptions.Experimental.VirtualDe" + - "vices\022\032\n\022use_unified_memory\030\002 \001(\010\022#\n\033num" + - "_dev_to_dev_copy_streams\030\003 \001(\005\022\035\n\025collec" + - "tive_ring_order\030\004 \001(\t\022\035\n\025timestamped_all" + - "ocator\030\005 \001(\010\022#\n\033kernel_tracker_max_inter" + - "val\030\007 \001(\005\022 \n\030kernel_tracker_max_bytes\030\010 " + - "\001(\005\022\"\n\032kernel_tracker_max_pending\030\t \001(\005\022" + - "\'\n\037internal_fragmentation_fraction\030\n \001(\001" + - "\022\035\n\025use_cuda_malloc_async\030\013 \001(\010\032;\n\016Virtu" + - "alDevices\022\027\n\017memory_limit_mb\030\001 \003(\002\022\020\n\010pr" + - "iority\030\002 \003(\005\"\235\003\n\020OptimizerOptions\022+\n#do_" + - "common_subexpression_elimination\030\001 \001(\010\022\033" + - "\n\023do_constant_folding\030\002 \001(\010\022$\n\034max_folde" + - "d_constant_in_bytes\030\006 \001(\003\022\034\n\024do_function" + - "_inlining\030\004 \001(\010\0225\n\topt_level\030\003 \001(\0162\".ten" + - 
"sorflow.OptimizerOptions.Level\022E\n\020global" + - "_jit_level\030\005 \001(\0162+.tensorflow.OptimizerO" + - "ptions.GlobalJitLevel\022\026\n\016cpu_global_jit\030" + - "\007 \001(\010\" \n\005Level\022\006\n\002L1\020\000\022\017\n\002L0\020\377\377\377\377\377\377\377\377\377\001\"" + - "C\n\016GlobalJitLevel\022\013\n\007DEFAULT\020\000\022\020\n\003OFF\020\377\377" + - "\377\377\377\377\377\377\377\001\022\010\n\004ON_1\020\001\022\010\n\004ON_2\020\002\"\356\002\n\014GraphOp" + - "tions\022\036\n\026enable_recv_scheduling\030\002 \001(\010\0227\n" + - "\021optimizer_options\030\003 \001(\0132\034.tensorflow.Op" + - "timizerOptions\022\030\n\020build_cost_model\030\004 \001(\003" + - "\022\036\n\026build_cost_model_after\030\t \001(\003\022\024\n\014infe" + - "r_shapes\030\005 \001(\010\022\032\n\022place_pruned_graph\030\006 \001" + - "(\010\022 \n\030enable_bfloat16_sendrecv\030\007 \001(\010\022\025\n\r" + - "timeline_step\030\010 \001(\005\0223\n\017rewrite_options\030\n" + - " \001(\0132\032.tensorflow.RewriterConfigJ\004\010\001\020\002R%" + - "skip_common_subexpression_elimination\"A\n" + - "\025ThreadPoolOptionProto\022\023\n\013num_threads\030\001 " + - "\001(\005\022\023\n\013global_name\030\002 \001(\t\"\325\001\n\nRPCOptions\022" + - "$\n\034use_rpc_for_inprocess_master\030\001 \001(\010\022\035\n" + - "\025compression_algorithm\030\002 \001(\t\022\031\n\021compress" + - "ion_level\030\003 \001(\005\022\032\n\022cache_rpc_response\030\004 " + - "\001(\010\022*\n\"disable_session_connection_sharin" + - "g\030\005 \001(\010\022\037\n\027num_channels_per_target\030\006 \001(\005" + - "\"0\n\017SessionMetadata\022\014\n\004name\030\001 \001(\t\022\017\n\007ver" + - "sion\030\002 \001(\003\"\331\r\n\013ConfigProto\022>\n\014device_cou" + - "nt\030\001 \003(\0132(.tensorflow.ConfigProto.Device" + - "CountEntry\022$\n\034intra_op_parallelism_threa" + - "ds\030\002 \001(\005\022$\n\034inter_op_parallelism_threads" + - "\030\005 \001(\005\022\037\n\027use_per_session_threads\030\t \001(\010\022" + - "G\n\034session_inter_op_thread_pool\030\014 \003(\0132!." + - "tensorflow.ThreadPoolOptionProto\022\030\n\020plac" + - "ement_period\030\003 \001(\005\022\026\n\016device_filters\030\004 \003" + - "(\t\022+\n\013gpu_options\030\006 \001(\0132\026.tensorflow.GPU" + - "Options\022\034\n\024allow_soft_placement\030\007 \001(\010\022\034\n" + - "\024log_device_placement\030\010 \001(\010\022/\n\rgraph_opt" + - "ions\030\n \001(\0132\030.tensorflow.GraphOptions\022\037\n\027" + - "operation_timeout_in_ms\030\013 \001(\003\022+\n\013rpc_opt" + - "ions\030\r \001(\0132\026.tensorflow.RPCOptions\022+\n\013cl" + - "uster_def\030\016 \001(\0132\026.tensorflow.ClusterDef\022" + - "\035\n\025isolate_session_state\030\017 \001(\010\022(\n share_" + - "cluster_devices_in_session\030\021 \001(\010\022:\n\014expe" + - "rimental\030\020 \001(\0132$.tensorflow.ConfigProto." 
+ - "Experimental\0322\n\020DeviceCountEntry\022\013\n\003key\030" + - "\001 \001(\t\022\r\n\005value\030\002 \001(\005:\0028\001\032\323\007\n\014Experimenta" + - "l\022\037\n\027collective_group_leader\030\001 \001(\t\022\025\n\rex" + - "ecutor_type\030\003 \001(\t\022\032\n\022recv_buf_max_chunk\030" + - "\004 \001(\005\022\031\n\021use_numa_affinity\030\005 \001(\010\0225\n-coll" + - "ective_deterministic_sequential_executio" + - "n\030\006 \001(\010\022\027\n\017collective_nccl\030\007 \001(\010\0226\n.shar" + - "e_session_state_in_clusterspec_propagati" + - "on\030\010 \001(\010\022\037\n\027disable_thread_spinning\030\t \001(" + - "\010\022(\n share_cluster_devices_in_session\030\n " + - "\001(\010\0225\n\020session_metadata\030\013 \001(\0132\033.tensorfl" + - "ow.SessionMetadata\022!\n\031optimize_for_stati" + - "c_graph\030\014 \001(\010\022\032\n\022enable_mlir_bridge\030\r \001(" + - "\010\022S\n\023mlir_bridge_rollout\030\021 \001(\01626.tensorf" + - "low.ConfigProto.Experimental.MlirBridgeR" + - "ollout\022&\n\036enable_mlir_graph_optimization" + - "\030\020 \001(\010\022\'\n\037disable_output_partition_graph" + - "s\030\016 \001(\010\022#\n\033xla_fusion_autotuner_thresh\030\017" + - " \001(\003\022\020\n\010use_tfrt\030\022 \001(\010\022\034\n\024coordination_s" + - "ervice\030\023 \001(\t\022\'\n\037disable_functional_ops_l" + - "owering\030\025 \001(\010\"\332\001\n\021MlirBridgeRollout\022#\n\037M" + - "LIR_BRIDGE_ROLLOUT_UNSPECIFIED\020\000\022\037\n\033MLIR" + - "_BRIDGE_ROLLOUT_ENABLED\020\001\022 \n\034MLIR_BRIDGE" + - "_ROLLOUT_DISABLED\020\002\022)\n%MLIR_BRIDGE_ROLLO" + - "UT_SAFE_MODE_ENABLED\020\003\0222\n.MLIR_BRIDGE_RO" + - "LLOUT_SAFE_MODE_FALLBACK_ENABLED\020\004J\004\010\002\020\003" + - "J\004\010\024\020\025\"\341\004\n\nRunOptions\0226\n\013trace_level\030\001 \001" + - "(\0162!.tensorflow.RunOptions.TraceLevel\022\025\n" + - "\rtimeout_in_ms\030\002 \001(\003\022\034\n\024inter_op_thread_" + - "pool\030\003 \001(\005\022\037\n\027output_partition_graphs\030\005 " + - "\001(\010\022/\n\rdebug_options\030\006 \001(\0132\030.tensorflow." + - "DebugOptions\022*\n\"report_tensor_allocation" + - "s_upon_oom\030\007 \001(\010\0229\n\014experimental\030\010 \001(\0132#" + - ".tensorflow.RunOptions.Experimental\032\322\001\n\014" + - "Experimental\022\034\n\024collective_graph_key\030\001 \001" + - "(\003\022\034\n\024use_run_handler_pool\030\002 \001(\010\022[\n\030run_" + - "handler_pool_options\030\003 \001(\01329.tensorflow." 
+ - "RunOptions.Experimental.RunHandlerPoolOp" + - "tions\032)\n\025RunHandlerPoolOptions\022\020\n\010priori" + - "ty\030\001 \001(\003\"R\n\nTraceLevel\022\014\n\010NO_TRACE\020\000\022\022\n\016" + - "SOFTWARE_TRACE\020\001\022\022\n\016HARDWARE_TRACE\020\002\022\016\n\n" + - "FULL_TRACE\020\003J\004\010\004\020\005\"\207\003\n\013RunMetadata\022)\n\nst" + - "ep_stats\030\001 \001(\0132\025.tensorflow.StepStats\022,\n" + - "\ncost_graph\030\002 \001(\0132\030.tensorflow.CostGraph" + - "Def\022.\n\020partition_graphs\030\003 \003(\0132\024.tensorfl" + - "ow.GraphDef\022?\n\017function_graphs\030\004 \003(\0132&.t" + - "ensorflow.RunMetadata.FunctionGraphs\032\255\001\n" + - "\016FunctionGraphs\022.\n\020partition_graphs\030\001 \003(" + - "\0132\024.tensorflow.GraphDef\0224\n\026pre_optimizat" + - "ion_graph\030\002 \001(\0132\024.tensorflow.GraphDef\0225\n" + - "\027post_optimization_graph\030\003 \001(\0132\024.tensorf" + - "low.GraphDef\":\n\020TensorConnection\022\023\n\013from" + - "_tensor\030\001 \001(\t\022\021\n\tto_tensor\030\002 \001(\t\"\260\003\n\017Cal" + - "lableOptions\022\014\n\004feed\030\001 \003(\t\022\r\n\005fetch\030\002 \003(" + - "\t\022\016\n\006target\030\003 \003(\t\022+\n\013run_options\030\004 \001(\0132\026" + - ".tensorflow.RunOptions\0227\n\021tensor_connect" + - "ion\030\005 \003(\0132\034.tensorflow.TensorConnection\022" + - "B\n\014feed_devices\030\006 \003(\0132,.tensorflow.Calla" + - "bleOptions.FeedDevicesEntry\022D\n\rfetch_dev" + - "ices\030\007 \003(\0132-.tensorflow.CallableOptions." + - "FetchDevicesEntry\022\027\n\017fetch_skip_sync\030\010 \001" + - "(\010\0322\n\020FeedDevicesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005v" + - "alue\030\002 \001(\t:\0028\001\0323\n\021FetchDevicesEntry\022\013\n\003k" + - "ey\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001B\212\001\n\036org.tens" + - "orflow.proto.frameworkB\014ConfigProtosP\001ZU" + - "github.com/tensorflow/tensorflow/tensorf" + - "low/go/core/protobuf/for_core_protos_go_" + - "proto\370\001\001b\006proto3" + "obuf/cluster.proto\0322tensorflow/core/prot" + + "obuf/coordination_config.proto\032$tensorfl" + + "ow/core/protobuf/debug.proto\032.tensorflow" + + "/core/protobuf/rewriter_config.proto\"\221\006\n" + + "\nGPUOptions\022\'\n\037per_process_gpu_memory_fr" + + "action\030\001 \001(\001\022\024\n\014allow_growth\030\004 \001(\010\022\026\n\016al" + + "locator_type\030\002 \001(\t\022\037\n\027deferred_deletion_" + + "bytes\030\003 \001(\003\022\033\n\023visible_device_list\030\005 \001(\t" + + "\022\"\n\032polling_active_delay_usecs\030\006 \001(\005\022$\n\034" + + "polling_inactive_delay_msecs\030\007 \001(\005\022\034\n\024fo" + + "rce_gpu_compatible\030\010 \001(\010\0229\n\014experimental" + + "\030\t \001(\0132#.tensorflow.GPUOptions.Experimen" + + "tal\032\312\003\n\014Experimental\022K\n\017virtual_devices\030" + + "\001 \003(\01322.tensorflow.GPUOptions.Experiment" + + "al.VirtualDevices\022\032\n\022use_unified_memory\030" + + "\002 \001(\010\022#\n\033num_dev_to_dev_copy_streams\030\003 \001" + + "(\005\022\035\n\025collective_ring_order\030\004 \001(\t\022\035\n\025tim" + + "estamped_allocator\030\005 \001(\010\022#\n\033kernel_track" + + "er_max_interval\030\007 \001(\005\022 \n\030kernel_tracker_" + + "max_bytes\030\010 \001(\005\022\"\n\032kernel_tracker_max_pe" + + "nding\030\t \001(\005\022\'\n\037internal_fragmentation_fr" + + "action\030\n \001(\001\022\035\n\025use_cuda_malloc_async\030\013 " + + 
"\001(\010\032;\n\016VirtualDevices\022\027\n\017memory_limit_mb" + + "\030\001 \003(\002\022\020\n\010priority\030\002 \003(\005\"\235\003\n\020OptimizerOp" + + "tions\022+\n#do_common_subexpression_elimina" + + "tion\030\001 \001(\010\022\033\n\023do_constant_folding\030\002 \001(\010\022" + + "$\n\034max_folded_constant_in_bytes\030\006 \001(\003\022\034\n" + + "\024do_function_inlining\030\004 \001(\010\0225\n\topt_level" + + "\030\003 \001(\0162\".tensorflow.OptimizerOptions.Lev" + + "el\022E\n\020global_jit_level\030\005 \001(\0162+.tensorflo" + + "w.OptimizerOptions.GlobalJitLevel\022\026\n\016cpu" + + "_global_jit\030\007 \001(\010\" \n\005Level\022\006\n\002L1\020\000\022\017\n\002L0" + + "\020\377\377\377\377\377\377\377\377\377\001\"C\n\016GlobalJitLevel\022\013\n\007DEFAULT" + + "\020\000\022\020\n\003OFF\020\377\377\377\377\377\377\377\377\377\001\022\010\n\004ON_1\020\001\022\010\n\004ON_2\020\002" + + "\"\356\002\n\014GraphOptions\022\036\n\026enable_recv_schedul" + + "ing\030\002 \001(\010\0227\n\021optimizer_options\030\003 \001(\0132\034.t" + + "ensorflow.OptimizerOptions\022\030\n\020build_cost" + + "_model\030\004 \001(\003\022\036\n\026build_cost_model_after\030\t" + + " \001(\003\022\024\n\014infer_shapes\030\005 \001(\010\022\032\n\022place_prun" + + "ed_graph\030\006 \001(\010\022 \n\030enable_bfloat16_sendre" + + "cv\030\007 \001(\010\022\025\n\rtimeline_step\030\010 \001(\005\0223\n\017rewri" + + "te_options\030\n \001(\0132\032.tensorflow.RewriterCo" + + "nfigJ\004\010\001\020\002R%skip_common_subexpression_el" + + "imination\"A\n\025ThreadPoolOptionProto\022\023\n\013nu" + + "m_threads\030\001 \001(\005\022\023\n\013global_name\030\002 \001(\t\"\325\001\n" + + "\nRPCOptions\022$\n\034use_rpc_for_inprocess_mas" + + "ter\030\001 \001(\010\022\035\n\025compression_algorithm\030\002 \001(\t" + + "\022\031\n\021compression_level\030\003 \001(\005\022\032\n\022cache_rpc" + + "_response\030\004 \001(\010\022*\n\"disable_session_conne" + + "ction_sharing\030\005 \001(\010\022\037\n\027num_channels_per_" + + "target\030\006 \001(\005\"0\n\017SessionMetadata\022\014\n\004name\030" + + "\001 \001(\t\022\017\n\007version\030\002 \001(\003\"\256\016\n\013ConfigProto\022>" + + "\n\014device_count\030\001 \003(\0132(.tensorflow.Config" + + "Proto.DeviceCountEntry\022$\n\034intra_op_paral" + + "lelism_threads\030\002 \001(\005\022$\n\034inter_op_paralle" + + "lism_threads\030\005 \001(\005\022\037\n\027use_per_session_th" + + "reads\030\t \001(\010\022G\n\034session_inter_op_thread_p" + + "ool\030\014 \003(\0132!.tensorflow.ThreadPoolOptionP" + + "roto\022\030\n\020placement_period\030\003 \001(\005\022\026\n\016device" + + "_filters\030\004 \003(\t\022+\n\013gpu_options\030\006 \001(\0132\026.te" + + "nsorflow.GPUOptions\022\034\n\024allow_soft_placem" + + "ent\030\007 \001(\010\022\034\n\024log_device_placement\030\010 \001(\010\022" + + "/\n\rgraph_options\030\n \001(\0132\030.tensorflow.Grap" + + "hOptions\022\037\n\027operation_timeout_in_ms\030\013 \001(" + + "\003\022+\n\013rpc_options\030\r \001(\0132\026.tensorflow.RPCO" + + "ptions\022+\n\013cluster_def\030\016 \001(\0132\026.tensorflow" + + ".ClusterDef\022\035\n\025isolate_session_state\030\017 \001" + + "(\010\022(\n share_cluster_devices_in_session\030\021" + + " \001(\010\022:\n\014experimental\030\020 \001(\0132$.tensorflow." 
+ + "ConfigProto.Experimental\0322\n\020DeviceCountE" + + "ntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\005:\0028\001\032\250\010\n" + + "\014Experimental\022\037\n\027collective_group_leader" + + "\030\001 \001(\t\022\025\n\rexecutor_type\030\003 \001(\t\022\032\n\022recv_bu" + + "f_max_chunk\030\004 \001(\005\022\031\n\021use_numa_affinity\030\005" + + " \001(\010\0225\n-collective_deterministic_sequent" + + "ial_execution\030\006 \001(\010\022\027\n\017collective_nccl\030\007" + + " \001(\010\0226\n.share_session_state_in_clustersp" + + "ec_propagation\030\010 \001(\010\022\037\n\027disable_thread_s" + + "pinning\030\t \001(\010\022(\n share_cluster_devices_i" + + "n_session\030\n \001(\010\0225\n\020session_metadata\030\013 \001(" + + "\0132\033.tensorflow.SessionMetadata\022!\n\031optimi" + + "ze_for_static_graph\030\014 \001(\010\022\032\n\022enable_mlir" + + "_bridge\030\r \001(\010\022S\n\023mlir_bridge_rollout\030\021 \001" + + "(\01626.tensorflow.ConfigProto.Experimental" + + ".MlirBridgeRollout\022&\n\036enable_mlir_graph_" + + "optimization\030\020 \001(\010\022\'\n\037disable_output_par" + + "tition_graphs\030\016 \001(\010\022#\n\033xla_fusion_autotu" + + "ner_thresh\030\017 \001(\003\022\020\n\010use_tfrt\030\022 \001(\010\022\'\n\037di" + + "sable_functional_ops_lowering\030\025 \001(\010\022\'\n\037x" + + "la_prefer_single_graph_cluster\030\026 \001(\010\022B\n\023" + + "coordination_config\030\027 \001(\0132%.tensorflow.C" + + "oordinationServiceConfig\"\332\001\n\021MlirBridgeR" + + "ollout\022#\n\037MLIR_BRIDGE_ROLLOUT_UNSPECIFIE" + + "D\020\000\022\037\n\033MLIR_BRIDGE_ROLLOUT_ENABLED\020\001\022 \n\034" + + "MLIR_BRIDGE_ROLLOUT_DISABLED\020\002\022)\n%MLIR_B" + + "RIDGE_ROLLOUT_SAFE_MODE_ENABLED\020\003\0222\n.MLI" + + "R_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENAB" + + "LED\020\004J\004\010\002\020\003J\004\010\023\020\024J\004\010\024\020\025\"\341\004\n\nRunOptions\0226" + + "\n\013trace_level\030\001 \001(\0162!.tensorflow.RunOpti" + + "ons.TraceLevel\022\025\n\rtimeout_in_ms\030\002 \001(\003\022\034\n" + + "\024inter_op_thread_pool\030\003 \001(\005\022\037\n\027output_pa" + + "rtition_graphs\030\005 \001(\010\022/\n\rdebug_options\030\006 " + + "\001(\0132\030.tensorflow.DebugOptions\022*\n\"report_" + + "tensor_allocations_upon_oom\030\007 \001(\010\0229\n\014exp" + + "erimental\030\010 \001(\0132#.tensorflow.RunOptions." 
+ + "Experimental\032\322\001\n\014Experimental\022\034\n\024collect" + + "ive_graph_key\030\001 \001(\003\022\034\n\024use_run_handler_p" + + "ool\030\002 \001(\010\022[\n\030run_handler_pool_options\030\003 " + + "\001(\01329.tensorflow.RunOptions.Experimental" + + ".RunHandlerPoolOptions\032)\n\025RunHandlerPool" + + "Options\022\020\n\010priority\030\001 \001(\003\"R\n\nTraceLevel\022" + + "\014\n\010NO_TRACE\020\000\022\022\n\016SOFTWARE_TRACE\020\001\022\022\n\016HAR" + + "DWARE_TRACE\020\002\022\016\n\nFULL_TRACE\020\003J\004\010\004\020\005\"\207\003\n\013" + + "RunMetadata\022)\n\nstep_stats\030\001 \001(\0132\025.tensor" + + "flow.StepStats\022,\n\ncost_graph\030\002 \001(\0132\030.ten" + + "sorflow.CostGraphDef\022.\n\020partition_graphs" + + "\030\003 \003(\0132\024.tensorflow.GraphDef\022?\n\017function" + + "_graphs\030\004 \003(\0132&.tensorflow.RunMetadata.F" + + "unctionGraphs\032\255\001\n\016FunctionGraphs\022.\n\020part" + + "ition_graphs\030\001 \003(\0132\024.tensorflow.GraphDef" + + "\0224\n\026pre_optimization_graph\030\002 \001(\0132\024.tenso" + + "rflow.GraphDef\0225\n\027post_optimization_grap" + + "h\030\003 \001(\0132\024.tensorflow.GraphDef\":\n\020TensorC" + + "onnection\022\023\n\013from_tensor\030\001 \001(\t\022\021\n\tto_ten" + + "sor\030\002 \001(\t\"\260\003\n\017CallableOptions\022\014\n\004feed\030\001 " + + "\003(\t\022\r\n\005fetch\030\002 \003(\t\022\016\n\006target\030\003 \003(\t\022+\n\013ru" + + "n_options\030\004 \001(\0132\026.tensorflow.RunOptions\022" + + "7\n\021tensor_connection\030\005 \003(\0132\034.tensorflow." + + "TensorConnection\022B\n\014feed_devices\030\006 \003(\0132," + + ".tensorflow.CallableOptions.FeedDevicesE" + + "ntry\022D\n\rfetch_devices\030\007 \003(\0132-.tensorflow" + + ".CallableOptions.FetchDevicesEntry\022\027\n\017fe" + + "tch_skip_sync\030\010 \001(\010\0322\n\020FeedDevicesEntry\022" + + "\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t:\0028\001\0323\n\021Fetch" + + "DevicesEntry\022\013\n\003key\030\001 \001(\t\022\r\n\005value\030\002 \001(\t" + + ":\0028\001B\212\001\n\036org.tensorflow.proto.frameworkB" + + "\014ConfigProtosP\001ZUgithub.com/tensorflow/t" + + "ensorflow/tensorflow/go/core/protobuf/fo" + + "r_core_protos_go_proto\370\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -272,6 +275,7 @@ public static void registerAllExtensions( org.tensorflow.proto.framework.GraphProtos.getDescriptor(), org.tensorflow.proto.framework.StepStatsProtos.getDescriptor(), org.tensorflow.proto.distruntime.ClusterProtos.getDescriptor(), + org.tensorflow.proto.distruntime.CoordinationConfig.getDescriptor(), org.tensorflow.proto.framework.DebugProtos.getDescriptor(), org.tensorflow.proto.framework.RewriterConfigProtos.getDescriptor(), }); @@ -340,7 +344,7 @@ public static void registerAllExtensions( internal_static_tensorflow_ConfigProto_Experimental_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_ConfigProto_Experimental_descriptor, - new java.lang.String[] { "CollectiveGroupLeader", "ExecutorType", "RecvBufMaxChunk", "UseNumaAffinity", "CollectiveDeterministicSequentialExecution", "CollectiveNccl", "ShareSessionStateInClusterspecPropagation", "DisableThreadSpinning", "ShareClusterDevicesInSession", "SessionMetadata", "OptimizeForStaticGraph", "EnableMlirBridge", "MlirBridgeRollout", "EnableMlirGraphOptimization", "DisableOutputPartitionGraphs", 
"XlaFusionAutotunerThresh", "UseTfrt", "CoordinationService", "DisableFunctionalOpsLowering", }); + new java.lang.String[] { "CollectiveGroupLeader", "ExecutorType", "RecvBufMaxChunk", "UseNumaAffinity", "CollectiveDeterministicSequentialExecution", "CollectiveNccl", "ShareSessionStateInClusterspecPropagation", "DisableThreadSpinning", "ShareClusterDevicesInSession", "SessionMetadata", "OptimizeForStaticGraph", "EnableMlirBridge", "MlirBridgeRollout", "EnableMlirGraphOptimization", "DisableOutputPartitionGraphs", "XlaFusionAutotunerThresh", "UseTfrt", "DisableFunctionalOpsLowering", "XlaPreferSingleGraphCluster", "CoordinationConfig", }); internal_static_tensorflow_RunOptions_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_tensorflow_RunOptions_fieldAccessorTable = new @@ -399,6 +403,7 @@ public static void registerAllExtensions( org.tensorflow.proto.framework.GraphProtos.getDescriptor(); org.tensorflow.proto.framework.StepStatsProtos.getDescriptor(); org.tensorflow.proto.distruntime.ClusterProtos.getDescriptor(); + org.tensorflow.proto.distruntime.CoordinationConfig.getDescriptor(); org.tensorflow.proto.framework.DebugProtos.getDescriptor(); org.tensorflow.proto.framework.RewriterConfigProtos.getDescriptor(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeId.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeId.java index dc9ed1c5a70..1b6db06124d 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeId.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeId.java @@ -30,7 +30,7 @@ public enum FullTypeId * TFT_TENSOR[TFT_VAR["T"]], TFT_TENSOR[TFT_VAR["T"]] are two tensors of * identical element types. * TFT_TENSOR[TFT_VAR["P"]], TFT_TENSOR[TFT_VAR["Q"]] are two tensors of - * potentially different element types. + * independent element types. *
    * * TFT_VAR = 1; @@ -80,6 +80,31 @@ public enum FullTypeId * TFT_NAMED = 4; */ TFT_NAMED(4), + /** + *
    +   * Template definition. Expands the variables by repeating a template as
    +   * arguments of a container.
    +   * Parametrization:
    +   *   TFT_FOR_EACH[<container_type>, <template>, <expansions>]
    +   *   * <container_type> is the type of the container that the template will be
    +   *     expanded into
    +   *   * <template> is any type definition that potentially contains type
    +   *     variables
    +   *   * <expansions> is a TFT_VAR and may include more types in the future
    +   * Example:
    +   *   TFT_FOR_EACH[
    +   *         TFT_PRODUCT,
    +   *         TFT_TENSOR[TFT_VAR["t"]],
    +   *         TFT_VAR["t"]
    +   *     ]
    +   *     will substitute a T = TFT_INT32 to TFT_PRODUCT[TFT_TENSOR[TFT_INT32]]
    +   *     and a T = (TFT_INT32, TFT_INT64) to
    +   *     TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_INT64]].
    +   * 
    + * + * TFT_FOR_EACH = 20; + */ + TFT_FOR_EACH(20), /** *
        * Callable types describe functions and ops.
    @@ -178,45 +203,6 @@ public enum FullTypeId
        * TFT_LITERAL = 1003;
        */
       TFT_LITERAL(1003),
    -  /**
    -   * 
    -   * Datasets created by tf.data ops and APIs. Datasets have generator/iterable
    -   * semantics, that is, one can construct an iterator from them. Like
    -   * Array, they are considered to return elements that can be described
    -   * by a single type. Unlike Array, they do not support random access or
    -   * mutation, and can potentially produce an infinite number of elements.
    -   * A datasets can produce logical structures (e.g. multiple elements). This
    -   * is expressed using TFT_PRODUCT.
    -   * Parametrization: TFT_ARRAY[<element type>].
    -   *   * <element type> may be a concrete type or a type symbol. It represents
    -   *     the data type of the elements produced by the dataset.
    -   * Examples:
    -   *   TFT_DATSET[TFT_TENSOR[TFT_INT32]] is a Dataset producing single int32
    -   *     Tensors of unknown shape.
    -   *   TFT_DATSET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT32]] is
    -   *     a Dataset producing pairs of Tensors, one integer and one float.
    -   * Note: The high ID number is to prepare for the eventuality that Datasets
    -   * will be supported by user types in the future.
    -   * 
    - * - * TFT_DATASET = 10102; - */ - TFT_DATASET(10102), - /** - *
    -   * A mutex lock tensor, produced by tf.raw_ops.MutexLock.
    -   * Unlike strict execution models, where ownership of a lock is denoted by
    -   * "running after the lock has been acquired", in non-strict mode, lock
    -   * ownership is in the true sense: "the op argument representing the lock is
    -   * available".
    -   * Mutex locks are the dynamic counterpart of control dependencies.
    -   * TODO(mdan): Properly document this thing.
    -   * Parametrization: TFT_MUTEX_LOCK[].
    -   * 
    - * - * TFT_MUTEX_LOCK = 10202; - */ - TFT_MUTEX_LOCK(10202), /** *
        * The bool element type.
    @@ -303,6 +289,65 @@ public enum FullTypeId
        * TFT_STRING = 214;
        */
       TFT_STRING(214),
    +  /**
    +   * 
    +   * Datasets created by tf.data ops and APIs. Datasets have generator/iterable
    +   * semantics, that is, one can construct an iterator from them. Like
    +   * Array, they are considered to return elements that can be described
    +   * by a single type. Unlike Array, they do not support random access or
    +   * mutation, and can potentially produce an infinite number of elements.
    +   * A dataset can produce logical structures (e.g. multiple elements). This
    +   * is expressed using TFT_PRODUCT.
    +   * Parametrization: TFT_DATASET[<element type>].
    +   *   * <element type> may be a concrete type or a type symbol. It represents
    +   *     the data type of the elements produced by the dataset.
    +   * Examples:
    +   *   TFT_DATASET[TFT_TENSOR[TFT_INT32]] is a Dataset producing single int32
    +   *     Tensors of unknown shape.
    +   *   TFT_DATASET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT32]]] is
    +   *     a Dataset producing pairs of Tensors, one integer and one float.
    +   * Note: The high ID number is to prepare for the eventuality that Datasets
    +   * will be supported by user types in the future.
    +   * 
    + * + * TFT_DATASET = 10102; + */ + TFT_DATASET(10102), + /** + *
    +   * A ragged tensor created by tf.ragged ops and APIs.
    +   * Parametrization: TFT_RAGGED[<element_type>].
    +   * 
    + * + * TFT_RAGGED = 10103; + */ + TFT_RAGGED(10103), + /** + *
    +   * A mutex lock tensor, produced by tf.raw_ops.MutexLock.
    +   * Unlike strict execution models, where ownership of a lock is denoted by
    +   * "running after the lock has been acquired", in non-strict mode, lock
    +   * ownership is in the true sense: "the op argument representing the lock is
    +   * available".
    +   * Mutex locks are the dynamic counterpart of control dependencies.
    +   * TODO(mdan): Properly document this thing.
    +   * Parametrization: TFT_MUTEX_LOCK[].
    +   * 
    + * + * TFT_MUTEX_LOCK = 10202; + */ + TFT_MUTEX_LOCK(10202), + /** + *
    +   * The equivalent of a Tensor with DT_VARIANT dtype, kept here to simplify
    +   * translation. This type should not normally appear after type inference.
    +   * Note that LEGACY_VARIANT != ANY: TENSOR[INT32] is a subtype of ANY, but is
    +   * not a subtype of LEGACY_VARIANT.
    +   * 
    + * + * TFT_LEGACY_VARIANT = 10203; + */ + TFT_LEGACY_VARIANT(10203), UNRECOGNIZED(-1), ; @@ -324,7 +369,7 @@ public enum FullTypeId * TFT_TENSOR[TFT_VAR["T"]], TFT_TENSOR[TFT_VAR["T"]] are two tensors of * identical element types. * TFT_TENSOR[TFT_VAR["P"]], TFT_TENSOR[TFT_VAR["Q"]] are two tensors of - * potentially different element types. + * independent element types. *
    * * TFT_VAR = 1; @@ -374,6 +419,31 @@ public enum FullTypeId * TFT_NAMED = 4; */ public static final int TFT_NAMED_VALUE = 4; + /** + *
    +   * Template definition. Expands the variables by repeating a template as
    +   * arguments of a container.
    +   * Parametrization:
    +   *   TFT_FOR_EACH[<container_type>, <template>, <expansions>]
    +   *   * <container_type> is the type of the container that the template will be
    +   *     expanded into
    +   *   * <template> is any type definition that potentially contains type
    +   *     variables
    +   *   * <expansions> is a TFT_VAR and may include more types in the future
    +   * Example:
    +   *   TFT_FOR_EACH[
    +   *         TFT_PRODUCT,
    +   *         TFT_TENSOR[TFT_VAR["t"]],
    +   *         TFT_VAR["t"]
    +   *     ]
    +   *     will substitute a T = TFT_INT32 to TFT_PRODUCT[TFT_TENSOR[TFT_INT32]]
    +   *     and a T = (TFT_INT32, TFT_INT64) to
    +   *     TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_INT64]].
    +   * 
    + * + * TFT_FOR_EACH = 20; + */ + public static final int TFT_FOR_EACH_VALUE = 20; /** *
        * Callable types describe functions and ops.
    @@ -472,45 +542,6 @@ public enum FullTypeId
        * TFT_LITERAL = 1003;
        */
       public static final int TFT_LITERAL_VALUE = 1003;
    -  /**
    -   * 
    -   * Datasets created by tf.data ops and APIs. Datasets have generator/iterable
    -   * semantics, that is, one can construct an iterator from them. Like
    -   * Array, they are considered to return elements that can be described
    -   * by a single type. Unlike Array, they do not support random access or
    -   * mutation, and can potentially produce an infinite number of elements.
    -   * A datasets can produce logical structures (e.g. multiple elements). This
    -   * is expressed using TFT_PRODUCT.
    -   * Parametrization: TFT_ARRAY[<element type>].
    -   *   * <element type> may be a concrete type or a type symbol. It represents
    -   *     the data type of the elements produced by the dataset.
    -   * Examples:
    -   *   TFT_DATSET[TFT_TENSOR[TFT_INT32]] is a Dataset producing single int32
    -   *     Tensors of unknown shape.
    -   *   TFT_DATSET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT32]] is
    -   *     a Dataset producing pairs of Tensors, one integer and one float.
    -   * Note: The high ID number is to prepare for the eventuality that Datasets
    -   * will be supported by user types in the future.
    -   * 
    - * - * TFT_DATASET = 10102; - */ - public static final int TFT_DATASET_VALUE = 10102; - /** - *
    -   * A mutex lock tensor, produced by tf.raw_ops.MutexLock.
    -   * Unlike strict execution models, where ownership of a lock is denoted by
    -   * "running after the lock has been acquired", in non-strict mode, lock
    -   * ownership is in the true sense: "the op argument representing the lock is
    -   * available".
    -   * Mutex locks are the dynamic counterpart of control dependencies.
    -   * TODO(mdan): Properly document this thing.
    -   * Parametrization: TFT_MUTEX_LOCK[].
    -   * 
    - * - * TFT_MUTEX_LOCK = 10202; - */ - public static final int TFT_MUTEX_LOCK_VALUE = 10202; /** *
        * The bool element type.
    @@ -597,6 +628,65 @@ public enum FullTypeId
        * TFT_STRING = 214;
        */
       public static final int TFT_STRING_VALUE = 214;
    +  /**
    +   * 
    +   * Datasets created by tf.data ops and APIs. Datasets have generator/iterable
    +   * semantics, that is, one can construct an iterator from them. Like
    +   * Array, they are considered to return elements that can be described
    +   * by a single type. Unlike Array, they do not support random access or
    +   * mutation, and can potentially produce an infinite number of elements.
    +   * A dataset can produce logical structures (e.g. multiple elements). This
    +   * is expressed using TFT_PRODUCT.
    +   * Parametrization: TFT_DATASET[<element type>].
    +   *   * <element type> may be a concrete type or a type symbol. It represents
    +   *     the data type of the elements produced by the dataset.
    +   * Examples:
    +   *   TFT_DATASET[TFT_TENSOR[TFT_INT32]] is a Dataset producing single int32
    +   *     Tensors of unknown shape.
    +   *   TFT_DATASET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT32]]] is
    +   *     a Dataset producing pairs of Tensors, one integer and one float.
    +   * Note: The high ID number is to prepare for the eventuality that Datasets
    +   * will be supported by user types in the future.
    +   * 
    + * + * TFT_DATASET = 10102; + */ + public static final int TFT_DATASET_VALUE = 10102; + /** + *
    +   * A ragged tensor created by tf.ragged ops and APIs.
    +   * Parametrization: TFT_RAGGED[<element_type>].
    +   * 
    + * + * TFT_RAGGED = 10103; + */ + public static final int TFT_RAGGED_VALUE = 10103; + /** + *
    +   * A mutex lock tensor, produced by tf.raw_ops.MutexLock.
    +   * Unlike strict execution models, where ownership of a lock is denoted by
    +   * "running after the lock has been acquired", in non-strict mode, lock
    +   * ownership is in the true sense: "the op argument representing the lock is
    +   * available".
    +   * Mutex locks are the dynamic counterpart of control dependencies.
    +   * TODO(mdan): Properly document this thing.
    +   * Parametrization: TFT_MUTEX_LOCK[].
    +   * 
    + * + * TFT_MUTEX_LOCK = 10202; + */ + public static final int TFT_MUTEX_LOCK_VALUE = 10202; + /** + *
    +   * The equivalent of a Tensor with DT_VARIANT dtype, kept here to simplify
    +   * translation. This type should not normally appear after type inference.
    +   * Note that LEGACY_VARIANT != ANY: TENSOR[INT32] is a subtype of ANY, but is
    +   * not a subtype of LEGACY_VARIANT.
    +   * 
    + * + * TFT_LEGACY_VARIANT = 10203; + */ + public static final int TFT_LEGACY_VARIANT_VALUE = 10203; public final int getNumber() { @@ -622,13 +712,12 @@ public static FullTypeId forNumber(int value) { case 2: return TFT_ANY; case 3: return TFT_PRODUCT; case 4: return TFT_NAMED; + case 20: return TFT_FOR_EACH; case 100: return TFT_CALLABLE; case 1000: return TFT_TENSOR; case 1001: return TFT_ARRAY; case 1002: return TFT_OPTIONAL; case 1003: return TFT_LITERAL; - case 10102: return TFT_DATASET; - case 10202: return TFT_MUTEX_LOCK; case 200: return TFT_BOOL; case 201: return TFT_UINT8; case 202: return TFT_UINT16; @@ -645,6 +734,10 @@ public static FullTypeId forNumber(int value) { case 212: return TFT_COMPLEX64; case 213: return TFT_COMPLEX128; case 214: return TFT_STRING; + case 10102: return TFT_DATASET; + case 10103: return TFT_RAGGED; + case 10202: return TFT_MUTEX_LOCK; + case 10203: return TFT_LEGACY_VARIANT; default: return null; } } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeProtos.java index bff92b6a7bc..abbd444781c 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/FullTypeProtos.java @@ -32,23 +32,24 @@ public static void registerAllExtensions( "oto\022\ntensorflow\"\177\n\013FullTypeDef\022\'\n\007type_i" + "d\030\001 \001(\0162\026.tensorflow.FullTypeId\022%\n\004args\030" + "\002 \003(\0132\027.tensorflow.FullTypeDef\022\013\n\001s\030\003 \001(" + - "\tH\000\022\013\n\001i\030\004 \001(\003H\000B\006\n\004attr*\342\003\n\nFullTypeId\022" + + "\tH\000\022\013\n\001i\030\004 \001(\003H\000B\006\n\004attr*\236\004\n\nFullTypeId\022" + "\r\n\tTFT_UNSET\020\000\022\013\n\007TFT_VAR\020\001\022\013\n\007TFT_ANY\020\002" + "\022\017\n\013TFT_PRODUCT\020\003\022\r\n\tTFT_NAMED\020\004\022\020\n\014TFT_" + - "CALLABLE\020d\022\017\n\nTFT_TENSOR\020\350\007\022\016\n\tTFT_ARRAY" + - "\020\351\007\022\021\n\014TFT_OPTIONAL\020\352\007\022\020\n\013TFT_LITERAL\020\353\007" + - "\022\020\n\013TFT_DATASET\020\366N\022\023\n\016TFT_MUTEX_LOCK\020\332O\022" + - "\r\n\010TFT_BOOL\020\310\001\022\016\n\tTFT_UINT8\020\311\001\022\017\n\nTFT_UI" + - "NT16\020\312\001\022\017\n\nTFT_UINT32\020\313\001\022\017\n\nTFT_UINT64\020\314" + - "\001\022\r\n\010TFT_INT8\020\315\001\022\016\n\tTFT_INT16\020\316\001\022\016\n\tTFT_" + - "INT32\020\317\001\022\016\n\tTFT_INT64\020\320\001\022\r\n\010TFT_HALF\020\321\001\022" + - "\016\n\tTFT_FLOAT\020\322\001\022\017\n\nTFT_DOUBLE\020\323\001\022\021\n\014TFT_" + - "BFLOAT16\020\327\001\022\022\n\rTFT_COMPLEX64\020\324\001\022\023\n\016TFT_C" + - "OMPLEX128\020\325\001\022\017\n\nTFT_STRING\020\326\001B\207\001\n\036org.te" + - "nsorflow.proto.frameworkB\016FullTypeProtos" + - "P\001ZPgithub.com/tensorflow/tensorflow/ten" + - "sorflow/go/core/framework/full_type_go_p" + - "roto\370\001\001b\006proto3" + "FOR_EACH\020\024\022\020\n\014TFT_CALLABLE\020d\022\017\n\nTFT_TENS" + + "OR\020\350\007\022\016\n\tTFT_ARRAY\020\351\007\022\021\n\014TFT_OPTIONAL\020\352\007" + + "\022\020\n\013TFT_LITERAL\020\353\007\022\r\n\010TFT_BOOL\020\310\001\022\016\n\tTFT" + + "_UINT8\020\311\001\022\017\n\nTFT_UINT16\020\312\001\022\017\n\nTFT_UINT32" + + "\020\313\001\022\017\n\nTFT_UINT64\020\314\001\022\r\n\010TFT_INT8\020\315\001\022\016\n\tT" + + 
"FT_INT16\020\316\001\022\016\n\tTFT_INT32\020\317\001\022\016\n\tTFT_INT64" + + "\020\320\001\022\r\n\010TFT_HALF\020\321\001\022\016\n\tTFT_FLOAT\020\322\001\022\017\n\nTF" + + "T_DOUBLE\020\323\001\022\021\n\014TFT_BFLOAT16\020\327\001\022\022\n\rTFT_CO" + + "MPLEX64\020\324\001\022\023\n\016TFT_COMPLEX128\020\325\001\022\017\n\nTFT_S" + + "TRING\020\326\001\022\020\n\013TFT_DATASET\020\366N\022\017\n\nTFT_RAGGED" + + "\020\367N\022\023\n\016TFT_MUTEX_LOCK\020\332O\022\027\n\022TFT_LEGACY_V" + + "ARIANT\020\333OB\207\001\n\036org.tensorflow.proto.frame" + + "workB\016FullTypeProtosP\001ZPgithub.com/tenso" + + "rflow/tensorflow/tensorflow/go/core/fram" + + "ework/full_type_go_proto\370\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RegisteredSaver.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RegisteredSaver.java new file mode 100644 index 00000000000..9b5429016c6 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RegisteredSaver.java @@ -0,0 +1,729 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/protobuf/trackable_object_graph.proto + +package org.tensorflow.proto.framework; + +/** + * Protobuf type {@code tensorflow.RegisteredSaver} + */ +public final class RegisteredSaver extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:tensorflow.RegisteredSaver) + RegisteredSaverOrBuilder { +private static final long serialVersionUID = 0L; + // Use RegisteredSaver.newBuilder() to construct. + private RegisteredSaver(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private RegisteredSaver() { + name_ = ""; + objectName_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new RegisteredSaver(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegisteredSaver( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + objectName_ = s; + break; + } + default: { + if (!parseUnknownField( + input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.tensorflow.proto.framework.TrackableObjectGraphProtos.internal_static_tensorflow_RegisteredSaver_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.TrackableObjectGraphProtos.internal_static_tensorflow_RegisteredSaver_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.RegisteredSaver.class, org.tensorflow.proto.framework.RegisteredSaver.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + *
    +   * The name of the registered saver/restore function.
    +   * 
    + * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + *
    +   * The name of the registered saver/restore function.
    +   * 
    + * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OBJECT_NAME_FIELD_NUMBER = 2; + private volatile java.lang.Object objectName_; + /** + *
    +   * Unique auto-generated name of the object.
    +   * 
    + * + * string object_name = 2; + */ + public java.lang.String getObjectName() { + java.lang.Object ref = objectName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + objectName_ = s; + return s; + } + } + /** + *
    +   * Unique auto-generated name of the object.
    +   * 
    + * + * string object_name = 2; + */ + public com.google.protobuf.ByteString + getObjectNameBytes() { + java.lang.Object ref = objectName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + objectName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (!getObjectNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, objectName_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (!getObjectNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, objectName_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.tensorflow.proto.framework.RegisteredSaver)) { + return super.equals(obj); + } + org.tensorflow.proto.framework.RegisteredSaver other = (org.tensorflow.proto.framework.RegisteredSaver) obj; + + if (!getName() + .equals(other.getName())) return false; + if (!getObjectName() + .equals(other.getObjectName())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + OBJECT_NAME_FIELD_NUMBER; + hash = (53 * hash) + getObjectName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.tensorflow.proto.framework.RegisteredSaver parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.RegisteredSaver parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.RegisteredSaver parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.RegisteredSaver parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.tensorflow.proto.framework.RegisteredSaver parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.tensorflow.proto.framework.RegisteredSaver parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.tensorflow.proto.framework.RegisteredSaver parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.RegisteredSaver parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.RegisteredSaver parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.RegisteredSaver parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.tensorflow.proto.framework.RegisteredSaver parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.tensorflow.proto.framework.RegisteredSaver parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.tensorflow.proto.framework.RegisteredSaver prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code tensorflow.RegisteredSaver} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:tensorflow.RegisteredSaver) + org.tensorflow.proto.framework.RegisteredSaverOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.tensorflow.proto.framework.TrackableObjectGraphProtos.internal_static_tensorflow_RegisteredSaver_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.tensorflow.proto.framework.TrackableObjectGraphProtos.internal_static_tensorflow_RegisteredSaver_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.tensorflow.proto.framework.RegisteredSaver.class, org.tensorflow.proto.framework.RegisteredSaver.Builder.class); + } + + // Construct using org.tensorflow.proto.framework.RegisteredSaver.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + } + } + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + objectName_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.tensorflow.proto.framework.TrackableObjectGraphProtos.internal_static_tensorflow_RegisteredSaver_descriptor; + } + + @java.lang.Override + public org.tensorflow.proto.framework.RegisteredSaver getDefaultInstanceForType() { + return org.tensorflow.proto.framework.RegisteredSaver.getDefaultInstance(); + } + + @java.lang.Override + public org.tensorflow.proto.framework.RegisteredSaver build() { + org.tensorflow.proto.framework.RegisteredSaver result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public org.tensorflow.proto.framework.RegisteredSaver buildPartial() { + org.tensorflow.proto.framework.RegisteredSaver result = new org.tensorflow.proto.framework.RegisteredSaver(this); + result.name_ = name_; + result.objectName_ = objectName_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder addRepeatedField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.tensorflow.proto.framework.RegisteredSaver) { + return mergeFrom((org.tensorflow.proto.framework.RegisteredSaver)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.tensorflow.proto.framework.RegisteredSaver other) { + if (other == org.tensorflow.proto.framework.RegisteredSaver.getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (!other.getObjectName().isEmpty()) { + objectName_ = other.objectName_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.tensorflow.proto.framework.RegisteredSaver parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.tensorflow.proto.framework.RegisteredSaver) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + *
    +     * The name of the registered saver/restore function.
+     * </pre>
    + * + * string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
    +     * The name of the registered saver/restore function.
+     * </pre>
    + * + * string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
    +     * The name of the registered saver/restore function.
+     * </pre>
    + * + * string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + *
    +     * The name of the registered saver/restore function.
+     * </pre>
    + * + * string name = 1; + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + *
    +     * The name of the registered saver/restore function.
+     * </pre>
    + * + * string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private java.lang.Object objectName_ = ""; + /** + *
    +     * Unique auto-generated name of the object.
+     * </pre>
    + * + * string object_name = 2; + */ + public java.lang.String getObjectName() { + java.lang.Object ref = objectName_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + objectName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
    +     * Unique auto-generated name of the object.
+     * </pre>
    + * + * string object_name = 2; + */ + public com.google.protobuf.ByteString + getObjectNameBytes() { + java.lang.Object ref = objectName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + objectName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
    +     * Unique auto-generated name of the object.
+     * </pre>
    + * + * string object_name = 2; + */ + public Builder setObjectName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + objectName_ = value; + onChanged(); + return this; + } + /** + *
    +     * Unique auto-generated name of the object.
+     * </pre>
    + * + * string object_name = 2; + */ + public Builder clearObjectName() { + + objectName_ = getDefaultInstance().getObjectName(); + onChanged(); + return this; + } + /** + *
    +     * Unique auto-generated name of the object.
+     * </pre>
    + * + * string object_name = 2; + */ + public Builder setObjectNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + objectName_ = value; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:tensorflow.RegisteredSaver) + } + + // @@protoc_insertion_point(class_scope:tensorflow.RegisteredSaver) + private static final org.tensorflow.proto.framework.RegisteredSaver DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.tensorflow.proto.framework.RegisteredSaver(); + } + + public static org.tensorflow.proto.framework.RegisteredSaver getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public RegisteredSaver parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegisteredSaver(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public org.tensorflow.proto.framework.RegisteredSaver getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + +} + diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RegisteredSaverOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RegisteredSaverOrBuilder.java new file mode 100644 index 00000000000..a80190ac3de --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RegisteredSaverOrBuilder.java @@ -0,0 +1,45 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: tensorflow/core/protobuf/trackable_object_graph.proto + +package org.tensorflow.proto.framework; + +public interface RegisteredSaverOrBuilder extends + // @@protoc_insertion_point(interface_extends:tensorflow.RegisteredSaver) + com.google.protobuf.MessageOrBuilder { + + /** + *
    +   * The name of the registered saver/restore function.
+   * </pre>
    + * + * string name = 1; + */ + java.lang.String getName(); + /** + *
    +   * The name of the registered saver/restore function.
+   * </pre>
    + * + * string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + /** + *
    +   * Unique auto-generated name of the object.
+   * </pre>
    + * + * string object_name = 2; + */ + java.lang.String getObjectName(); + /** + *
    +   * Unique auto-generated name of the object.
+   * </pre>
    + * + * string object_name = 2; + */ + com.google.protobuf.ByteString + getObjectNameBytes(); +} diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfig.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfig.java index 54860b767d8..4978cd47265 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfig.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfig.java @@ -37,6 +37,7 @@ private RewriterConfig() { implementationSelector_ = 0; autoMixedPrecision_ = 0; autoMixedPrecisionMkl_ = 0; + autoMixedPrecisionCpu_ = 0; usePluginOptimizers_ = 0; metaOptimizerIterations_ = 0; memoryOptimization_ = 0; @@ -251,6 +252,12 @@ private RewriterConfig( usePluginOptimizers_ = rawValue; break; } + case 232: { + int rawValue = input.readEnum(); + + autoMixedPrecisionCpu_ = rawValue; + break; + } case 400: { int rawValue = input.readEnum(); @@ -2211,6 +2218,39 @@ public org.tensorflow.proto.framework.RewriterConfig.Toggle getAutoMixedPrecisio return result == null ? org.tensorflow.proto.framework.RewriterConfig.Toggle.UNRECOGNIZED : result; } + public static final int AUTO_MIXED_PRECISION_CPU_FIELD_NUMBER = 29; + private int autoMixedPrecisionCpu_; + /** + *
    +   * Emulate a model using data type float16 on CPU (default is OFF).
    +   * This will try to emulate the float16 inputs and outputs of an operator
    +   * on CPU to have better correlation with float16 on GPU; however the
    +   * computation in the operator is based on float32.
    +   * Note that this can change the numerical stability of the graph.
+   * </pre>
    + * + * .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29; + */ + public int getAutoMixedPrecisionCpuValue() { + return autoMixedPrecisionCpu_; + } + /** + *
    +   * Emulate a model using data type float16 on CPU (default is OFF).
    +   * This will try to emulate the float16 inputs and outputs of an operator
    +   * on CPU to have better correlation with float16 on GPU; however the
    +   * computation in the operator is based on float32.
    +   * Note that this can change the numerical stability of the graph.
+   * </pre>
    + * + * .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29; + */ + public org.tensorflow.proto.framework.RewriterConfig.Toggle getAutoMixedPrecisionCpu() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.framework.RewriterConfig.Toggle result = org.tensorflow.proto.framework.RewriterConfig.Toggle.valueOf(autoMixedPrecisionCpu_); + return result == null ? org.tensorflow.proto.framework.RewriterConfig.Toggle.UNRECOGNIZED : result; + } + public static final int DISABLE_META_OPTIMIZER_FIELD_NUMBER = 19; private boolean disableMetaOptimizer_; /** @@ -2799,6 +2839,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (usePluginOptimizers_ != org.tensorflow.proto.framework.RewriterConfig.Toggle.DEFAULT.getNumber()) { output.writeEnum(28, usePluginOptimizers_); } + if (autoMixedPrecisionCpu_ != org.tensorflow.proto.framework.RewriterConfig.Toggle.DEFAULT.getNumber()) { + output.writeEnum(29, autoMixedPrecisionCpu_); + } if (cpuLayoutConversion_ != org.tensorflow.proto.framework.RewriterConfig.CpuLayout.NO_CONVERSION_ON_CPU.getNumber()) { output.writeEnum(50, cpuLayoutConversion_); } @@ -2934,6 +2977,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeEnumSize(28, usePluginOptimizers_); } + if (autoMixedPrecisionCpu_ != org.tensorflow.proto.framework.RewriterConfig.Toggle.DEFAULT.getNumber()) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(29, autoMixedPrecisionCpu_); + } if (cpuLayoutConversion_ != org.tensorflow.proto.framework.RewriterConfig.CpuLayout.NO_CONVERSION_ON_CPU.getNumber()) { size += com.google.protobuf.CodedOutputStream .computeEnumSize(50, cpuLayoutConversion_); @@ -2991,6 +3038,7 @@ public boolean equals(final java.lang.Object obj) { if (implementationSelector_ != other.implementationSelector_) return false; if (autoMixedPrecision_ != other.autoMixedPrecision_) return false; if (autoMixedPrecisionMkl_ != other.autoMixedPrecisionMkl_) return false; + if (autoMixedPrecisionCpu_ != other.autoMixedPrecisionCpu_) return false; if (getDisableMetaOptimizer() != other.getDisableMetaOptimizer()) return false; if (usePluginOptimizers_ != other.usePluginOptimizers_) return false; @@ -3078,6 +3126,8 @@ public int hashCode() { hash = (53 * hash) + autoMixedPrecision_; hash = (37 * hash) + AUTO_MIXED_PRECISION_MKL_FIELD_NUMBER; hash = (53 * hash) + autoMixedPrecisionMkl_; + hash = (37 * hash) + AUTO_MIXED_PRECISION_CPU_FIELD_NUMBER; + hash = (53 * hash) + autoMixedPrecisionCpu_; hash = (37 * hash) + DISABLE_META_OPTIMIZER_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getDisableMetaOptimizer()); @@ -3300,6 +3350,8 @@ public Builder clear() { autoMixedPrecisionMkl_ = 0; + autoMixedPrecisionCpu_ = 0; + disableMetaOptimizer_ = false; usePluginOptimizers_ = 0; @@ -3396,6 +3448,7 @@ public org.tensorflow.proto.framework.RewriterConfig buildPartial() { result.implementationSelector_ = implementationSelector_; result.autoMixedPrecision_ = autoMixedPrecision_; result.autoMixedPrecisionMkl_ = autoMixedPrecisionMkl_; + result.autoMixedPrecisionCpu_ = autoMixedPrecisionCpu_; result.disableMetaOptimizer_ = disableMetaOptimizer_; result.usePluginOptimizers_ = usePluginOptimizers_; result.metaOptimizerIterations_ = metaOptimizerIterations_; @@ -3539,6 +3592,9 @@ public Builder mergeFrom(org.tensorflow.proto.framework.RewriterConfig other) { if (other.autoMixedPrecisionMkl_ != 0) { setAutoMixedPrecisionMklValue(other.getAutoMixedPrecisionMklValue()); } + if 
(other.autoMixedPrecisionCpu_ != 0) { + setAutoMixedPrecisionCpuValue(other.getAutoMixedPrecisionCpuValue()); + } if (other.getDisableMetaOptimizer() != false) { setDisableMetaOptimizer(other.getDisableMetaOptimizer()); } @@ -4801,6 +4857,91 @@ public Builder clearAutoMixedPrecisionMkl() { return this; } + private int autoMixedPrecisionCpu_ = 0; + /** + *
    +     * Emulate a model using data type float16 on CPU (default is OFF).
    +     * This will try to emulate the float16 inputs and outputs of an operator
    +     * on CPU to have better correlation with float16 on GPU; however the
    +     * computation in the operator is based on float32.
    +     * Note that this can change the numerical stability of the graph.
+     * </pre>
    + * + * .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29; + */ + public int getAutoMixedPrecisionCpuValue() { + return autoMixedPrecisionCpu_; + } + /** + *
    +     * Emulate a model using data type float16 on CPU (default is OFF).
    +     * This will try to emulate the float16 inputs and outputs of an operator
    +     * on CPU to have better correlation with float16 on GPU; however the
    +     * computation in the operator is based on float32.
    +     * Note that this can change the numerical stability of the graph.
+     * </pre>
    + * + * .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29; + */ + public Builder setAutoMixedPrecisionCpuValue(int value) { + autoMixedPrecisionCpu_ = value; + onChanged(); + return this; + } + /** + *
    +     * Emulate a model using data type float16 on CPU (default is OFF).
    +     * This will try to emulate the float16 inputs and outputs of an operator
    +     * on CPU to have better correlation with float16 on GPU; however the
    +     * computation in the operator is based on float32.
    +     * Note that this can change the numerical stability of the graph.
+     * </pre>
    + * + * .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29; + */ + public org.tensorflow.proto.framework.RewriterConfig.Toggle getAutoMixedPrecisionCpu() { + @SuppressWarnings("deprecation") + org.tensorflow.proto.framework.RewriterConfig.Toggle result = org.tensorflow.proto.framework.RewriterConfig.Toggle.valueOf(autoMixedPrecisionCpu_); + return result == null ? org.tensorflow.proto.framework.RewriterConfig.Toggle.UNRECOGNIZED : result; + } + /** + *
    +     * Emulate a model using data type float16 on CPU (default is OFF).
    +     * This will try to emulate the float16 inputs and outputs of an operator
    +     * on CPU to have better correlation with float16 on GPU; however the
    +     * computation in the operator is based on float32.
    +     * Note that this can change the numerical stability of the graph.
+     * </pre>
    + * + * .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29; + */ + public Builder setAutoMixedPrecisionCpu(org.tensorflow.proto.framework.RewriterConfig.Toggle value) { + if (value == null) { + throw new NullPointerException(); + } + + autoMixedPrecisionCpu_ = value.getNumber(); + onChanged(); + return this; + } + /** + *
    +     * Emulate a model using data type float16 on CPU (default is OFF).
    +     * This will try to emulate the float16 inputs and outputs of an operator
    +     * on CPU to have better correlation with float16 on GPU; however the
    +     * computation in the operator is based on float32.
    +     * Note that this can change the numerical stability of the graph.
+     * </pre>
    + * + * .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29; + */ + public Builder clearAutoMixedPrecisionCpu() { + + autoMixedPrecisionCpu_ = 0; + onChanged(); + return this; + } + private boolean disableMetaOptimizer_ ; /** *
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigOrBuilder.java
    index eb2a9061177..7f853ba5ee2 100644
    --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigOrBuilder.java
    +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigOrBuilder.java
    @@ -318,6 +318,31 @@ public interface RewriterConfigOrBuilder extends
        */
       org.tensorflow.proto.framework.RewriterConfig.Toggle getAutoMixedPrecisionMkl();
     
    +  /**
+   * <pre>
    +   * Emulate a model using data type float16 on CPU (default is OFF).
    +   * This will try to emulate the float16 inputs and outputs of an operator
    +   * on CPU to have better correlation with float16 on GPU; however the
    +   * computation in the operator is based on float32.
    +   * Note that this can change the numerical stability of the graph.
+   * </pre>
    + * + * .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29; + */ + int getAutoMixedPrecisionCpuValue(); + /** + *
    +   * Emulate a model using data type float16 on CPU (default is OFF).
    +   * This will try to emulate the float16 inputs and outputs of an operator
    +   * on CPU to have better correlation with float16 on GPU; however the
    +   * computation in the operator is based on float32.
    +   * Note that this can change the numerical stability of the graph.
+   * </pre>
    + * + * .tensorflow.RewriterConfig.Toggle auto_mixed_precision_cpu = 29; + */ + org.tensorflow.proto.framework.RewriterConfig.Toggle getAutoMixedPrecisionCpu(); + /** *
        * Disable the entire meta optimizer (off by default).
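Note: the auto_mixed_precision_cpu toggle added in the hunks above is exposed through the regenerated RewriterConfig builder and the RewriterConfigOrBuilder getters. Below is a minimal sketch of how the new accessors could be exercised, using only class and method names that appear in this patch; the surrounding ConfigProto/GraphOptions wiring is not part of this diff and is left out.

  // Sketch: set and read back the new auto_mixed_precision_cpu toggle (field 29).
  import org.tensorflow.proto.framework.RewriterConfig;

  public final class AutoMixedPrecisionCpuDemo {
    public static void main(String[] args) {
      RewriterConfig rewriter = RewriterConfig.newBuilder()
          // Toggle values come from the generated enum: DEFAULT, ON, OFF, AGGRESSIVE.
          .setAutoMixedPrecisionCpu(RewriterConfig.Toggle.ON)
          .build();

      System.out.println(rewriter.getAutoMixedPrecisionCpu());      // ON
      System.out.println(rewriter.getAutoMixedPrecisionCpuValue()); // 1 (ON's enum number)
    }
  }

As the generated comment notes, this pass only emulates float16 inputs and outputs on CPU while keeping the computation in float32, so enabling it can change the numerical behaviour of the optimized graph.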
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigProtos.java
    index 513dd4d850d..fb1e84d99de 100644
    --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigProtos.java
    +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/RewriterConfigProtos.java
    @@ -54,7 +54,7 @@ public static void registerAllExtensions(
           "e/protobuf/verifier_config.proto\";\n\023Auto" +
           "ParallelOptions\022\016\n\006enable\030\001 \001(\010\022\024\n\014num_r" +
           "eplicas\030\002 \001(\005\"+\n\026ScopedAllocatorOptions\022" +
    -      "\021\n\tenable_op\030\001 \003(\t\"\341\023\n\016RewriterConfig\022C\n" +
    +      "\021\n\tenable_op\030\001 \003(\t\"\246\024\n\016RewriterConfig\022C\n" +
           "\025cpu_layout_conversion\0302 \001(\0162$.tensorflo" +
           "w.RewriterConfig.CpuLayout\022;\n\020layout_opt" +
           "imizer\030\001 \001(\0162!.tensorflow.RewriterConfig" +
    @@ -81,47 +81,49 @@ public static void registerAllExtensions(
           ".RewriterConfig.Toggle\022?\n\024auto_mixed_pre" +
           "cision\030\027 \001(\0162!.tensorflow.RewriterConfig" +
           ".Toggle\022C\n\030auto_mixed_precision_mkl\030\031 \001(" +
    -      "\0162!.tensorflow.RewriterConfig.Toggle\022\036\n\026" +
    -      "disable_meta_optimizer\030\023 \001(\010\022@\n\025use_plug" +
    -      "in_optimizers\030\034 \001(\0162!.tensorflow.Rewrite" +
    -      "rConfig.Toggle\022O\n\031meta_optimizer_iterati" +
    -      "ons\030\014 \001(\0162,.tensorflow.RewriterConfig.Nu" +
    -      "mIterationsType\022\027\n\017min_graph_nodes\030\021 \001(\005" +
    -      "\022;\n3experimental_disable_compressed_tens" +
    -      "or_optimization\030\032 \001(\010\022;\n3experimental_di" +
    -      "sable_folding_quantization_emulation\030\033 \001" +
    -      "(\010\022B\n\023memory_optimization\030\004 \001(\0162%.tensor" +
    -      "flow.RewriterConfig.MemOptType\022/\n\'memory" +
    -      "_optimizer_target_node_name_scope\030\006 \001(\t\022" +
    -      "!\n\031meta_optimizer_timeout_ms\030\024 \001(\003\0226\n\rau" +
    -      "to_parallel\030\005 \001(\0132\037.tensorflow.AutoParal" +
    -      "lelOptions\022 \n\030fail_on_optimizer_errors\030\025" +
    -      " \001(\010\022A\n\025scoped_allocator_opts\030\020 \001(\0132\".te" +
    -      "nsorflow.ScopedAllocatorOptions\022\022\n\noptim" +
    -      "izers\030d \003(\t\022K\n\021custom_optimizers\030\310\001 \003(\0132" +
    -      "/.tensorflow.RewriterConfig.CustomGraphO" +
    -      "ptimizer\022D\n\037inter_optimizer_verifier_con" +
    -      "fig\030\254\002 \001(\0132\032.tensorflow.VerifierConfig\022F" +
    -      "\n!post_optimization_verifier_config\030\255\002 \001" +
    -      "(\0132\032.tensorflow.VerifierConfig\032\312\001\n\024Custo" +
    -      "mGraphOptimizer\022\014\n\004name\030\001 \001(\t\022X\n\rparamet" +
    -      "er_map\030\002 \003(\0132A.tensorflow.RewriterConfig" +
    -      ".CustomGraphOptimizer.ParameterMapEntry\032" +
    -      "J\n\021ParameterMapEntry\022\013\n\003key\030\001 \001(\t\022$\n\005val" +
    -      "ue\030\002 \001(\0132\025.tensorflow.AttrValue:\0028\001\"6\n\006T" +
    -      "oggle\022\013\n\007DEFAULT\020\000\022\006\n\002ON\020\001\022\007\n\003OFF\020\002\022\016\n\nA" +
    -      "GGRESSIVE\020\003\"I\n\tCpuLayout\022\030\n\024NO_CONVERSIO" +
    -      "N_ON_CPU\020\000\022\020\n\014NCHW_TO_NHWC\020\001\022\020\n\014NHWC_TO_" +
    -      "NCHW\020\002\"<\n\021NumIterationsType\022\025\n\021DEFAULT_N" +
    -      "UM_ITERS\020\000\022\007\n\003ONE\020\001\022\007\n\003TWO\020\002\"\237\001\n\nMemOptT" +
    -      "ype\022\023\n\017DEFAULT_MEM_OPT\020\000\022\016\n\nNO_MEM_OPT\020\001" +
    -      "\022\n\n\006MANUAL\020\002\022\027\n\023SWAPPING_HEURISTICS\020\004\022\034\n" +
    -      "\030RECOMPUTATION_HEURISTICS\020\005\022\031\n\025SCHEDULIN" +
    -      "G_HEURISTICS\020\006\022\016\n\nHEURISTICS\020\003B\222\001\n\036org.t" +
    -      "ensorflow.proto.frameworkB\024RewriterConfi" +
    -      "gProtosP\001ZUgithub.com/tensorflow/tensorf" +
    -      "low/tensorflow/go/core/protobuf/for_core" +
    -      "_protos_go_proto\370\001\001b\006proto3"
    +      "\0162!.tensorflow.RewriterConfig.Toggle\022C\n\030" +
    +      "auto_mixed_precision_cpu\030\035 \001(\0162!.tensorf" +
    +      "low.RewriterConfig.Toggle\022\036\n\026disable_met" +
    +      "a_optimizer\030\023 \001(\010\022@\n\025use_plugin_optimize" +
    +      "rs\030\034 \001(\0162!.tensorflow.RewriterConfig.Tog" +
    +      "gle\022O\n\031meta_optimizer_iterations\030\014 \001(\0162," +
    +      ".tensorflow.RewriterConfig.NumIterations" +
    +      "Type\022\027\n\017min_graph_nodes\030\021 \001(\005\022;\n3experim" +
    +      "ental_disable_compressed_tensor_optimiza" +
    +      "tion\030\032 \001(\010\022;\n3experimental_disable_foldi" +
    +      "ng_quantization_emulation\030\033 \001(\010\022B\n\023memor" +
    +      "y_optimization\030\004 \001(\0162%.tensorflow.Rewrit" +
    +      "erConfig.MemOptType\022/\n\'memory_optimizer_" +
    +      "target_node_name_scope\030\006 \001(\t\022!\n\031meta_opt" +
    +      "imizer_timeout_ms\030\024 \001(\003\0226\n\rauto_parallel" +
    +      "\030\005 \001(\0132\037.tensorflow.AutoParallelOptions\022" +
    +      " \n\030fail_on_optimizer_errors\030\025 \001(\010\022A\n\025sco" +
    +      "ped_allocator_opts\030\020 \001(\0132\".tensorflow.Sc" +
    +      "opedAllocatorOptions\022\022\n\noptimizers\030d \003(\t" +
    +      "\022K\n\021custom_optimizers\030\310\001 \003(\0132/.tensorflo" +
    +      "w.RewriterConfig.CustomGraphOptimizer\022D\n" +
    +      "\037inter_optimizer_verifier_config\030\254\002 \001(\0132" +
    +      "\032.tensorflow.VerifierConfig\022F\n!post_opti" +
    +      "mization_verifier_config\030\255\002 \001(\0132\032.tensor" +
    +      "flow.VerifierConfig\032\312\001\n\024CustomGraphOptim" +
    +      "izer\022\014\n\004name\030\001 \001(\t\022X\n\rparameter_map\030\002 \003(" +
    +      "\0132A.tensorflow.RewriterConfig.CustomGrap" +
    +      "hOptimizer.ParameterMapEntry\032J\n\021Paramete" +
    +      "rMapEntry\022\013\n\003key\030\001 \001(\t\022$\n\005value\030\002 \001(\0132\025." +
    +      "tensorflow.AttrValue:\0028\001\"6\n\006Toggle\022\013\n\007DE" +
    +      "FAULT\020\000\022\006\n\002ON\020\001\022\007\n\003OFF\020\002\022\016\n\nAGGRESSIVE\020\003" +
    +      "\"I\n\tCpuLayout\022\030\n\024NO_CONVERSION_ON_CPU\020\000\022" +
    +      "\020\n\014NCHW_TO_NHWC\020\001\022\020\n\014NHWC_TO_NCHW\020\002\"<\n\021N" +
    +      "umIterationsType\022\025\n\021DEFAULT_NUM_ITERS\020\000\022" +
    +      "\007\n\003ONE\020\001\022\007\n\003TWO\020\002\"\237\001\n\nMemOptType\022\023\n\017DEFA" +
    +      "ULT_MEM_OPT\020\000\022\016\n\nNO_MEM_OPT\020\001\022\n\n\006MANUAL\020" +
    +      "\002\022\027\n\023SWAPPING_HEURISTICS\020\004\022\034\n\030RECOMPUTAT" +
    +      "ION_HEURISTICS\020\005\022\031\n\025SCHEDULING_HEURISTIC" +
    +      "S\020\006\022\016\n\nHEURISTICS\020\003B\222\001\n\036org.tensorflow.p" +
    +      "roto.frameworkB\024RewriterConfigProtosP\001ZU" +
    +      "github.com/tensorflow/tensorflow/tensorf" +
    +      "low/go/core/protobuf/for_core_protos_go_" +
    +      "proto\370\001\001b\006proto3"
         };
         descriptor = com.google.protobuf.Descriptors.FileDescriptor
           .internalBuildGeneratedFileFrom(descriptorData,
    @@ -146,7 +148,7 @@ public static void registerAllExtensions(
         internal_static_tensorflow_RewriterConfig_fieldAccessorTable = new
           com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
             internal_static_tensorflow_RewriterConfig_descriptor,
    -        new java.lang.String[] { "CpuLayoutConversion", "LayoutOptimizer", "ConstantFolding", "ShapeOptimization", "Remapping", "CommonSubgraphElimination", "ArithmeticOptimization", "DependencyOptimization", "LoopOptimization", "FunctionOptimization", "DebugStripper", "DisableModelPruning", "ScopedAllocatorOptimization", "PinToHostOptimization", "ImplementationSelector", "AutoMixedPrecision", "AutoMixedPrecisionMkl", "DisableMetaOptimizer", "UsePluginOptimizers", "MetaOptimizerIterations", "MinGraphNodes", "ExperimentalDisableCompressedTensorOptimization", "ExperimentalDisableFoldingQuantizationEmulation", "MemoryOptimization", "MemoryOptimizerTargetNodeNameScope", "MetaOptimizerTimeoutMs", "AutoParallel", "FailOnOptimizerErrors", "ScopedAllocatorOpts", "Optimizers", "CustomOptimizers", "InterOptimizerVerifierConfig", "PostOptimizationVerifierConfig", });
    +        new java.lang.String[] { "CpuLayoutConversion", "LayoutOptimizer", "ConstantFolding", "ShapeOptimization", "Remapping", "CommonSubgraphElimination", "ArithmeticOptimization", "DependencyOptimization", "LoopOptimization", "FunctionOptimization", "DebugStripper", "DisableModelPruning", "ScopedAllocatorOptimization", "PinToHostOptimization", "ImplementationSelector", "AutoMixedPrecision", "AutoMixedPrecisionMkl", "AutoMixedPrecisionCpu", "DisableMetaOptimizer", "UsePluginOptimizers", "MetaOptimizerIterations", "MinGraphNodes", "ExperimentalDisableCompressedTensorOptimization", "ExperimentalDisableFoldingQuantizationEmulation", "MemoryOptimization", "MemoryOptimizerTargetNodeNameScope", "MetaOptimizerTimeoutMs", "AutoParallel", "FailOnOptimizerErrors", "ScopedAllocatorOpts", "Optimizers", "CustomOptimizers", "InterOptimizerVerifierConfig", "PostOptimizationVerifierConfig", });
         internal_static_tensorflow_RewriterConfig_CustomGraphOptimizer_descriptor =
           internal_static_tensorflow_RewriterConfig_descriptor.getNestedTypes().get(0);
         internal_static_tensorflow_RewriterConfig_CustomGraphOptimizer_fieldAccessorTable = new
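Note: the RegisteredSaver message whose generated class and OrBuilder interface were added earlier in this patch belongs to the same registered-saver support that shows up again in the SavedObject changes further below. A minimal round-trip sketch against the regenerated API follows; the two string values are made-up placeholders, and toByteArray() is inherited from the protobuf runtime base class rather than defined in this diff.

  // Sketch: build, serialize and re-parse the new RegisteredSaver message.
  import org.tensorflow.proto.framework.RegisteredSaver;

  public final class RegisteredSaverDemo {
    public static void main(String[] args) throws Exception {
      RegisteredSaver saver = RegisteredSaver.newBuilder()
          .setName("Custom.saver_fn")      // placeholder: name of the registered saver/restore function
          .setObjectName("model/layer_0")  // placeholder: unique auto-generated name of the object
          .build();

      byte[] bytes = saver.toByteArray();
      RegisteredSaver parsed = RegisteredSaver.parseFrom(bytes);
      System.out.println(parsed.getName() + " handles " + parsed.getObjectName());
    }
  }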
    diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SaveableObject.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SaveableObject.java
    index c2b8ae241d9..6a4731f2e49 100644
    --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SaveableObject.java
    +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SaveableObject.java
    @@ -95,6 +95,7 @@ private SaveableObject(
       /**
    * <pre>
        * Node ids of concrete functions for saving and loading from a checkpoint.
    +   * These functions save and restore directly from tensors.
    * </pre>
    * * int32 save_function = 2; @@ -435,6 +436,7 @@ public Builder mergeFrom( /** *
          * Node ids of concrete functions for saving and loading from a checkpoint.
    +     * These functions save and restore directly from tensors.
      * </pre>
    * * int32 save_function = 2; @@ -445,6 +447,7 @@ public int getSaveFunction() { /** *
          * Node ids of concrete functions for saving and loading from a checkpoint.
    +     * These functions save and restore directly from tensors.
      * </pre>
    * * int32 save_function = 2; @@ -458,6 +461,7 @@ public Builder setSaveFunction(int value) { /** *
          * Node ids of concrete functions for saving and loading from a checkpoint.
    +     * These functions save and restore directly from tensors.
      * </pre>
    * * int32 save_function = 2; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SaveableObjectOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SaveableObjectOrBuilder.java index 896dcfa8b34..beacb40747e 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SaveableObjectOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SaveableObjectOrBuilder.java @@ -10,6 +10,7 @@ public interface SaveableObjectOrBuilder extends /** *
        * Node ids of concrete functions for saving and loading from a checkpoint.
    +   * These functions save and restore directly from tensors.
    * </pre>
    * * int32 save_function = 2; diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java index 4a4390f1e6a..2fac227a96a 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObject.java @@ -17,8 +17,10 @@ private SavedObject(com.google.protobuf.GeneratedMessageV3.Builder builder) { } private SavedObject() { children_ = java.util.Collections.emptyList(); + dependencies_ = java.util.Collections.emptyList(); slotVariables_ = java.util.Collections.emptyList(); registeredName_ = ""; + registeredSaver_ = ""; } @java.lang.Override @@ -62,9 +64,9 @@ private SavedObject( break; } case 26: { - if (!((mutable_bitField0_ & 0x00000002) != 0)) { + if (!((mutable_bitField0_ & 0x00000004) != 0)) { slotVariables_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; + mutable_bitField0_ |= 0x00000004; } slotVariables_.add( input.readMessage(org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SlotVariableReference.parser(), extensionRegistry)); @@ -169,10 +171,10 @@ private SavedObject( break; } case 90: { - if (!((mutable_bitField0_ & 0x00000004) != 0)) { + if (!((mutable_bitField0_ & 0x00000008) != 0)) { saveableObjects_ = com.google.protobuf.MapField.newMapField( SaveableObjectsDefaultEntryHolder.defaultEntry); - mutable_bitField0_ |= 0x00000004; + mutable_bitField0_ |= 0x00000008; } com.google.protobuf.MapEntry saveableObjects__ = input.readMessage( @@ -214,6 +216,21 @@ private SavedObject( break; } + case 122: { + if (!((mutable_bitField0_ & 0x00000002) != 0)) { + dependencies_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + dependencies_.add( + input.readMessage(org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.parser(), extensionRegistry)); + break; + } + case 130: { + java.lang.String s = input.readStringRequireUtf8(); + + registeredSaver_ = s; + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -232,9 +249,12 @@ private SavedObject( if (((mutable_bitField0_ & 0x00000001) != 0)) { children_ = java.util.Collections.unmodifiableList(children_); } - if (((mutable_bitField0_ & 0x00000002) != 0)) { + if (((mutable_bitField0_ & 0x00000004) != 0)) { slotVariables_ = java.util.Collections.unmodifiableList(slotVariables_); } + if (((mutable_bitField0_ & 0x00000002) != 0)) { + dependencies_ = java.util.Collections.unmodifiableList(dependencies_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -379,6 +399,71 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.Objec return children_.get(index); } + public static final int DEPENDENCIES_FIELD_NUMBER = 15; + private java.util.List dependencies_; + /** + *
    +   * Ordered list of dependencies that must be loaded before this object.
    +   * SavedModel loads with the bottom-up approach, by first creating all objects
    +   * (in the order defined by the dependencies), then connecting the edges.
+   * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public java.util.List getDependenciesList() { + return dependencies_; + } + /** + *
    +   * Ordered list of dependencies that must be loaded before this object.
    +   * SavedModel loads with the bottom-up approach, by first creating all objects
    +   * (in the order defined by the dependencies), then connecting the edges.
+   * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public java.util.List + getDependenciesOrBuilderList() { + return dependencies_; + } + /** + *
    +   * Ordered list of dependencies that must be loaded before this object.
    +   * SavedModel loads with the bottom-up approach, by first creating all objects
    +   * (in the order defined by the dependencies), then connecting the edges.
+   * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public int getDependenciesCount() { + return dependencies_.size(); + } + /** + *
    +   * Ordered list of dependencies that must be loaded before this object.
    +   * SavedModel loads with the bottom-up approach, by first creating all objects
    +   * (in the order defined by the dependencies), then connecting the edges.
+   * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference getDependencies(int index) { + return dependencies_.get(index); + } + /** + *
    +   * Ordered list of dependencies that must be loaded before this object.
    +   * SavedModel loads with the bottom-up approach, by first creating all objects
    +   * (in the order defined by the dependencies), then connecting the edges.
+   * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReferenceOrBuilder getDependenciesOrBuilder( + int index) { + return dependencies_.get(index); + } + public static final int SLOT_VARIABLES_FIELD_NUMBER = 3; private java.util.List slotVariables_; /** @@ -684,6 +769,13 @@ public int getSaveableObjectsCount() { return internalGetSaveableObjects().getMap().size(); } /** + *
    +   * Stores the functions used to save and restore this object. At most one of
    +   * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +   * See the comment below for the difference between SaveableObject and
    +   * registered savers.
+   * </pre>
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -700,6 +792,13 @@ public java.util.Map + * Stores the functions used to save and restore this object. At most one of + * `saveable_objects` or `registered_saver` is defined for each SavedObject. + * See the comment below for the difference between SaveableObject and + * registered savers. + *
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -707,6 +806,13 @@ public java.util.Map + * Stores the functions used to save and restore this object. At most one of + * `saveable_objects` or `registered_saver` is defined for each SavedObject. + * See the comment below for the difference between SaveableObject and + * registered savers. + *
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -719,6 +825,13 @@ public org.tensorflow.proto.framework.SaveableObject getSaveableObjectsOrDefault return map.containsKey(key) ? map.get(key) : defaultValue; } /** + *
    +   * Stores the functions used to save and restore this object. At most one of
    +   * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +   * See the comment below for the difference between SaveableObject and
    +   * registered savers.
+   * </pre>
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -737,9 +850,6 @@ public org.tensorflow.proto.framework.SaveableObject getSaveableObjectsOrThrow( private volatile java.lang.Object registeredName_; /** *
    -   * The fields below are filled when the user serializes a registered Trackable
    -   * class. Registered classes may save additional metadata and supersede the
    -   * default loading process where nodes are recreated from the proto.
        * The name of the registered class of the form "{package}.{class_name}".
        * This field is used to search for the registered class at loading time.
    * </pre>
    @@ -760,9 +870,6 @@ public java.lang.String getRegisteredName() { } /** *
    -   * The fields below are filled when the user serializes a registered Trackable
    -   * class. Registered classes may save additional metadata and supersede the
    -   * default loading process where nodes are recreated from the proto.
        * The name of the registered class of the form "{package}.{class_name}".
        * This field is used to search for the registered class at loading time.
    * </pre>
    @@ -822,6 +929,50 @@ public com.google.protobuf.AnyOrBuilder getSerializedUserProtoOrBuilder() { return getSerializedUserProto(); } + public static final int REGISTERED_SAVER_FIELD_NUMBER = 16; + private volatile java.lang.Object registeredSaver_; + /** + *
    +   * String name of the registered saver. At most one of `saveable_objects` or
    +   * `registered_saver` is defined for each SavedObject.
+   * </pre>
    + * + * string registered_saver = 16; + */ + public java.lang.String getRegisteredSaver() { + java.lang.Object ref = registeredSaver_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + registeredSaver_ = s; + return s; + } + } + /** + *
    +   * String name of the registered saver. At most one of `saveable_objects` or
    +   * `registered_saver` is defined for each SavedObject.
+   * </pre>
    + * + * string registered_saver = 16; + */ + public com.google.protobuf.ByteString + getRegisteredSaverBytes() { + java.lang.Object ref = registeredSaver_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + registeredSaver_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -878,6 +1029,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (serializedUserProto_ != null) { output.writeMessage(14, getSerializedUserProto()); } + for (int i = 0; i < dependencies_.size(); i++) { + output.writeMessage(15, dependencies_.get(i)); + } + if (!getRegisteredSaverBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 16, registeredSaver_); + } unknownFields.writeTo(output); } @@ -944,6 +1101,13 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(14, getSerializedUserProto()); } + for (int i = 0; i < dependencies_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(15, dependencies_.get(i)); + } + if (!getRegisteredSaverBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(16, registeredSaver_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -961,6 +1125,8 @@ public boolean equals(final java.lang.Object obj) { if (!getChildrenList() .equals(other.getChildrenList())) return false; + if (!getDependenciesList() + .equals(other.getDependenciesList())) return false; if (!getSlotVariablesList() .equals(other.getSlotVariablesList())) return false; if (!internalGetSaveableObjects().equals( @@ -972,6 +1138,8 @@ public boolean equals(final java.lang.Object obj) { if (!getSerializedUserProto() .equals(other.getSerializedUserProto())) return false; } + if (!getRegisteredSaver() + .equals(other.getRegisteredSaver())) return false; if (!getKindCase().equals(other.getKindCase())) return false; switch (kindCase_) { case 4: @@ -1024,6 +1192,10 @@ public int hashCode() { hash = (37 * hash) + CHILDREN_FIELD_NUMBER; hash = (53 * hash) + getChildrenList().hashCode(); } + if (getDependenciesCount() > 0) { + hash = (37 * hash) + DEPENDENCIES_FIELD_NUMBER; + hash = (53 * hash) + getDependenciesList().hashCode(); + } if (getSlotVariablesCount() > 0) { hash = (37 * hash) + SLOT_VARIABLES_FIELD_NUMBER; hash = (53 * hash) + getSlotVariablesList().hashCode(); @@ -1038,6 +1210,8 @@ public int hashCode() { hash = (37 * hash) + SERIALIZED_USER_PROTO_FIELD_NUMBER; hash = (53 * hash) + getSerializedUserProto().hashCode(); } + hash = (37 * hash) + REGISTERED_SAVER_FIELD_NUMBER; + hash = (53 * hash) + getRegisteredSaver().hashCode(); switch (kindCase_) { case 4: hash = (37 * hash) + USER_OBJECT_FIELD_NUMBER; @@ -1225,6 +1399,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3 .alwaysUseFieldBuilders) { getChildrenFieldBuilder(); + getDependenciesFieldBuilder(); getSlotVariablesFieldBuilder(); } } @@ -1237,9 +1412,15 @@ public Builder clear() { } else { childrenBuilder_.clear(); } + if (dependenciesBuilder_ == null) { + dependencies_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + dependenciesBuilder_.clear(); + } if (slotVariablesBuilder_ == null) { slotVariables_ = java.util.Collections.emptyList(); - bitField0_ 
= (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); } else { slotVariablesBuilder_.clear(); } @@ -1252,6 +1433,8 @@ public Builder clear() { serializedUserProto_ = null; serializedUserProtoBuilder_ = null; } + registeredSaver_ = ""; + kindCase_ = 0; kind_ = null; return this; @@ -1290,11 +1473,20 @@ public org.tensorflow.proto.framework.SavedObject buildPartial() { } else { result.children_ = childrenBuilder_.build(); } - if (slotVariablesBuilder_ == null) { + if (dependenciesBuilder_ == null) { if (((bitField0_ & 0x00000002) != 0)) { - slotVariables_ = java.util.Collections.unmodifiableList(slotVariables_); + dependencies_ = java.util.Collections.unmodifiableList(dependencies_); bitField0_ = (bitField0_ & ~0x00000002); } + result.dependencies_ = dependencies_; + } else { + result.dependencies_ = dependenciesBuilder_.build(); + } + if (slotVariablesBuilder_ == null) { + if (((bitField0_ & 0x00000004) != 0)) { + slotVariables_ = java.util.Collections.unmodifiableList(slotVariables_); + bitField0_ = (bitField0_ & ~0x00000004); + } result.slotVariables_ = slotVariables_; } else { result.slotVariables_ = slotVariablesBuilder_.build(); @@ -1363,6 +1555,7 @@ public org.tensorflow.proto.framework.SavedObject buildPartial() { } else { result.serializedUserProto_ = serializedUserProtoBuilder_.build(); } + result.registeredSaver_ = registeredSaver_; result.kindCase_ = kindCase_; onBuilt(); return result; @@ -1438,11 +1631,37 @@ public Builder mergeFrom(org.tensorflow.proto.framework.SavedObject other) { } } } + if (dependenciesBuilder_ == null) { + if (!other.dependencies_.isEmpty()) { + if (dependencies_.isEmpty()) { + dependencies_ = other.dependencies_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureDependenciesIsMutable(); + dependencies_.addAll(other.dependencies_); + } + onChanged(); + } + } else { + if (!other.dependencies_.isEmpty()) { + if (dependenciesBuilder_.isEmpty()) { + dependenciesBuilder_.dispose(); + dependenciesBuilder_ = null; + dependencies_ = other.dependencies_; + bitField0_ = (bitField0_ & ~0x00000002); + dependenciesBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getDependenciesFieldBuilder() : null; + } else { + dependenciesBuilder_.addAllMessages(other.dependencies_); + } + } + } if (slotVariablesBuilder_ == null) { if (!other.slotVariables_.isEmpty()) { if (slotVariables_.isEmpty()) { slotVariables_ = other.slotVariables_; - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); } else { ensureSlotVariablesIsMutable(); slotVariables_.addAll(other.slotVariables_); @@ -1455,7 +1674,7 @@ public Builder mergeFrom(org.tensorflow.proto.framework.SavedObject other) { slotVariablesBuilder_.dispose(); slotVariablesBuilder_ = null; slotVariables_ = other.slotVariables_; - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); slotVariablesBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getSlotVariablesFieldBuilder() : null; @@ -1473,6 +1692,10 @@ public Builder mergeFrom(org.tensorflow.proto.framework.SavedObject other) { if (other.hasSerializedUserProto()) { mergeSerializedUserProto(other.getSerializedUserProto()); } + if (!other.getRegisteredSaver().isEmpty()) { + registeredSaver_ = other.registeredSaver_; + onChanged(); + } switch (other.getKindCase()) { case USER_OBJECT: { mergeUserObject(other.getUserObject()); @@ -1903,12 +2126,360 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.Objec return childrenBuilder_; } + private java.util.List dependencies_ = + java.util.Collections.emptyList(); + private void ensureDependenciesIsMutable() { + if (!((bitField0_ & 0x00000002) != 0)) { + dependencies_ = new java.util.ArrayList(dependencies_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.Builder, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReferenceOrBuilder> dependenciesBuilder_; + + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public java.util.List getDependenciesList() { + if (dependenciesBuilder_ == null) { + return java.util.Collections.unmodifiableList(dependencies_); + } else { + return dependenciesBuilder_.getMessageList(); + } + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public int getDependenciesCount() { + if (dependenciesBuilder_ == null) { + return dependencies_.size(); + } else { + return dependenciesBuilder_.getCount(); + } + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference getDependencies(int index) { + if (dependenciesBuilder_ == null) { + return dependencies_.get(index); + } else { + return dependenciesBuilder_.getMessage(index); + } + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public Builder setDependencies( + int index, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference value) { + if (dependenciesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDependenciesIsMutable(); + dependencies_.set(index, value); + onChanged(); + } else { + dependenciesBuilder_.setMessage(index, value); + } + return this; + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public Builder setDependencies( + int index, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.Builder builderForValue) { + if (dependenciesBuilder_ == null) { + ensureDependenciesIsMutable(); + dependencies_.set(index, builderForValue.build()); + onChanged(); + } else { + dependenciesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public Builder addDependencies(org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference value) { + if (dependenciesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDependenciesIsMutable(); + dependencies_.add(value); + onChanged(); + } else { + dependenciesBuilder_.addMessage(value); + } + return this; + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public Builder addDependencies( + int index, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference value) { + if (dependenciesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDependenciesIsMutable(); + dependencies_.add(index, value); + onChanged(); + } else { + dependenciesBuilder_.addMessage(index, value); + } + return this; + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public Builder addDependencies( + org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.Builder builderForValue) { + if (dependenciesBuilder_ == null) { + ensureDependenciesIsMutable(); + dependencies_.add(builderForValue.build()); + onChanged(); + } else { + dependenciesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public Builder addDependencies( + int index, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.Builder builderForValue) { + if (dependenciesBuilder_ == null) { + ensureDependenciesIsMutable(); + dependencies_.add(index, builderForValue.build()); + onChanged(); + } else { + dependenciesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public Builder addAllDependencies( + java.lang.Iterable values) { + if (dependenciesBuilder_ == null) { + ensureDependenciesIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, dependencies_); + onChanged(); + } else { + dependenciesBuilder_.addAllMessages(values); + } + return this; + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
+     * </pre>
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public Builder clearDependencies() { + if (dependenciesBuilder_ == null) { + dependencies_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + dependenciesBuilder_.clear(); + } + return this; + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
    +     * 
    +     *
    +     * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15;
    +     */
    +    public Builder removeDependencies(int index) {
    +      if (dependenciesBuilder_ == null) {
    +        ensureDependenciesIsMutable();
    +        dependencies_.remove(index);
    +        onChanged();
    +      } else {
    +        dependenciesBuilder_.remove(index);
    +      }
    +      return this;
    +    }
    +    /**
    +     *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
    +     * 
    +     *
    +     * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15;
    +     */
    +    public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.Builder getDependenciesBuilder(
    +        int index) {
    +      return getDependenciesFieldBuilder().getBuilder(index);
    +    }
    +    /**
    +     *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
    +     * 
    +     *
    +     * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15;
    +     */
    +    public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReferenceOrBuilder getDependenciesOrBuilder(
    +        int index) {
    +      if (dependenciesBuilder_ == null) {
    +        return dependencies_.get(index);  } else {
    +        return dependenciesBuilder_.getMessageOrBuilder(index);
    +      }
    +    }
    +    /**
    +     *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
    +     * 
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public java.util.List + getDependenciesOrBuilderList() { + if (dependenciesBuilder_ != null) { + return dependenciesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(dependencies_); + } + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
    +     * 
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.Builder addDependenciesBuilder() { + return getDependenciesFieldBuilder().addBuilder( + org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.getDefaultInstance()); + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
    +     * 
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.Builder addDependenciesBuilder( + int index) { + return getDependenciesFieldBuilder().addBuilder( + index, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.getDefaultInstance()); + } + /** + *
    +     * Ordered list of dependencies that must be loaded before this object.
    +     * SavedModel loads with the bottom-up approach, by first creating all objects
    +     * (in the order defined by the dependencies), then connecting the edges.
    +     * 
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + public java.util.List + getDependenciesBuilderList() { + return getDependenciesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.Builder, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReferenceOrBuilder> + getDependenciesFieldBuilder() { + if (dependenciesBuilder_ == null) { + dependenciesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference.Builder, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReferenceOrBuilder>( + dependencies_, + ((bitField0_ & 0x00000002) != 0), + getParentForChildren(), + isClean()); + dependencies_ = null; + } + return dependenciesBuilder_; + } + private java.util.List slotVariables_ = java.util.Collections.emptyList(); private void ensureSlotVariablesIsMutable() { - if (!((bitField0_ & 0x00000002) != 0)) { + if (!((bitField0_ & 0x00000004) != 0)) { slotVariables_ = new java.util.ArrayList(slotVariables_); - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; } } @@ -2135,7 +2706,7 @@ public Builder addAllSlotVariables( public Builder clearSlotVariables() { if (slotVariablesBuilder_ == null) { slotVariables_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); onChanged(); } else { slotVariablesBuilder_.clear(); @@ -2261,7 +2832,7 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SlotV slotVariablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SlotVariableReference, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SlotVariableReference.Builder, org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SlotVariableReferenceOrBuilder>( slotVariables_, - ((bitField0_ & 0x00000002) != 0), + ((bitField0_ & 0x00000004) != 0), getParentForChildren(), isClean()); slotVariables_ = null; @@ -3384,6 +3955,13 @@ public int getSaveableObjectsCount() { return internalGetSaveableObjects().getMap().size(); } /** + *
    +     * Stores the functions used to save and restore this object. At most one of
    +     * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +     * See the comment below for the difference between SaveableObject and
    +     * registered savers.
    +     * 
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -3400,6 +3978,13 @@ public java.util.Map + * Stores the functions used to save and restore this object. At most one of + * `saveable_objects` or `registered_saver` is defined for each SavedObject. + * See the comment below for the difference between SaveableObject and + * registered savers. + *
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -3407,6 +3992,13 @@ public java.util.Map + * Stores the functions used to save and restore this object. At most one of + * `saveable_objects` or `registered_saver` is defined for each SavedObject. + * See the comment below for the difference between SaveableObject and + * registered savers. + *
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -3419,6 +4011,13 @@ public org.tensorflow.proto.framework.SaveableObject getSaveableObjectsOrDefault return map.containsKey(key) ? map.get(key) : defaultValue; } /** + *
    +     * Stores the functions used to save and restore this object. At most one of
    +     * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +     * See the comment below for the difference between SaveableObject and
    +     * registered savers.
    +     * 
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -3439,6 +4038,13 @@ public Builder clearSaveableObjects() { return this; } /** + *
    +     * Stores the functions used to save and restore this object. At most one of
    +     * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +     * See the comment below for the difference between SaveableObject and
    +     * registered savers.
    +     * 
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -3458,6 +4064,13 @@ public Builder removeSaveableObjects( return internalGetMutableSaveableObjects().getMutableMap(); } /** + *
    +     * Stores the functions used to save and restore this object. At most one of
    +     * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +     * See the comment below for the difference between SaveableObject and
    +     * registered savers.
    +     * 
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ public Builder putSaveableObjects( @@ -3470,6 +4083,13 @@ public Builder putSaveableObjects( return this; } /** + *
    +     * Stores the functions used to save and restore this object. At most one of
    +     * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +     * See the comment below for the difference between SaveableObject and
    +     * registered savers.
    +     * 
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -3483,9 +4103,6 @@ public Builder putAllSaveableObjects( private java.lang.Object registeredName_ = ""; /** *
    -     * The fields below are filled when the user serializes a registered Trackable
    -     * class. Registered classes may save additional metadata and supersede the
    -     * default loading process where nodes are recreated from the proto.
          * The name of the registered class of the form "{package}.{class_name}".
          * This field is used to search for the registered class at loading time.
          * 
    @@ -3506,9 +4123,6 @@ public java.lang.String getRegisteredName() { } /** *
    -     * The fields below are filled when the user serializes a registered Trackable
    -     * class. Registered classes may save additional metadata and supersede the
    -     * default loading process where nodes are recreated from the proto.
          * The name of the registered class of the form "{package}.{class_name}".
          * This field is used to search for the registered class at loading time.
          * 
    @@ -3530,9 +4144,6 @@ public java.lang.String getRegisteredName() { } /** *
    -     * The fields below are filled when the user serializes a registered Trackable
    -     * class. Registered classes may save additional metadata and supersede the
    -     * default loading process where nodes are recreated from the proto.
          * The name of the registered class of the form "{package}.{class_name}".
          * This field is used to search for the registered class at loading time.
          * 
    @@ -3551,9 +4162,6 @@ public Builder setRegisteredName( } /** *
    -     * The fields below are filled when the user serializes a registered Trackable
    -     * class. Registered classes may save additional metadata and supersede the
    -     * default loading process where nodes are recreated from the proto.
          * The name of the registered class of the form "{package}.{class_name}".
          * This field is used to search for the registered class at loading time.
          * 
    @@ -3568,9 +4176,6 @@ public Builder clearRegisteredName() { } /** *
    -     * The fields below are filled when the user serializes a registered Trackable
    -     * class. Registered classes may save additional metadata and supersede the
    -     * default loading process where nodes are recreated from the proto.
          * The name of the registered class of the form "{package}.{class_name}".
          * This field is used to search for the registered class at loading time.
          * 
    @@ -3759,6 +4364,100 @@ public com.google.protobuf.AnyOrBuilder getSerializedUserProtoOrBuilder() { } return serializedUserProtoBuilder_; } + + private java.lang.Object registeredSaver_ = ""; + /** + *
    +     * String name of the registered saver. At most one of `saveable_objects` or
    +     * `registered_saver` is defined for each SavedObject.
    +     * 
    + * + * string registered_saver = 16; + */ + public java.lang.String getRegisteredSaver() { + java.lang.Object ref = registeredSaver_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + registeredSaver_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + *
    +     * String name of the registered saver. At most one of `saveable_objects` or
    +     * `registered_saver` is defined for each SavedObject.
    +     * 
    + * + * string registered_saver = 16; + */ + public com.google.protobuf.ByteString + getRegisteredSaverBytes() { + java.lang.Object ref = registeredSaver_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + registeredSaver_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + *
    +     * String name of the registered saver. At most one of `saveable_objects` or
    +     * `registered_saver` is defined for each SavedObject.
    +     * 
    + * + * string registered_saver = 16; + */ + public Builder setRegisteredSaver( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + registeredSaver_ = value; + onChanged(); + return this; + } + /** + *
    +     * String name of the registered saver. At most one of `saveable_objects` or
    +     * `registered_saver` is defined for each SavedObject.
    +     * 
    + * + * string registered_saver = 16; + */ + public Builder clearRegisteredSaver() { + + registeredSaver_ = getDefaultInstance().getRegisteredSaver(); + onChanged(); + return this; + } + /** + *
    +     * String name of the registered saver. At most one of `saveable_objects` or
    +     * `registered_saver` is defined for each SavedObject.
    +     * 
    + * + * string registered_saver = 16; + */ + public Builder setRegisteredSaverBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + registeredSaver_ = value; + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java index 9d9180a990b..44d1f9dc740 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectGraphProtos.java @@ -112,67 +112,70 @@ public static void registerAllExtensions( "(\01323.tensorflow.SavedObjectGraph.Concret" + "eFunctionsEntry\032[\n\026ConcreteFunctionsEntr" + "y\022\013\n\003key\030\001 \001(\t\0220\n\005value\030\002 \001(\0132!.tensorfl" + - "ow.SavedConcreteFunction:\0028\001\"\336\006\n\013SavedOb" + + "ow.SavedConcreteFunction:\0028\001\"\320\007\n\013SavedOb" + "ject\022R\n\010children\030\001 \003(\0132@.tensorflow.Trac" + "kableObjectGraph.TrackableObject.ObjectR" + - "eference\022^\n\016slot_variables\030\003 \003(\0132F.tenso" + - "rflow.TrackableObjectGraph.TrackableObje" + - "ct.SlotVariableReference\0222\n\013user_object\030" + - "\004 \001(\0132\033.tensorflow.SavedUserObjectH\000\022\'\n\005" + - "asset\030\005 \001(\0132\026.tensorflow.SavedAssetH\000\022-\n" + - "\010function\030\006 \001(\0132\031.tensorflow.SavedFuncti" + - "onH\000\022-\n\010variable\030\007 \001(\0132\031.tensorflow.Save" + - "dVariableH\000\022G\n\026bare_concrete_function\030\010 " + - "\001(\0132%.tensorflow.SavedBareConcreteFuncti" + - "onH\000\022-\n\010constant\030\t \001(\0132\031.tensorflow.Save" + - "dConstantH\000\022-\n\010resource\030\n \001(\0132\031.tensorfl" + - "ow.SavedResourceH\000\0225\n\017captured_tensor\030\014 " + - "\001(\0132\032.tensorflow.CapturedTensorH\000\022F\n\020sav" + - "eable_objects\030\013 \003(\0132,.tensorflow.SavedOb" + - "ject.SaveableObjectsEntry\022\027\n\017registered_" + - "name\030\r \001(\t\0223\n\025serialized_user_proto\030\016 \001(" + - "\0132\024.google.protobuf.Any\032R\n\024SaveableObjec" + - "tsEntry\022\013\n\003key\030\001 \001(\t\022)\n\005value\030\002 \001(\0132\032.te" + - "nsorflow.SaveableObject:\0028\001B\006\n\004kindJ\004\010\002\020" + - "\003R\nattributes\"d\n\017SavedUserObject\022\022\n\niden" + - "tifier\030\001 \001(\t\022\'\n\007version\030\002 \001(\0132\026.tensorfl" + - "ow.VersionDef\022\024\n\010metadata\030\003 \001(\tB\002\030\001\"*\n\nS" + - "avedAsset\022\034\n\024asset_file_def_index\030\001 \001(\005\"" + - "\\\n\rSavedFunction\022\032\n\022concrete_functions\030\001" + - " \003(\t\022/\n\rfunction_spec\030\002 \001(\0132\030.tensorflow" + - ".FunctionSpec\"9\n\016CapturedTensor\022\014\n\004name\030" + - "\001 \001(\t\022\031\n\021concrete_function\030\002 \001(\t\"\250\001\n\025Sav" + - "edConcreteFunction\022\024\n\014bound_inputs\030\002 \003(\005" + - "\022B\n\035canonicalized_input_signature\030\003 \001(\0132" + - "\033.tensorflow.StructuredValue\0225\n\020output_s" + - "ignature\030\004 \001(\0132\033.tensorflow.StructuredVa" + - "lue\"\255\001\n\031SavedBareConcreteFunction\022\036\n\026con" + - 
"crete_function_name\030\001 \001(\t\022\031\n\021argument_ke" + - "ywords\030\002 \003(\t\022$\n\034allowed_positional_argum" + - "ents\030\003 \001(\003\022/\n\rfunction_spec\030\004 \001(\0132\030.tens" + - "orflow.FunctionSpec\"\"\n\rSavedConstant\022\021\n\t" + - "operation\030\001 \001(\t\"\327\002\n\rSavedVariable\022#\n\005dty" + - "pe\030\001 \001(\0162\024.tensorflow.DataType\022+\n\005shape\030" + - "\002 \001(\0132\034.tensorflow.TensorShapeProto\022\021\n\tt" + - "rainable\030\003 \001(\010\022<\n\017synchronization\030\004 \001(\0162" + - "#.tensorflow.VariableSynchronization\0224\n\013" + - "aggregation\030\005 \001(\0162\037.tensorflow.VariableA" + - "ggregation\022\014\n\004name\030\006 \001(\t\022\016\n\006device\030\007 \001(\t" + - "\022O\n,experimental_distributed_variable_co" + - "mponents\030\010 \003(\0132\031.tensorflow.SavedVariabl" + - "e\"\373\001\n\014FunctionSpec\0220\n\013fullargspec\030\001 \001(\0132" + - "\033.tensorflow.StructuredValue\022\021\n\tis_metho" + - "d\030\002 \001(\010\0224\n\017input_signature\030\005 \001(\0132\033.tenso" + - "rflow.StructuredValue\0228\n\013jit_compile\030\006 \001" + - "(\0162#.tensorflow.FunctionSpec.JitCompile\"" + - "*\n\nJitCompile\022\013\n\007DEFAULT\020\000\022\006\n\002ON\020\001\022\007\n\003OF" + - "F\020\002J\004\010\003\020\004J\004\010\004\020\005\"\037\n\rSavedResource\022\016\n\006devi" + - "ce\030\001 \001(\t\"A\n\016SaveableObject\022\025\n\rsave_funct" + - "ion\030\002 \001(\005\022\030\n\020restore_function\030\003 \001(\005B\224\001\n\036" + - "org.tensorflow.proto.frameworkB\026SavedObj" + - "ectGraphProtosP\001ZUgithub.com/tensorflow/" + - "tensorflow/tensorflow/go/core/protobuf/f" + - "or_core_protos_go_proto\370\001\001b\006proto3" + "eference\022V\n\014dependencies\030\017 \003(\0132@.tensorf" + + "low.TrackableObjectGraph.TrackableObject" + + ".ObjectReference\022^\n\016slot_variables\030\003 \003(\013" + + "2F.tensorflow.TrackableObjectGraph.Track" + + "ableObject.SlotVariableReference\0222\n\013user" + + "_object\030\004 \001(\0132\033.tensorflow.SavedUserObje" + + "ctH\000\022\'\n\005asset\030\005 \001(\0132\026.tensorflow.SavedAs" + + "setH\000\022-\n\010function\030\006 \001(\0132\031.tensorflow.Sav" + + "edFunctionH\000\022-\n\010variable\030\007 \001(\0132\031.tensorf" + + "low.SavedVariableH\000\022G\n\026bare_concrete_fun" + + "ction\030\010 \001(\0132%.tensorflow.SavedBareConcre" + + "teFunctionH\000\022-\n\010constant\030\t \001(\0132\031.tensorf" + + "low.SavedConstantH\000\022-\n\010resource\030\n \001(\0132\031." 
+ + "tensorflow.SavedResourceH\000\0225\n\017captured_t" + + "ensor\030\014 \001(\0132\032.tensorflow.CapturedTensorH" + + "\000\022F\n\020saveable_objects\030\013 \003(\0132,.tensorflow" + + ".SavedObject.SaveableObjectsEntry\022\027\n\017reg" + + "istered_name\030\r \001(\t\0223\n\025serialized_user_pr" + + "oto\030\016 \001(\0132\024.google.protobuf.Any\022\030\n\020regis" + + "tered_saver\030\020 \001(\t\032R\n\024SaveableObjectsEntr" + + "y\022\013\n\003key\030\001 \001(\t\022)\n\005value\030\002 \001(\0132\032.tensorfl" + + "ow.SaveableObject:\0028\001B\006\n\004kindJ\004\010\002\020\003R\natt" + + "ributes\"d\n\017SavedUserObject\022\022\n\nidentifier" + + "\030\001 \001(\t\022\'\n\007version\030\002 \001(\0132\026.tensorflow.Ver" + + "sionDef\022\024\n\010metadata\030\003 \001(\tB\002\030\001\"*\n\nSavedAs" + + "set\022\034\n\024asset_file_def_index\030\001 \001(\005\"\\\n\rSav" + + "edFunction\022\032\n\022concrete_functions\030\001 \003(\t\022/" + + "\n\rfunction_spec\030\002 \001(\0132\030.tensorflow.Funct" + + "ionSpec\"9\n\016CapturedTensor\022\014\n\004name\030\001 \001(\t\022" + + "\031\n\021concrete_function\030\002 \001(\t\"\250\001\n\025SavedConc" + + "reteFunction\022\024\n\014bound_inputs\030\002 \003(\005\022B\n\035ca" + + "nonicalized_input_signature\030\003 \001(\0132\033.tens" + + "orflow.StructuredValue\0225\n\020output_signatu" + + "re\030\004 \001(\0132\033.tensorflow.StructuredValue\"\255\001" + + "\n\031SavedBareConcreteFunction\022\036\n\026concrete_" + + "function_name\030\001 \001(\t\022\031\n\021argument_keywords" + + "\030\002 \003(\t\022$\n\034allowed_positional_arguments\030\003" + + " \001(\003\022/\n\rfunction_spec\030\004 \001(\0132\030.tensorflow" + + ".FunctionSpec\"\"\n\rSavedConstant\022\021\n\toperat" + + "ion\030\001 \001(\t\"\327\002\n\rSavedVariable\022#\n\005dtype\030\001 \001" + + "(\0162\024.tensorflow.DataType\022+\n\005shape\030\002 \001(\0132" + + "\034.tensorflow.TensorShapeProto\022\021\n\ttrainab" + + "le\030\003 \001(\010\022<\n\017synchronization\030\004 \001(\0162#.tens" + + "orflow.VariableSynchronization\0224\n\013aggreg" + + "ation\030\005 \001(\0162\037.tensorflow.VariableAggrega" + + "tion\022\014\n\004name\030\006 \001(\t\022\016\n\006device\030\007 \001(\t\022O\n,ex" + + "perimental_distributed_variable_componen" + + "ts\030\010 \003(\0132\031.tensorflow.SavedVariable\"\373\001\n\014" + + "FunctionSpec\0220\n\013fullargspec\030\001 \001(\0132\033.tens" + + "orflow.StructuredValue\022\021\n\tis_method\030\002 \001(" + + "\010\0224\n\017input_signature\030\005 \001(\0132\033.tensorflow." 
+ + "StructuredValue\0228\n\013jit_compile\030\006 \001(\0162#.t" + + "ensorflow.FunctionSpec.JitCompile\"*\n\nJit" + + "Compile\022\013\n\007DEFAULT\020\000\022\006\n\002ON\020\001\022\007\n\003OFF\020\002J\004\010" + + "\003\020\004J\004\010\004\020\005\"\037\n\rSavedResource\022\016\n\006device\030\001 \001" + + "(\t\"A\n\016SaveableObject\022\025\n\rsave_function\030\002 " + + "\001(\005\022\030\n\020restore_function\030\003 \001(\005B\224\001\n\036org.te" + + "nsorflow.proto.frameworkB\026SavedObjectGra" + + "phProtosP\001ZUgithub.com/tensorflow/tensor" + + "flow/tensorflow/go/core/protobuf/for_cor" + + "e_protos_go_proto\370\001\001b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -202,7 +205,7 @@ public static void registerAllExtensions( internal_static_tensorflow_SavedObject_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_SavedObject_descriptor, - new java.lang.String[] { "Children", "SlotVariables", "UserObject", "Asset", "Function", "Variable", "BareConcreteFunction", "Constant", "Resource", "CapturedTensor", "SaveableObjects", "RegisteredName", "SerializedUserProto", "Kind", }); + new java.lang.String[] { "Children", "Dependencies", "SlotVariables", "UserObject", "Asset", "Function", "Variable", "BareConcreteFunction", "Constant", "Resource", "CapturedTensor", "SaveableObjects", "RegisteredName", "SerializedUserProto", "RegisteredSaver", "Kind", }); internal_static_tensorflow_SavedObject_SaveableObjectsEntry_descriptor = internal_static_tensorflow_SavedObject_descriptor.getNestedTypes().get(0); internal_static_tensorflow_SavedObject_SaveableObjectsEntry_fieldAccessorTable = new diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java index be025d44436..5efa11691ef 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/SavedObjectOrBuilder.java @@ -61,6 +61,60 @@ public interface SavedObjectOrBuilder extends org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReferenceOrBuilder getChildrenOrBuilder( int index); + /** + *
    +   * Ordered list of dependencies that must be loaded before this object.
    +   * SavedModel loads with the bottom-up approach, by first creating all objects
    +   * (in the order defined by the dependencies), then connecting the edges.
    +   * 
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + java.util.List + getDependenciesList(); + /** + *
    +   * Ordered list of dependencies that must be loaded before this object.
    +   * SavedModel loads with the bottom-up approach, by first creating all objects
    +   * (in the order defined by the dependencies), then connecting the edges.
    +   * 
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReference getDependencies(int index); + /** + *
    +   * Ordered list of dependencies that must be loaded before this object.
    +   * SavedModel loads with the bottom-up approach, by first creating all objects
    +   * (in the order defined by the dependencies), then connecting the edges.
    +   * 
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + int getDependenciesCount(); + /** + *
    +   * Ordered list of dependencies that must be loaded before this object.
    +   * SavedModel loads with the bottom-up approach, by first creating all objects
    +   * (in the order defined by the dependencies), then connecting the edges.
    +   * 
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + java.util.List + getDependenciesOrBuilderList(); + /** + *
    +   * Ordered list of dependencies that must be loaded before this object.
    +   * SavedModel loads with the bottom-up approach, by first creating all objects
    +   * (in the order defined by the dependencies), then connecting the edges.
    +   * 
    + * + * repeated .tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference dependencies = 15; + */ + org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.ObjectReferenceOrBuilder getDependenciesOrBuilder( + int index); + /** *
        * Slot variables owned by this object. This describes the three-way
    @@ -225,10 +279,24 @@ org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SlotVariable
       org.tensorflow.proto.framework.CapturedTensorOrBuilder getCapturedTensorOrBuilder();
     
       /**
    +   * 
    +   * Stores the functions used to save and restore this object. At most one of
    +   * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +   * See the comment below for the difference between SaveableObject and
    +   * registered savers.
    +   * 
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ int getSaveableObjectsCount(); /** + *
    +   * Stores the functions used to save and restore this object. At most one of
    +   * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +   * See the comment below for the difference between SaveableObject and
    +   * registered savers.
    +   * 
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ boolean containsSaveableObjects( @@ -240,11 +308,25 @@ boolean containsSaveableObjects( java.util.Map getSaveableObjects(); /** + *
    +   * Stores the functions used to save and restore this object. At most one of
    +   * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +   * See the comment below for the difference between SaveableObject and
    +   * registered savers.
    +   * 
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ java.util.Map getSaveableObjectsMap(); /** + *
    +   * Stores the functions used to save and restore this object. At most one of
    +   * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +   * See the comment below for the difference between SaveableObject and
    +   * registered savers.
    +   * 
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -252,6 +334,13 @@ org.tensorflow.proto.framework.SaveableObject getSaveableObjectsOrDefault( java.lang.String key, org.tensorflow.proto.framework.SaveableObject defaultValue); /** + *
    +   * Stores the functions used to save and restore this object. At most one of
    +   * `saveable_objects` or `registered_saver` is defined for each SavedObject.
    +   * See the comment below for the difference between SaveableObject and
    +   * registered savers.
    +   * 
    + * * map<string, .tensorflow.SaveableObject> saveable_objects = 11; */ @@ -260,9 +349,6 @@ org.tensorflow.proto.framework.SaveableObject getSaveableObjectsOrThrow( /** *
    -   * The fields below are filled when the user serializes a registered Trackable
    -   * class. Registered classes may save additional metadata and supersede the
    -   * default loading process where nodes are recreated from the proto.
        * The name of the registered class of the form "{package}.{class_name}".
        * This field is used to search for the registered class at loading time.
        * 
    @@ -272,9 +358,6 @@ org.tensorflow.proto.framework.SaveableObject getSaveableObjectsOrThrow( java.lang.String getRegisteredName(); /** *
    -   * The fields below are filled when the user serializes a registered Trackable
    -   * class. Registered classes may save additional metadata and supersede the
    -   * default loading process where nodes are recreated from the proto.
        * The name of the registered class of the form "{package}.{class_name}".
        * This field is used to search for the registered class at loading time.
        * 
    @@ -315,5 +398,25 @@ org.tensorflow.proto.framework.SaveableObject getSaveableObjectsOrThrow( */ com.google.protobuf.AnyOrBuilder getSerializedUserProtoOrBuilder(); + /** + *
    +   * String name of the registered saver. At most one of `saveable_objects` or
    +   * `registered_saver` is defined for each SavedObject.
    +   * 
    + * + * string registered_saver = 16; + */ + java.lang.String getRegisteredSaver(); + /** + *
    +   * String name of the registered saver. At most one of `saveable_objects` or
    +   * `registered_saver` is defined for each SavedObject.
    +   * 
    + * + * string registered_saver = 16; + */ + com.google.protobuf.ByteString + getRegisteredSaverBytes(); + public org.tensorflow.proto.framework.SavedObject.KindCase getKindCase(); } diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/Status.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/Status.java index cbc8a5b5e44..0a86d6889a2 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/Status.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/Status.java @@ -459,8 +459,10 @@ public org.tensorflow.proto.framework.Status.DerivedStatus getDefaultInstanceFor static { java.lang.String[] descriptorData = { "\n%tensorflow/core/protobuf/status.proto\022" + - "\ntensorflow\"\017\n\rDerivedStatusB \n\036org.tens" + - "orflow.proto.frameworkb\006proto3" + "\ntensorflow\"\017\n\rDerivedStatusBw\n\036org.tens" + + "orflow.proto.frameworkZUgithub.com/tenso" + + "rflow/tensorflow/tensorflow/go/core/prot" + + "obuf/for_core_protos_go_protob\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TrackableObjectGraph.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TrackableObjectGraph.java index 5d516f926dc..3d48a846e96 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TrackableObjectGraph.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TrackableObjectGraph.java @@ -229,6 +229,68 @@ org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SerializedTe */ org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SlotVariableReferenceOrBuilder getSlotVariablesOrBuilder( int index); + + /** + *
    +     * The registered saver used to save this object. If this saver is not
    +     * present when loading the checkpoint, then loading will fail.
    +     * 
    +     *
    +     * .tensorflow.RegisteredSaver registered_saver = 4;
    +     */
    +    boolean hasRegisteredSaver();
    +    /**
    +     *
    +     * The registered saver used to save this object. If this saver is not
    +     * present when loading the checkpoint, then loading will fail.
    +     * 
    +     *
    +     * .tensorflow.RegisteredSaver registered_saver = 4;
    +     */
    +    org.tensorflow.proto.framework.RegisteredSaver getRegisteredSaver();
    +    /**
    +     *
    +     * The registered saver used to save this object. If this saver is not
    +     * present when loading the checkpoint, then loading will fail.
    +     * 
    +     *
    +     * .tensorflow.RegisteredSaver registered_saver = 4;
    +     */
    +    org.tensorflow.proto.framework.RegisteredSaverOrBuilder getRegisteredSaverOrBuilder();
    +
    +    /**
    +     *
    +     * Whether this object has checkpoint values or descendants with checkpoint
    +     * values. This is computed at save time to avoid traversing the entire
    +     * object graph proto when restoring (which also has to traverse the live
    +     * object graph).
    +     * 
    +     *
    +     * .google.protobuf.BoolValue has_checkpoint_values = 5;
    +     */
    +    boolean hasHasCheckpointValues();
    +    /**
    +     *
    +     * Whether this object has checkpoint values or descendants with checkpoint
    +     * values. This is computed at save time to avoid traversing the entire
    +     * object graph proto when restoring (which also has to traverse the live
    +     * object graph).
    +     * 
    +     *
    +     * .google.protobuf.BoolValue has_checkpoint_values = 5;
    +     */
    +    com.google.protobuf.BoolValue getHasCheckpointValues();
    +    /**
    +     *
    +     * Whether this object has checkpoint values or descendants with checkpoint
    +     * values. This is computed at save time to avoid traversing the entire
    +     * object graph proto when restoring (which also has to traverse the live
    +     * object graph).
    +     * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + com.google.protobuf.BoolValueOrBuilder getHasCheckpointValuesOrBuilder(); } /** * Protobuf type {@code tensorflow.TrackableObjectGraph.TrackableObject} @@ -306,6 +368,32 @@ private TrackableObject( input.readMessage(org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SlotVariableReference.parser(), extensionRegistry)); break; } + case 34: { + org.tensorflow.proto.framework.RegisteredSaver.Builder subBuilder = null; + if (registeredSaver_ != null) { + subBuilder = registeredSaver_.toBuilder(); + } + registeredSaver_ = input.readMessage(org.tensorflow.proto.framework.RegisteredSaver.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(registeredSaver_); + registeredSaver_ = subBuilder.buildPartial(); + } + + break; + } + case 42: { + com.google.protobuf.BoolValue.Builder subBuilder = null; + if (hasCheckpointValues_ != null) { + subBuilder = hasCheckpointValues_.toBuilder(); + } + hasCheckpointValues_ = input.readMessage(com.google.protobuf.BoolValue.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(hasCheckpointValues_); + hasCheckpointValues_ = subBuilder.buildPartial(); + } + + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -3030,6 +3118,84 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SlotV return slotVariables_.get(index); } + public static final int REGISTERED_SAVER_FIELD_NUMBER = 4; + private org.tensorflow.proto.framework.RegisteredSaver registeredSaver_; + /** + *
    +     * The registered saver used to save this object. If this saver is not
    +     * present when loading the checkpoint, then loading will fail.
    +     * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public boolean hasRegisteredSaver() { + return registeredSaver_ != null; + } + /** + *
    +     * The registered saver used to save this object. If this saver is not
    +     * present when loading the checkpoint, then loading will fail.
    +     * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public org.tensorflow.proto.framework.RegisteredSaver getRegisteredSaver() { + return registeredSaver_ == null ? org.tensorflow.proto.framework.RegisteredSaver.getDefaultInstance() : registeredSaver_; + } + /** + *
    +     * The registered saver used to save this object. If this saver is not
    +     * present when loading the checkpoint, then loading will fail.
    +     * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public org.tensorflow.proto.framework.RegisteredSaverOrBuilder getRegisteredSaverOrBuilder() { + return getRegisteredSaver(); + } + + public static final int HAS_CHECKPOINT_VALUES_FIELD_NUMBER = 5; + private com.google.protobuf.BoolValue hasCheckpointValues_; + /** + *
    +     * Whether this object has checkpoint values or descendants with checkpoint
    +     * values. This is computed at save time to avoid traversing the entire
    +     * object graph proto when restoring (which also has to traverse the live
    +     * object graph).
    +     * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public boolean hasHasCheckpointValues() { + return hasCheckpointValues_ != null; + } + /** + *
    +     * Whether this object has checkpoint values or descendants with checkpoint
    +     * values. This is computed at save time to avoid traversing the entire
    +     * object graph proto when restoring (which also has to traverse the live
    +     * object graph).
    +     * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public com.google.protobuf.BoolValue getHasCheckpointValues() { + return hasCheckpointValues_ == null ? com.google.protobuf.BoolValue.getDefaultInstance() : hasCheckpointValues_; + } + /** + *
    +     * Whether this object has checkpoint values or descendants with checkpoint
    +     * values. This is computed at save time to avoid traversing the entire
    +     * object graph proto when restoring (which also has to traverse the live
    +     * object graph).
    +     * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public com.google.protobuf.BoolValueOrBuilder getHasCheckpointValuesOrBuilder() { + return getHasCheckpointValues(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -3053,6 +3219,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) for (int i = 0; i < slotVariables_.size(); i++) { output.writeMessage(3, slotVariables_.get(i)); } + if (registeredSaver_ != null) { + output.writeMessage(4, getRegisteredSaver()); + } + if (hasCheckpointValues_ != null) { + output.writeMessage(5, getHasCheckpointValues()); + } unknownFields.writeTo(output); } @@ -3074,6 +3246,14 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(3, slotVariables_.get(i)); } + if (registeredSaver_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getRegisteredSaver()); + } + if (hasCheckpointValues_ != null) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, getHasCheckpointValues()); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -3095,6 +3275,16 @@ public boolean equals(final java.lang.Object obj) { .equals(other.getAttributesList())) return false; if (!getSlotVariablesList() .equals(other.getSlotVariablesList())) return false; + if (hasRegisteredSaver() != other.hasRegisteredSaver()) return false; + if (hasRegisteredSaver()) { + if (!getRegisteredSaver() + .equals(other.getRegisteredSaver())) return false; + } + if (hasHasCheckpointValues() != other.hasHasCheckpointValues()) return false; + if (hasHasCheckpointValues()) { + if (!getHasCheckpointValues() + .equals(other.getHasCheckpointValues())) return false; + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -3118,6 +3308,14 @@ public int hashCode() { hash = (37 * hash) + SLOT_VARIABLES_FIELD_NUMBER; hash = (53 * hash) + getSlotVariablesList().hashCode(); } + if (hasRegisteredSaver()) { + hash = (37 * hash) + REGISTERED_SAVER_FIELD_NUMBER; + hash = (53 * hash) + getRegisteredSaver().hashCode(); + } + if (hasHasCheckpointValues()) { + hash = (37 * hash) + HAS_CHECKPOINT_VALUES_FIELD_NUMBER; + hash = (53 * hash) + getHasCheckpointValues().hashCode(); + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -3272,6 +3470,18 @@ public Builder clear() { } else { slotVariablesBuilder_.clear(); } + if (registeredSaverBuilder_ == null) { + registeredSaver_ = null; + } else { + registeredSaver_ = null; + registeredSaverBuilder_ = null; + } + if (hasCheckpointValuesBuilder_ == null) { + hasCheckpointValues_ = null; + } else { + hasCheckpointValues_ = null; + hasCheckpointValuesBuilder_ = null; + } return this; } @@ -3326,6 +3536,16 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject build } else { result.slotVariables_ = slotVariablesBuilder_.build(); } + if (registeredSaverBuilder_ == null) { + result.registeredSaver_ = registeredSaver_; + } else { + result.registeredSaver_ = registeredSaverBuilder_.build(); + } + if (hasCheckpointValuesBuilder_ == null) { + result.hasCheckpointValues_ = hasCheckpointValues_; + } else { + result.hasCheckpointValues_ = hasCheckpointValuesBuilder_.build(); + } onBuilt(); return result; } @@ -3452,6 +3672,12 @@ public Builder mergeFrom(org.tensorflow.proto.framework.TrackableObjectGraph.Tra } } } + if (other.hasRegisteredSaver()) { + 
mergeRegisteredSaver(other.getRegisteredSaver()); + } + if (other.hasHasCheckpointValues()) { + mergeHasCheckpointValues(other.getHasCheckpointValues()); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -4417,6 +4643,348 @@ public org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject.SlotV } return slotVariablesBuilder_; } + + private org.tensorflow.proto.framework.RegisteredSaver registeredSaver_; + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.RegisteredSaver, org.tensorflow.proto.framework.RegisteredSaver.Builder, org.tensorflow.proto.framework.RegisteredSaverOrBuilder> registeredSaverBuilder_; + /** + *
    +       * The registered saver used to save this object. If this saver is not
    +       * present when loading the checkpoint, then loading will fail.
    +       * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public boolean hasRegisteredSaver() { + return registeredSaverBuilder_ != null || registeredSaver_ != null; + } + /** + *
    +       * The registered saver used to save this object. If this saver is not
    +       * present when loading the checkpoint, then loading will fail.
    +       * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public org.tensorflow.proto.framework.RegisteredSaver getRegisteredSaver() { + if (registeredSaverBuilder_ == null) { + return registeredSaver_ == null ? org.tensorflow.proto.framework.RegisteredSaver.getDefaultInstance() : registeredSaver_; + } else { + return registeredSaverBuilder_.getMessage(); + } + } + /** + *
    +       * The registered saver used to save this object. If this saver is not
    +       * present when loading the checkpoint, then loading will fail.
    +       * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public Builder setRegisteredSaver(org.tensorflow.proto.framework.RegisteredSaver value) { + if (registeredSaverBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + registeredSaver_ = value; + onChanged(); + } else { + registeredSaverBuilder_.setMessage(value); + } + + return this; + } + /** + *
    +       * The registered saver used to save this object. If this saver is not
    +       * present when loading the checkpoint, then loading will fail.
    +       * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public Builder setRegisteredSaver( + org.tensorflow.proto.framework.RegisteredSaver.Builder builderForValue) { + if (registeredSaverBuilder_ == null) { + registeredSaver_ = builderForValue.build(); + onChanged(); + } else { + registeredSaverBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
    +       * The registered saver used to save this object. If this saver is not
    +       * present when loading the checkpoint, then loading will fail.
    +       * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public Builder mergeRegisteredSaver(org.tensorflow.proto.framework.RegisteredSaver value) { + if (registeredSaverBuilder_ == null) { + if (registeredSaver_ != null) { + registeredSaver_ = + org.tensorflow.proto.framework.RegisteredSaver.newBuilder(registeredSaver_).mergeFrom(value).buildPartial(); + } else { + registeredSaver_ = value; + } + onChanged(); + } else { + registeredSaverBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
    +       * The registered saver used to save this object. If this saver is not
    +       * present when loading the checkpoint, then loading will fail.
    +       * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public Builder clearRegisteredSaver() { + if (registeredSaverBuilder_ == null) { + registeredSaver_ = null; + onChanged(); + } else { + registeredSaver_ = null; + registeredSaverBuilder_ = null; + } + + return this; + } + /** + *
    +       * The registered saver used to save this object. If this saver is not
    +       * present when loading the checkpoint, then loading will fail.
    +       * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public org.tensorflow.proto.framework.RegisteredSaver.Builder getRegisteredSaverBuilder() { + + onChanged(); + return getRegisteredSaverFieldBuilder().getBuilder(); + } + /** + *
    +       * The registered saver used to save this object. If this saver is not
    +       * present when loading the checkpoint, then loading will fail.
    +       * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + public org.tensorflow.proto.framework.RegisteredSaverOrBuilder getRegisteredSaverOrBuilder() { + if (registeredSaverBuilder_ != null) { + return registeredSaverBuilder_.getMessageOrBuilder(); + } else { + return registeredSaver_ == null ? + org.tensorflow.proto.framework.RegisteredSaver.getDefaultInstance() : registeredSaver_; + } + } + /** + *
    +       * The registered saver used to save this object. If this saver is not
    +       * present when loading the checkpoint, then loading will fail.
    +       * 
    + * + * .tensorflow.RegisteredSaver registered_saver = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.RegisteredSaver, org.tensorflow.proto.framework.RegisteredSaver.Builder, org.tensorflow.proto.framework.RegisteredSaverOrBuilder> + getRegisteredSaverFieldBuilder() { + if (registeredSaverBuilder_ == null) { + registeredSaverBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + org.tensorflow.proto.framework.RegisteredSaver, org.tensorflow.proto.framework.RegisteredSaver.Builder, org.tensorflow.proto.framework.RegisteredSaverOrBuilder>( + getRegisteredSaver(), + getParentForChildren(), + isClean()); + registeredSaver_ = null; + } + return registeredSaverBuilder_; + } + + private com.google.protobuf.BoolValue hasCheckpointValues_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.BoolValue, com.google.protobuf.BoolValue.Builder, com.google.protobuf.BoolValueOrBuilder> hasCheckpointValuesBuilder_; + /** + *
    +       * Whether this object has checkpoint values or descendants with checkpoint
    +       * values. This is computed at save time to avoid traversing the entire
    +       * object graph proto when restoring (which also has to traverse the live
    +       * object graph).
    +       * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public boolean hasHasCheckpointValues() { + return hasCheckpointValuesBuilder_ != null || hasCheckpointValues_ != null; + } + /** + *
    +       * Whether this object has checkpoint values or descendants with checkpoint
    +       * values. This is computed at save time to avoid traversing the entire
    +       * object graph proto when restoring (which also has to traverse the live
    +       * object graph).
    +       * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public com.google.protobuf.BoolValue getHasCheckpointValues() { + if (hasCheckpointValuesBuilder_ == null) { + return hasCheckpointValues_ == null ? com.google.protobuf.BoolValue.getDefaultInstance() : hasCheckpointValues_; + } else { + return hasCheckpointValuesBuilder_.getMessage(); + } + } + /** + *
    +       * Whether this object has checkpoint values or descendants with checkpoint
    +       * values. This is computed at save time to avoid traversing the entire
    +       * object graph proto when restoring (which also has to traverse the live
    +       * object graph).
    +       * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public Builder setHasCheckpointValues(com.google.protobuf.BoolValue value) { + if (hasCheckpointValuesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + hasCheckpointValues_ = value; + onChanged(); + } else { + hasCheckpointValuesBuilder_.setMessage(value); + } + + return this; + } + /** + *
    +       * Whether this object has checkpoint values or descendants with checkpoint
    +       * values. This is computed at save time to avoid traversing the entire
    +       * object graph proto when restoring (which also has to traverse the live
    +       * object graph).
    +       * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public Builder setHasCheckpointValues( + com.google.protobuf.BoolValue.Builder builderForValue) { + if (hasCheckpointValuesBuilder_ == null) { + hasCheckpointValues_ = builderForValue.build(); + onChanged(); + } else { + hasCheckpointValuesBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + *
    +       * Whether this object has checkpoint values or descendants with checkpoint
    +       * values. This is computed at save time to avoid traversing the entire
    +       * object graph proto when restoring (which also has to traverse the live
    +       * object graph).
    +       * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public Builder mergeHasCheckpointValues(com.google.protobuf.BoolValue value) { + if (hasCheckpointValuesBuilder_ == null) { + if (hasCheckpointValues_ != null) { + hasCheckpointValues_ = + com.google.protobuf.BoolValue.newBuilder(hasCheckpointValues_).mergeFrom(value).buildPartial(); + } else { + hasCheckpointValues_ = value; + } + onChanged(); + } else { + hasCheckpointValuesBuilder_.mergeFrom(value); + } + + return this; + } + /** + *
    +       * Whether this object has checkpoint values or descendants with checkpoint
    +       * values. This is computed at save time to avoid traversing the entire
    +       * object graph proto when restoring (which also has to traverse the live
    +       * object graph).
    +       * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public Builder clearHasCheckpointValues() { + if (hasCheckpointValuesBuilder_ == null) { + hasCheckpointValues_ = null; + onChanged(); + } else { + hasCheckpointValues_ = null; + hasCheckpointValuesBuilder_ = null; + } + + return this; + } + /** + *
    +       * Whether this object has checkpoint values or descendants with checkpoint
    +       * values. This is computed at save time to avoid traversing the entire
    +       * object graph proto when restoring (which also has to traverse the live
    +       * object graph).
    +       * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public com.google.protobuf.BoolValue.Builder getHasCheckpointValuesBuilder() { + + onChanged(); + return getHasCheckpointValuesFieldBuilder().getBuilder(); + } + /** + *
    +       * Whether this object has checkpoint values or descendants with checkpoint
    +       * values. This is computed at save time to avoid traversing the entire
    +       * object graph proto when restoring (which also has to traverse the live
    +       * object graph).
    +       * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + public com.google.protobuf.BoolValueOrBuilder getHasCheckpointValuesOrBuilder() { + if (hasCheckpointValuesBuilder_ != null) { + return hasCheckpointValuesBuilder_.getMessageOrBuilder(); + } else { + return hasCheckpointValues_ == null ? + com.google.protobuf.BoolValue.getDefaultInstance() : hasCheckpointValues_; + } + } + /** + *
    +       * Whether this object has checkpoint values or descendants with checkpoint
    +       * values. This is computed at save time to avoid traversing the entire
    +       * object graph proto when restoring (which also has to traverse the live
    +       * object graph).
    +       * 
    + * + * .google.protobuf.BoolValue has_checkpoint_values = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.BoolValue, com.google.protobuf.BoolValue.Builder, com.google.protobuf.BoolValueOrBuilder> + getHasCheckpointValuesFieldBuilder() { + if (hasCheckpointValuesBuilder_ == null) { + hasCheckpointValuesBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.BoolValue, com.google.protobuf.BoolValue.Builder, com.google.protobuf.BoolValueOrBuilder>( + getHasCheckpointValues(), + getParentForChildren(), + isClean()); + hasCheckpointValues_ = null; + } + return hasCheckpointValuesBuilder_; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TrackableObjectGraphProtos.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TrackableObjectGraphProtos.java index 6bde2fbe86e..ccc9f36cd22 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TrackableObjectGraphProtos.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TrackableObjectGraphProtos.java @@ -39,6 +39,11 @@ public static void registerAllExtensions( static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_tensorflow_TrackableObjectGraph_TrackableObject_SlotVariableReference_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_tensorflow_RegisteredSaver_descriptor; + static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_tensorflow_RegisteredSaver_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -49,23 +54,28 @@ public static void registerAllExtensions( static { java.lang.String[] descriptorData = { "\n5tensorflow/core/protobuf/trackable_obj" + - "ect_graph.proto\022\ntensorflow\"\203\005\n\024Trackabl" + - "eObjectGraph\022?\n\005nodes\030\001 \003(\01320.tensorflow" + - ".TrackableObjectGraph.TrackableObject\032\251\004" + - "\n\017TrackableObject\022R\n\010children\030\001 \003(\0132@.te" + - "nsorflow.TrackableObjectGraph.TrackableO" + - "bject.ObjectReference\022U\n\nattributes\030\002 \003(" + - "\0132A.tensorflow.TrackableObjectGraph.Trac" + - "kableObject.SerializedTensor\022^\n\016slot_var" + - "iables\030\003 \003(\0132F.tensorflow.TrackableObjec" + - "tGraph.TrackableObject.SlotVariableRefer" + - "ence\0326\n\017ObjectReference\022\017\n\007node_id\030\001 \001(\005" + - "\022\022\n\nlocal_name\030\002 \001(\t\032e\n\020SerializedTensor" + - "\022\014\n\004name\030\001 \001(\t\022\021\n\tfull_name\030\002 \001(\t\022\026\n\016che" + - "ckpoint_key\030\003 \001(\t\022\030\n\020optional_restore\030\004 " + - "\001(\010\032l\n\025SlotVariableReference\022!\n\031original" + - "_variable_node_id\030\001 \001(\005\022\021\n\tslot_name\030\002 \001" + - "(\t\022\035\n\025slot_variable_node_id\030\003 \001(\005B\230\001\n\036or" + + "ect_graph.proto\022\ntensorflow\032\036google/prot" + + "obuf/wrappers.proto\"\365\005\n\024TrackableObjectG" + + "raph\022?\n\005nodes\030\001 \003(\01320.tensorflow.Trackab" + + "leObjectGraph.TrackableObject\032\233\005\n\017Tracka" + + "bleObject\022R\n\010children\030\001 \003(\0132@.tensorflow" + + ".TrackableObjectGraph.TrackableObject.Ob" + + "jectReference\022U\n\nattributes\030\002 \003(\0132A.tens" + + 
"orflow.TrackableObjectGraph.TrackableObj" + + "ect.SerializedTensor\022^\n\016slot_variables\030\003" + + " \003(\0132F.tensorflow.TrackableObjectGraph.T" + + "rackableObject.SlotVariableReference\0225\n\020" + + "registered_saver\030\004 \001(\0132\033.tensorflow.Regi" + + "steredSaver\0229\n\025has_checkpoint_values\030\005 \001" + + "(\0132\032.google.protobuf.BoolValue\0326\n\017Object" + + "Reference\022\017\n\007node_id\030\001 \001(\005\022\022\n\nlocal_name" + + "\030\002 \001(\t\032e\n\020SerializedTensor\022\014\n\004name\030\001 \001(\t" + + "\022\021\n\tfull_name\030\002 \001(\t\022\026\n\016checkpoint_key\030\003 " + + "\001(\t\022\030\n\020optional_restore\030\004 \001(\010\032l\n\025SlotVar" + + "iableReference\022!\n\031original_variable_node" + + "_id\030\001 \001(\005\022\021\n\tslot_name\030\002 \001(\t\022\035\n\025slot_var" + + "iable_node_id\030\003 \001(\005\"4\n\017RegisteredSaver\022\014" + + "\n\004name\030\001 \001(\t\022\023\n\013object_name\030\002 \001(\tB\230\001\n\036or" + "g.tensorflow.proto.frameworkB\032TrackableO" + "bjectGraphProtosP\001ZUgithub.com/tensorflo" + "w/tensorflow/tensorflow/go/core/protobuf" + @@ -74,6 +84,7 @@ public static void registerAllExtensions( descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.WrappersProto.getDescriptor(), }); internal_static_tensorflow_TrackableObjectGraph_descriptor = getDescriptor().getMessageTypes().get(0); @@ -86,7 +97,7 @@ public static void registerAllExtensions( internal_static_tensorflow_TrackableObjectGraph_TrackableObject_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_TrackableObjectGraph_TrackableObject_descriptor, - new java.lang.String[] { "Children", "Attributes", "SlotVariables", }); + new java.lang.String[] { "Children", "Attributes", "SlotVariables", "RegisteredSaver", "HasCheckpointValues", }); internal_static_tensorflow_TrackableObjectGraph_TrackableObject_ObjectReference_descriptor = internal_static_tensorflow_TrackableObjectGraph_TrackableObject_descriptor.getNestedTypes().get(0); internal_static_tensorflow_TrackableObjectGraph_TrackableObject_ObjectReference_fieldAccessorTable = new @@ -105,6 +116,13 @@ public static void registerAllExtensions( com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_tensorflow_TrackableObjectGraph_TrackableObject_SlotVariableReference_descriptor, new java.lang.String[] { "OriginalVariableNodeId", "SlotName", "SlotVariableNodeId", }); + internal_static_tensorflow_RegisteredSaver_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_tensorflow_RegisteredSaver_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_tensorflow_RegisteredSaver_descriptor, + new java.lang.String[] { "Name", "ObjectName", }); + com.google.protobuf.WrappersProto.getDescriptor(); } // @@protoc_insertion_point(outer_class_scope) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java index 6958037d582..dd1f30cc95b 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProto.java @@ -467,7 +467,7 @@ 
public org.tensorflow.proto.framework.StructuredValueOrBuilder getTypeStateOrBui * * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is * the one registered under this name. For types registered outside * core TensorFlow by an add-on library, that library must be loaded - * before this value can be deserialized by StructureCoder. + * before this value can be deserialized by nested_structure_coder. * * If type_spec_class specifies a particular TypeSpec class, this field is * redundant with the type_spec_class enum, and is only used for error * reporting in older binaries that do not know the tupe_spec_class enum. @@ -493,7 +493,7 @@ public java.lang.String getTypeSpecClassName() { * * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is * the one registered under this name. For types registered outside * core TensorFlow by an add-on library, that library must be loaded - * before this value can be deserialized by StructureCoder. + * before this value can be deserialized by nested_structure_coder. * * If type_spec_class specifies a particular TypeSpec class, this field is * redundant with the type_spec_class enum, and is only used for error * reporting in older binaries that do not know the tupe_spec_class enum. @@ -1102,7 +1102,7 @@ public org.tensorflow.proto.framework.StructuredValueOrBuilder getTypeStateOrBui * * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is * the one registered under this name. For types registered outside * core TensorFlow by an add-on library, that library must be loaded - * before this value can be deserialized by StructureCoder. + * before this value can be deserialized by nested_structure_coder. * * If type_spec_class specifies a particular TypeSpec class, this field is * redundant with the type_spec_class enum, and is only used for error * reporting in older binaries that do not know the tupe_spec_class enum. @@ -1128,7 +1128,7 @@ public java.lang.String getTypeSpecClassName() { * * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is * the one registered under this name. For types registered outside * core TensorFlow by an add-on library, that library must be loaded - * before this value can be deserialized by StructureCoder. + * before this value can be deserialized by nested_structure_coder. * * If type_spec_class specifies a particular TypeSpec class, this field is * redundant with the type_spec_class enum, and is only used for error * reporting in older binaries that do not know the tupe_spec_class enum. @@ -1155,7 +1155,7 @@ public java.lang.String getTypeSpecClassName() { * * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is * the one registered under this name. For types registered outside * core TensorFlow by an add-on library, that library must be loaded - * before this value can be deserialized by StructureCoder. + * before this value can be deserialized by nested_structure_coder. * * If type_spec_class specifies a particular TypeSpec class, this field is * redundant with the type_spec_class enum, and is only used for error * reporting in older binaries that do not know the tupe_spec_class enum. @@ -1179,7 +1179,7 @@ public Builder setTypeSpecClassName( * * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is * the one registered under this name. For types registered outside * core TensorFlow by an add-on library, that library must be loaded - * before this value can be deserialized by StructureCoder. 
+ * before this value can be deserialized by nested_structure_coder. * * If type_spec_class specifies a particular TypeSpec class, this field is * redundant with the type_spec_class enum, and is only used for error * reporting in older binaries that do not know the tupe_spec_class enum. @@ -1199,7 +1199,7 @@ public Builder clearTypeSpecClassName() { * * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is * the one registered under this name. For types registered outside * core TensorFlow by an add-on library, that library must be loaded - * before this value can be deserialized by StructureCoder. + * before this value can be deserialized by nested_structure_coder. * * If type_spec_class specifies a particular TypeSpec class, this field is * redundant with the type_spec_class enum, and is only used for error * reporting in older binaries that do not know the tupe_spec_class enum. diff --git a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProtoOrBuilder.java b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProtoOrBuilder.java index eae32d163d9..0814cb0e478 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProtoOrBuilder.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/java/org/tensorflow/proto/framework/TypeSpecProtoOrBuilder.java @@ -47,7 +47,7 @@ public interface TypeSpecProtoOrBuilder extends * * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is * the one registered under this name. For types registered outside * core TensorFlow by an add-on library, that library must be loaded - * before this value can be deserialized by StructureCoder. + * before this value can be deserialized by nested_structure_coder. * * If type_spec_class specifies a particular TypeSpec class, this field is * redundant with the type_spec_class enum, and is only used for error * reporting in older binaries that do not know the tupe_spec_class enum. @@ -62,7 +62,7 @@ public interface TypeSpecProtoOrBuilder extends * * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is * the one registered under this name. For types registered outside * core TensorFlow by an add-on library, that library must be loaded - * before this value can be deserialized by StructureCoder. + * before this value can be deserialized by nested_structure_coder. * * If type_spec_class specifies a particular TypeSpec class, this field is * redundant with the type_spec_class enum, and is only used for error * reporting in older binaries that do not know the tupe_spec_class enum. 
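The TrackableObjectGraph changes above introduce a new top-level RegisteredSaver message (name, object_name) and two new fields on TrackableObject: registered_saver and has_checkpoint_values (a google.protobuf.BoolValue wrapper), while the TypeSpec comments now refer to nested_structure_coder rather than StructureCoder. As a rough, unverified sketch of how the regenerated org.tensorflow.proto.framework bindings could be exercised — assuming the usual protobuf-java builder pattern and java_multiple_files generation, with hypothetical field values — consider:

// Hedged sketch only: exercises the new TrackableObject fields added by this diff.
// Assumes the regenerated org.tensorflow.proto.framework classes and the standard
// protobuf-java builder/wrapper APIs; names of values are hypothetical.
import com.google.protobuf.BoolValue;
import org.tensorflow.proto.framework.RegisteredSaver;
import org.tensorflow.proto.framework.TrackableObjectGraph;
import org.tensorflow.proto.framework.TrackableObjectGraph.TrackableObject;

public final class TrackableObjectExample {
  public static void main(String[] args) {
    // New top-level RegisteredSaver message: name + object_name.
    RegisteredSaver saver = RegisteredSaver.newBuilder()
        .setName("custom_saver")        // hypothetical saver name
        .setObjectName("my_object")     // hypothetical object name
        .build();

    // TrackableObject now carries registered_saver (field 4) and
    // has_checkpoint_values (field 5, a google.protobuf.BoolValue message).
    TrackableObject node = TrackableObject.newBuilder()
        .setRegisteredSaver(saver)
        .setHasCheckpointValues(BoolValue.of(true))
        .build();

    // Assemble a minimal graph with the single node above.
    TrackableObjectGraph graph = TrackableObjectGraph.newBuilder()
        .addNodes(node)
        .build();
    System.out.println(graph);
  }
}

Using the BoolValue wrapper rather than a plain bool lets readers distinguish "explicitly false" from "unset", which is presumably why the descriptor now also declares a dependency on google/protobuf/wrappers.proto.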
diff --git a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb index 1b3d72757c5..8eca207ec10 100644 Binary files a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb and b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pb differ diff --git a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pbtxt b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pbtxt index 91501df0c6f..1b15dcfe590 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pbtxt +++ b/tensorflow-core/tensorflow-core-api/src/gen/resources/ops.pbtxt @@ -808,6 +808,26 @@ op { } is_stateful: true } +op { + name: "AnonymousIteratorV3" + output_arg { + name: "handle" + type: DT_RESOURCE + } + attr { + name: "output_types" + type: "list(type)" + has_minimum: true + minimum: 1 + } + attr { + name: "output_shapes" + type: "list(shape)" + has_minimum: true + minimum: 1 + } + is_stateful: true +} op { name: "AnonymousMemoryCache" output_arg { @@ -850,6 +870,92 @@ op { } is_stateful: true } +op { + name: "AnonymousMutableDenseHashTable" + input_arg { + name: "empty_key" + type_attr: "key_dtype" + } + input_arg { + name: "deleted_key" + type_attr: "key_dtype" + } + output_arg { + name: "table_handle" + type: DT_RESOURCE + } + attr { + name: "key_dtype" + type: "type" + } + attr { + name: "value_dtype" + type: "type" + } + attr { + name: "value_shape" + type: "shape" + default_value { + shape { + } + } + } + attr { + name: "initial_num_buckets" + type: "int" + default_value { + i: 131072 + } + } + attr { + name: "max_load_factor" + type: "float" + default_value { + f: 0.8 + } + } + is_stateful: true +} +op { + name: "AnonymousMutableHashTable" + output_arg { + name: "table_handle" + type: DT_RESOURCE + } + attr { + name: "key_dtype" + type: "type" + } + attr { + name: "value_dtype" + type: "type" + } + is_stateful: true +} +op { + name: "AnonymousMutableHashTableOfTensors" + output_arg { + name: "table_handle" + type: DT_RESOURCE + } + attr { + name: "key_dtype" + type: "type" + } + attr { + name: "value_dtype" + type: "type" + } + attr { + name: "value_shape" + type: "shape" + default_value { + shape { + } + } + } + is_stateful: true +} op { name: "AnonymousRandomSeedGenerator" input_arg { @@ -2434,6 +2540,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -2461,6 +2587,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -2655,6 +2801,13 @@ op { name: "dtype" type: "type" } + attr { + name: "validate_shape" + type: "bool" + default_value { + b: false + } + } is_stateful: true } op { @@ -2876,6 +3029,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } 
attr { name: "auto_shard_policy" @@ -3490,6 +3663,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -3528,6 +3721,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "parallel_copy" @@ -6695,6 +6908,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -6839,6 +7072,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -6908,6 +7161,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -7170,6 +7443,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -7208,6 +7501,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -7446,6 +7759,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "Targuments" @@ -7493,6 +7826,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "N" @@ -8617,6 +8970,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: 
TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -11548,6 +11921,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "task_refresh_interval_hint_ms" @@ -11625,6 +12018,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "task_refresh_interval_hint_ms" @@ -11661,6 +12074,114 @@ op { } is_stateful: true } +op { + name: "DataServiceDatasetV3" + input_arg { + name: "dataset_id" + type: DT_INT64 + } + input_arg { + name: "processing_mode" + type: DT_STRING + } + input_arg { + name: "address" + type: DT_STRING + } + input_arg { + name: "protocol" + type: DT_STRING + } + input_arg { + name: "job_name" + type: DT_STRING + } + input_arg { + name: "consumer_index" + type: DT_INT64 + } + input_arg { + name: "num_consumers" + type: DT_INT64 + } + input_arg { + name: "max_outstanding_requests" + type: DT_INT64 + } + input_arg { + name: "iteration_counter" + type: DT_RESOURCE + } + output_arg { + name: "handle" + type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } + } + attr { + name: "task_refresh_interval_hint_ms" + type: "int" + default_value { + i: -1 + } + } + attr { + name: "output_types" + type: "list(type)" + has_minimum: true + minimum: 1 + } + attr { + name: "output_shapes" + type: "list(shape)" + has_minimum: true + minimum: 1 + } + attr { + name: "data_transfer_protocol" + type: "string" + default_value { + s: "" + } + } + attr { + name: "target_workers" + type: "string" + default_value { + s: "AUTO" + } + } + attr { + name: "uncompress" + type: "bool" + default_value { + b: false + } + } + attr { + name: "uncompress_fn" + type: "func" + } + is_stateful: true +} op { name: "DatasetCardinality" input_arg { @@ -11681,6 +12202,12 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_ANY + } + } } } op { @@ -12877,6 +13404,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -13405,6 +13952,7 @@ op { name: "device_names" type: "list(string)" } + is_stateful: true } op { name: "Diag" @@ -13676,6 +14224,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: 
"output_types" @@ -13853,6 +14421,86 @@ op { } is_stateful: true } +op { + name: "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch" + input_arg { + name: "sample_indices_or_row_lengths" + type_attr: "T1" + number_attr: "N" + } + input_arg { + name: "embedding_indices" + type_attr: "T2" + number_attr: "N" + } + input_arg { + name: "aggregation_weights" + type_attr: "T3" + number_attr: "N" + } + input_arg { + name: "mode_override" + type: DT_STRING + } + input_arg { + name: "device_ordinal" + type: DT_INT32 + } + attr { + name: "T1" + type: "type" + default_value { + type: DT_INT32 + } + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "T2" + type: "type" + default_value { + type: DT_INT32 + } + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "T3" + type: "type" + default_value { + type: DT_FLOAT + } + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + } + } + } + attr { + name: "N" + type: "int" + has_minimum: true + minimum: 1 + } + attr { + name: "combiners" + type: "list(string)" + default_value { + list { + } + } + } + is_stateful: true +} op { name: "DynamicPartition" input_arg { @@ -14140,8 +14788,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -14369,6 +15020,89 @@ op { type: DT_STRING } } +op { + name: "EnqueueTPUEmbeddingArbitraryTensorBatch" + input_arg { + name: "sample_indices_or_row_lengths" + type_attr: "T1" + number_attr: "N" + } + input_arg { + name: "embedding_indices" + type_attr: "T2" + number_attr: "N" + } + input_arg { + name: "aggregation_weights" + type_attr: "T3" + number_attr: "N" + } + input_arg { + name: "mode_override" + type: DT_STRING + } + attr { + name: "T1" + type: "type" + default_value { + type: DT_INT32 + } + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "T2" + type: "type" + default_value { + type: DT_INT32 + } + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + attr { + name: "T3" + type: "type" + default_value { + type: DT_FLOAT + } + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + } + } + } + attr { + name: "N" + type: "int" + has_minimum: true + minimum: 1 + } + attr { + name: "device_ordinal" + type: "int" + default_value { + i: -1 + } + } + attr { + name: "combiners" + type: "list(string)" + default_value { + list { + } + } + } + is_stateful: true +} op { name: "EnqueueTPUEmbeddingIntegerBatch" input_arg { @@ -14977,6 +15711,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15008,6 +15762,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "auto_shard_policy" @@ -15042,6 +15816,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + 
type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15097,6 +15891,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15131,6 +15945,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "N" @@ -15199,6 +16033,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15227,6 +16081,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15272,6 +16146,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "key_func" @@ -15344,6 +16238,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "key_func" @@ -15394,6 +16308,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15436,6 +16370,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15464,6 +16418,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15503,6 +16477,26 @@ op { output_arg { name: "handle" type: 
DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -15546,6 +16540,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -15592,6 +16606,15 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_TENSOR + args { + type_id: TFT_STRING + } + } + } } is_stateful: true } @@ -15608,6 +16631,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15631,6 +16674,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15678,6 +16741,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -15718,6 +16801,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "sparse_keys" @@ -15791,6 +16894,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15818,6 +16941,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15846,6 +16989,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15884,6 +17047,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + 
experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -15941,6 +17124,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -15969,6 +17172,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -16004,6 +17227,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -16035,6 +17278,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -16097,6 +17360,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "predicate" @@ -16133,6 +17416,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -16194,6 +17497,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -16217,6 +17540,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -16946,6 +18289,22 @@ op { } is_stateful: true } +op { + name: "FileSystemSetConfiguration" + input_arg { + name: "scheme" + type: DT_STRING + } + input_arg { + name: "key" + type: DT_STRING + } + input_arg { + name: "value" + type: DT_STRING + } + is_stateful: true +} op { name: "Fill" input_arg { @@ 
-16987,6 +18346,26 @@ op { output_arg { name: "output" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -17014,6 +18393,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "predicate" @@ -17053,6 +18452,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "has_captured_ref" @@ -17118,6 +18537,15 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_TENSOR + args { + type_id: TFT_STRING + } + } + } } attr { name: "metadata" @@ -17157,6 +18585,15 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_TENSOR + args { + type_id: TFT_STRING + } + } + } } attr { name: "metadata" @@ -17388,6 +18825,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -18837,6 +20294,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "init_func" @@ -18899,6 +20376,26 @@ op { output_arg { name: "components" type_list_attr: "output_types" + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -19067,6 +20564,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "key_func" @@ -19139,6 +20656,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "key_func" @@ -19811,6 +21348,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + 
type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -20473,6 +22030,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -20711,6 +22288,14 @@ op { name: "is_tpu_embedding_initialized" type: DT_BOOL } + attr { + name: "config" + type: "string" + default_value { + s: "" + } + } + is_stateful: true } op { name: "IsVariableInitialized" @@ -21076,6 +22661,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -21436,6 +23041,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -21636,6 +23261,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -21905,6 +23550,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -23077,6 +24742,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -23165,6 +24850,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -23623,6 +25328,15 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_TENSOR + args { + type_id: TFT_STRING + } + } + } } is_stateful: true } @@ -24145,6 +25859,7 @@ op { type: "type" allowed_values { list { + type: DT_BFLOAT16 type: DT_DOUBLE type: DT_FLOAT type: DT_HALF @@ -24227,6 +25942,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + 
args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -25577,6 +27312,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "algorithm" @@ -26794,6 +28549,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -26987,7 +28762,9 @@ op { type: DT_INT16 type: DT_UINT16 type: DT_INT32 + type: DT_UINT32 type: DT_INT64 + type: DT_UINT64 type: DT_COMPLEX64 type: DT_COMPLEX128 type: DT_BOOL @@ -27008,6 +28785,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -27051,6 +28848,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -27085,8 +28902,21 @@ op { experimental_full_type { type_id: TFT_OPTIONAL args { - type_id: TFT_VAR - s: "Toutput_types" + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "Toutput_types" + } + } + args { + type_id: TFT_VAR + s: "Toutput_types" + } } } } @@ -27147,6 +28977,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "serialized_options" @@ -27741,6 +29591,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "Toutput_types" + } + } + args { + type_id: TFT_VAR + s: "Toutput_types" + } + } + } } attr { name: "Toutput_types" @@ -27794,6 +29664,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "Toutput_types" + } + } + args { + type_id: TFT_VAR + s: "Toutput_types" + } + } + } } attr { name: "parallel_copy" @@ -27938,6 +29828,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: 
"output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "parallel_copy" @@ -28059,6 +29969,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -28114,6 +30044,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -28176,6 +30126,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -28246,6 +30216,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -28300,6 +30290,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -28368,6 +30378,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -28582,6 +30612,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "sparse_keys" @@ -28699,6 +30749,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "sparse_keys" @@ -29855,6 +31925,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -30147,6 +32237,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + 
type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -31151,6 +33261,63 @@ op { type: "type" } } +op { + name: "QuantizedConcatV2" + input_arg { + name: "values" + type_attr: "T" + number_attr: "N" + } + input_arg { + name: "axis" + type_attr: "Tidx" + } + input_arg { + name: "input_mins" + type: DT_FLOAT + number_attr: "N" + } + input_arg { + name: "input_maxes" + type: DT_FLOAT + number_attr: "N" + } + output_arg { + name: "output" + type_attr: "T" + } + output_arg { + name: "output_min" + type: DT_FLOAT + } + output_arg { + name: "output_max" + type: DT_FLOAT + } + attr { + name: "N" + type: "int" + has_minimum: true + minimum: 2 + } + attr { + name: "T" + type: "type" + } + attr { + name: "Tidx" + type: "type" + default_value { + type: DT_INT32 + } + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } +} op { name: "QuantizedConv2D" input_arg { @@ -35386,6 +37553,13 @@ op { output_arg { name: "encoded_ragged" type: DT_VARIANT + experimental_full_type { + type_id: TFT_RAGGED + args { + type_id: TFT_VAR + s: "Tvalues" + } + } } attr { name: "RAGGED_RANK" @@ -35512,6 +37686,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -36104,6 +38298,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -36492,6 +38706,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -36530,6 +38764,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -37041,6 +39295,13 @@ op { s: "" } } + attr { + name: "metadata" + type: "string" + default_value { + s: "" + } + } } op { name: "Relu" @@ -37220,6 +39481,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -43597,6 +45878,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: 
"output_types" + } + } + } } attr { name: "output_types" @@ -43932,6 +46233,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "f" @@ -45665,6 +47986,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -45761,6 +48102,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "require_non_empty" @@ -45848,6 +48209,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -45905,6 +48286,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "reshuffle_each_iteration" @@ -45955,6 +48356,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "reshuffle_each_iteration" @@ -46000,6 +48421,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -46047,6 +48488,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "reshuffle_each_iteration" @@ -46254,6 +48715,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -46353,6 +48834,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + 
args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -46421,6 +48922,33 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } + } + attr { + name: "drop_remainder" + type: "bool" + default_value { + b: true + } } attr { name: "output_types" @@ -46463,6 +48991,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -46588,6 +49136,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -46634,6 +49202,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -46718,6 +49306,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -50619,6 +53227,16 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "Tvalues" + } + } + } } attr { name: "Tvalues" @@ -50875,6 +53493,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -50967,6 +53605,10 @@ op { type: DT_INT16 type: DT_INT32 type: DT_INT64 + type: DT_UINT8 + type: DT_UINT16 + type: DT_UINT32 + type: DT_UINT64 type: DT_COMPLEX64 type: DT_COMPLEX128 } @@ -53781,6 +56423,15 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_TENSOR + args { + type_id: TFT_STRING + } + } + } } attr { name: "metadata" @@ -53969,6 +56620,7 @@ op { has_minimum: true } is_stateful: true + is_distributed_communication: true } op { name: "TPUExecuteAndUpdateVariables" @@ -54005,6 +56657,7 @@ op { has_minimum: true } is_stateful: true + is_distributed_communication: true } op { name: "TPUOrdinalSelector" @@ -54290,6 +56943,26 @@ op { output_arg { name: "handle" type: 
DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -54362,6 +57035,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "predicate" @@ -55474,6 +58167,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "Toutput_types" + } + } + args { + type_id: TFT_VAR + s: "Toutput_types" + } + } + } } attr { name: "Toutput_types" @@ -55540,8 +58253,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -55624,8 +58340,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -55740,8 +58459,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -55766,8 +58488,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -55792,8 +58517,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -55847,8 +58575,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -55887,8 +58618,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -55921,8 +58655,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -55961,8 +58698,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -55991,8 +58731,11 @@ op { experimental_full_type { type_id: TFT_ARRAY args { - type_id: TFT_VAR - s: "element_dtype" + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "element_dtype" + } } } } @@ -56329,6 +59072,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "Toutput_types" + } + } + args { + type_id: TFT_VAR + s: "Toutput_types" + } + } + } } attr { name: "Toutput_types" @@ -56511,6 +59274,15 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + 
type_id: TFT_DATASET + args { + type_id: TFT_TENSOR + args { + type_id: TFT_STRING + } + } + } } attr { name: "metadata" @@ -56597,6 +59369,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -57190,6 +59982,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -57621,6 +60433,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -58517,6 +61349,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -58547,6 +61399,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" @@ -58898,6 +61770,17 @@ op { } } } + attr { + name: "mode" + type: "string" + description: "group mode.\nCrossReplica: group_assignment contains replica_id. Each group contains the\n replicas for the current partition.\nCrossReplicaAndPartition: group_assignment contains replica_id. Each group\n contains the replicas for all partitions." + allowed_values { + list { + s: "CrossReplica" + s: "CrossReplicaAndPartition" + } + } + } summary: "Wraps the XLA AllReduce operator" description: " documented at https://www.tensorflow.org/xla/operation_semantics#allreduce." } @@ -59109,22 +61992,22 @@ op { name: "XlaConvV2" input_arg { name: "lhs" - description: "the input tensor" + description: "input tensor" type_attr: "LhsT" } input_arg { name: "rhs" - description: "the kernel tensor" + description: "kernel tensor" type_attr: "RhsT" } input_arg { name: "window_strides" - description: "the inter-window strides" + description: "inter-window strides" type_attr: "Tindices" } input_arg { name: "padding" - description: "the padding to apply at the start and end of each input dimensions" + description: "padding to apply at the start and end of each input dimensions" type_attr: "Tindices" } input_arg { @@ -59209,17 +62092,17 @@ op { attr { name: "dimension_numbers" type: "string" - description: "a serialized xla::ConvolutionDimensionNumbers proto." + description: "serialized xla::ConvolutionDimensionNumbers proto." } attr { name: "precision_config" type: "string" - description: "a serialized xla::PrecisionConfig proto." 
+ description: "serialized xla::PrecisionConfig proto." } attr { name: "preferred_element_type" type: "type" - description: "The type of the tensor." + description: "type of the tensor." allowed_values { list { type: DT_FLOAT @@ -59242,6 +62125,14 @@ op { } } } + attr { + name: "batch_group_count" + type: "int" + default_value { + i: 1 + } + description: "number of batch groups or grouped filters." + } summary: "Wraps the XLA ConvGeneralDilated operator, documented at" description: " https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution\n." } @@ -60989,6 +63880,26 @@ op { output_arg { name: "handle" type: DT_VARIANT + experimental_full_type { + type_id: TFT_DATASET + args { + type_id: TFT_FOR_EACH + args { + type_id: TFT_PRODUCT + } + args { + type_id: TFT_TENSOR + args { + type_id: TFT_VAR + s: "output_types" + } + } + args { + type_id: TFT_VAR + s: "output_types" + } + } + } } attr { name: "output_types" diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java index 5693e23c560..dd3ea7e9cc8 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/RawTensor.java @@ -188,7 +188,9 @@ TType asTypedTensor() { return typeInfo.mapper().mapDense(this); } - /** @return metadata about the type of this tensor. */ + /** + * @return metadata about the type of this tensor. + */ TensorTypeInfo typeInfo() { return typeInfo; } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java index 251f5a6e4b3..ad4044e8b58 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java @@ -1,18 +1,18 @@ /* Copyright 2020-2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= - */ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+======================================================================= +*/ package org.tensorflow; import java.util.Collections; diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java index 9b3258fb08c..a776d35fe2b 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/internal/c_api/presets/tensorflow.java @@ -34,7 +34,9 @@ import org.bytedeco.javacpp.tools.InfoMap; import org.bytedeco.javacpp.tools.InfoMapper; -/** @author Samuel Audet */ +/** + * @author Samuel Audet + */ @Properties( value = { @Platform( @@ -467,7 +469,7 @@ public void map(InfoMap infoMap) { .put(new Info("tensorflow::Operation").javaNames("NativeOperation")) .put(new Info("tensorflow::Status").javaNames("NativeStatus").purify()) .put( - new Info("tensorflow::int32") + new Info("tensorflow::int32", "tensorflow::error::Code") .cast() .valueTypes("int") .pointerTypes("IntPointer", "IntBuffer", "int[]")) diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java index 33109e77fc0..5cb28911290 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/types/TString.java @@ -235,6 +235,8 @@ static TString sparseTensorOf(TInt64 indices, TString values, TInt64 denseShape) */ TString using(Charset charset); - /** @return the tensor data as a n-dimensional array of raw byte sequences. */ + /** + * @return the tensor data as a n-dimensional array of raw byte sequences. + */ NdArray asBytes(); } diff --git a/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc b/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc index f454087354f..b1d4eee905d 100644 --- a/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc +++ b/tensorflow-core/tensorflow-core-api/tensorflow.bazelrc @@ -115,6 +115,7 @@ build -c opt # Make Bazel print out all options from rc files. build --announce_rc +# TODO(mihaimaruseac): Document this option or remove if no longer needed build --define=grpc_no_ares=true # See https://github.com/bazelbuild/bazel/issues/7362 for information on what @@ -131,19 +132,26 @@ build --define=grpc_no_ares=true # archives in -whole_archive -no_whole_archive. build --noincompatible_remove_legacy_whole_archive +# TODO(mihaimaruseac): Document this option or remove if no longer needed build --enable_platform_specific_config # Enable XLA support by default. build --define=with_xla_support=true +# TODO(mihaimaruseac): Document this option or remove if no longer needed build --config=short_logs +# TODO(mihaimaruseac): Document this option or remove if no longer needed build --config=v2 # Disable AWS/HDFS support by default build --define=no_aws_support=true build --define=no_hdfs_support=true +# TF now has `cc_shared_library` targets, so it needs the experimental flag +# TODO(rostam): Remove when `cc_shared_library` is enabled by default +build --experimental_cc_shared_library + # Default options should come above this line. 
# Allow builds using libc++ as a linker library @@ -219,9 +227,8 @@ build:mkl_threadpool -c opt # Config setting to build oneDNN with Compute Library for the Arm Architecture (ACL). # This build is for the inference regime only. -build:mkl_aarch64 --define=build_with_mkl_aarch64=true --define=enable_mkl=true +build:mkl_aarch64 --define=build_with_mkl_aarch64=true build:mkl_aarch64 --define=tensorflow_mkldnn_contraction_kernel=0 -build:mkl_aarch64 --define=build_with_mkl_opensource=true build:mkl_aarch64 --define=build_with_openmp=true build:mkl_aarch64 -c opt @@ -473,7 +480,7 @@ build:rbe_linux_cuda_nvcc_py38 --config=rbe_linux_cuda11.2_nvcc_py3.8 build:rbe_linux_cuda_nvcc_py39 --config=rbe_linux_cuda11.2_nvcc_py3.9 # Deprecated configs that people might still use. -build:rbe_linux_cuda_nvcc --config=rbe_linux_cuda_nvcc_py36 +build:rbe_linux_cuda_nvcc --config=rbe_linux_cuda_nvcc_py39 build:rbe_gpu_linux --config=rbe_linux_cuda_nvcc build:rbe_linux_cuda_clang_base --config=rbe_linux_cuda_base @@ -586,6 +593,12 @@ build:release_cpu_linux --config=avx_linux build:release_cpu_linux --crosstool_top="@ubuntu18.04-gcc7_manylinux2010-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain" test:release_cpu_linux --test_env=LD_LIBRARY_PATH +# manylinux2014 config for cpu +build:release_cpu_linux_manylinux2014 --config=release_base +build:release_cpu_linux_manylinux2014 --config=avx_linux +build:release_cpu_linux_manylinux2014 --crosstool_top="@ubuntu18.04-gcc8_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain" +test:release_cpu_linux_manylinux2014 --test_env=LD_LIBRARY_PATH + build:release_cpu_macos --config=release_base build:release_cpu_macos --config=avx_linux @@ -608,6 +621,12 @@ build:release_gpu_linux_11_4 --action_env=TF_CUDA_VERSION="11.4" build:release_gpu_linux_11_4 --action_env=TF_CUDNN_VERSION="8.2" build:release_gpu_linux_11_4 --crosstool_top=@ubuntu18.04-gcc7_manylinux2010-cuda11.4-cudnn8.2-tensorrt7.2_config_cuda//crosstool:toolchain +# manylinux2014 config for gpu +build:release_gpu_linux_manylinux2014 --config=release_gpu_linux +build:release_gpu_linux_manylinux2014 --action_env=GCC_HOST_COMPILER_PATH="/dt8/usr/bin/gcc" +build:release_gpu_linux_manylinux2014 --crosstool_top=@ubuntu18.04-gcc8_manylinux2014-cuda11.2-cudnn8.1-tensorrt7.2_config_cuda//crosstool:toolchain + + build:release_cpu_windows --config=release_base build:release_cpu_windows --config=avx_win build:release_cpu_windows --define=no_tensorflow_py_deps=true @@ -650,17 +669,26 @@ build:ubsan --linkopt -fsanitize=undefined build:ubsan --linkopt -lubsan # Disable TFRT integration for now unless --config=tfrt is specified. 
-build --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/jit/transforms,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/common,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/fallback,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils +build --deleted_packages=tensorflow/compiler/mlir/tfrt,tensorflow/compiler/mlir/tfrt/benchmarks,tensorflow/compiler/mlir/tfrt/jit/python_binding,tensorflow/compiler/mlir/tfrt/jit/transforms,tensorflow/compiler/mlir/tfrt/python_tests,tensorflow/compiler/mlir/tfrt/tests,tensorflow/compiler/mlir/tfrt/tests/analysis,tensorflow/compiler/mlir/tfrt/tests/jit,tensorflow/compiler/mlir/tfrt/tests/lhlo_to_tfrt,tensorflow/compiler/mlir/tfrt/tests/tf_to_corert,tensorflow/compiler/mlir/tfrt/tests/tf_to_tfrt_data,tensorflow/compiler/mlir/tfrt/tests/saved_model,tensorflow/compiler/mlir/tfrt/transforms/lhlo_gpu_to_tfrt_gpu,tensorflow/core/runtime_fallback,tensorflow/core/runtime_fallback/conversion,tensorflow/core/runtime_fallback/kernel,tensorflow/core/runtime_fallback/opdefs,tensorflow/core/runtime_fallback/runtime,tensorflow/core/runtime_fallback/util,tensorflow/core/tfrt/common,tensorflow/core/tfrt/eager,tensorflow/core/tfrt/eager/backends/cpu,tensorflow/core/tfrt/eager/backends/gpu,tensorflow/core/tfrt/eager/core_runtime,tensorflow/core/tfrt/eager/cpp_tests/core_runtime,tensorflow/core/tfrt/fallback,tensorflow/core/tfrt/gpu,tensorflow/core/tfrt/run_handler_thread_pool,tensorflow/core/tfrt/runtime,tensorflow/core/tfrt/saved_model,tensorflow/core/tfrt/saved_model/tests,tensorflow/core/tfrt/tpu,tensorflow/core/tfrt/utils build:tfrt --deleted_packages= -# Experimental configuration for testing XLA GPU lowering to TFRT BEF thunks. -# bazel test --config=experimental_enable_bef_thunk \ +# Experimental configuration for testing XLA GPU lowering to TFRT. +# +# To run using BEF thunks, combine with +# --//tensorflow/compiler/xla/service/gpu:enable_bef_thunk. 
For example, +# bazel test --config=experimental_tfrt_gpu \ +# --//tensorflow/compiler/xla/service/gpu:enable_bef_thunk \ # //tensorflow/compiler/xla/service/gpu/tests:mlir_gemm_test -build:experimental_enable_bef_thunk --config=tfrt -build:experimental_enable_bef_thunk --//tensorflow/compiler/xla/service/gpu:enable_bef_thunk -build:experimental_enable_bef_thunk --@tf_runtime//:enable_gpu -build:experimental_enable_bef_thunk --@rules_cuda//cuda:enable_cuda -build:experimental_enable_bef_thunk --@rules_cuda//cuda:cuda_runtime=//tensorflow/compiler/xla/service/gpu:cuda_runtime_for_xlir -build:experimental_enable_bef_thunk --nocheck_visibility -build:experimental_enable_bef_thunk --incompatible_strict_action_env -build:experimental_enable_bef_thunk --config=monolithic \ No newline at end of file +# +# To run using BEF executable, combine with +# --//tensorflow/compiler/xla/service/gpu:enable_bef_executable. For example, +# bazel test --config=experimental_tfrt_gpu \ +# --//tensorflow/compiler/xla/service/gpu:enable_bef_executable \ +# //tensorflow/compiler/xla/service/gpu/tests:mnist +build:experimental_tfrt_gpu --config=tfrt +build:experimental_tfrt_gpu --@tf_runtime//:enable_gpu +build:experimental_tfrt_gpu --@rules_cuda//cuda:enable_cuda +build:experimental_tfrt_gpu --@rules_cuda//cuda:cuda_runtime=//tensorflow/compiler/xla/service/gpu:cuda_runtime_for_xlir +build:experimental_tfrt_gpu --nocheck_visibility +build:experimental_tfrt_gpu --incompatible_strict_action_env +build:experimental_tfrt_gpu --config=monolithic diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/AUC.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/AUC.java index 273d2bb6c5e..b1f9f2c4e8a 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/AUC.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/AUC.java @@ -942,82 +942,114 @@ public Op resetStates(Ops tf) { return tf.withControlDependencies(controlList).noOp(); } - /** @return the numThresholds */ + /** + * @return the numThresholds + */ public int getNumThresholds() { return numThresholds; } - /** @return the curve */ + /** + * @return the curve + */ public AUCCurve getCurve() { return curve; } - /** @return the summationMethod */ + /** + * @return the summationMethod + */ public AUCSummationMethod getSummationMethod() { return summationMethod; } - /** @return the thresholds */ + /** + * @return the thresholds + */ public float[] getThresholds() { return thresholds; } - /** @return the multiLabel */ + /** + * @return the multiLabel + */ public boolean isMultiLabel() { return multiLabel; } - /** @return the numLabels */ + /** + * @return the numLabels + */ public Integer getNumLabels() { return numLabels; } - /** @param numLabels the numLabels to set */ + /** + * @param numLabels the numLabels to set + */ public void setNumLabels(Integer numLabels) { this.numLabels = numLabels; } - /** @return the labelWeights */ + /** + * @return the labelWeights + */ public Operand getLabelWeights() { return labelWeights; } - /** @return the truePositives */ + /** + * @return the truePositives + */ public Variable getTruePositives() { return truePositives; } - /** @return the falsePositives */ + /** + * @return the falsePositives + */ public Variable getFalsePositives() { return falsePositives; } - /** @return the trueNegatives */ + /** + * @return the trueNegatives + */ public Variable getTrueNegatives() { return trueNegatives; } - /** @return the falseNegatives */ + /** + * 
@return the falseNegatives + */ public Variable getFalseNegatives() { return falseNegatives; } - /** @return the truePositivesName */ + /** + * @return the truePositivesName + */ public String getTruePositivesName() { return truePositivesName; } - /** @return the falsePositivesName */ + /** + * @return the falsePositivesName + */ public String getFalsePositivesName() { return falsePositivesName; } - /** @return the trueNegativesName */ + /** + * @return the trueNegativesName + */ public String getTrueNegativesName() { return trueNegativesName; } - /** @return the falseNegativesName */ + /** + * @return the falseNegativesName + */ public String getFalseNegativesName() { return falseNegativesName; } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanTensor.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanTensor.java index 3b4f4e9a73a..b98dac5a328 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanTensor.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/MeanTensor.java @@ -163,12 +163,16 @@ public Operand result(Ops tf, Class resultType) { } } - /** @return the total */ + /** + * @return the total + */ public Variable getTotal() { return total; } - /** @return the count */ + /** + * @return the count + */ public Variable getCount() { return count; }
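Note (explanatory, not part of the diff): the repeated experimental_full_type blocks added to the dataset ops in ops.pbtxt come from regenerating the op definitions against the upgraded TensorFlow sources; they describe each DT_VARIANT handle as a TFT_DATASET whose element types are bound to the op's output_types attribute, and they carry no behavior change on the Java side.

The Javadoc hunks in RawTensor.java, TString.java, AUC.java and MeanTensor.java are likewise mechanical reflows produced by the Spotless format check enforced in CI: a single-line block tag is expanded onto its own lines. As a minimal sketch (hypothetical accessor, not taken from this diff), the formatter turns

    /** @return the count */
    public long getCount() {
      return count;
    }

into

    /**
     * @return the count
     */
    public long getCount() {
      return count;
    }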