diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java
index 6d0c86e18a..9331a9dc48 100644
--- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java
+++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java
@@ -259,7 +259,9 @@ public static class Builder extends StubSettings.Builder<BigQueryWriteStubSettings, Builder> {
           ImmutableSet.copyOf(
               Lists.<StatusCode.Code>newArrayList(
-                  StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE)));
+                  StatusCode.Code.DEADLINE_EXCEEDED,
+                  StatusCode.Code.UNAVAILABLE,
+                  StatusCode.Code.RESOURCE_EXHAUSTED)));
       RETRYABLE_CODE_DEFINITIONS = definitions.build();
     }

diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java
index f10e9f3a9d..cadc196f5c 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryWriteImpl.java
@@ -174,8 +174,7 @@ public void batchCommitWriteStreams(
       responseObserver.onError(
           new IllegalArgumentException(
               String.format(
-                  "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or"
-                      + " %s",
+                  "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or %s",
                   response == null ? "null" : response.getClass().getName(),
                   BatchCommitWriteStreamsResponse.class.getName(),
                   Exception.class.getName())));

diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java
index 889be3bbb8..b1d6e58aae 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java
@@ -117,8 +117,7 @@ public void batchCreateReadSessionStreams(
       responseObserver.onError(
           new IllegalArgumentException(
               String.format(
-                  "Unrecognized response type %s for method BatchCreateReadSessionStreams, expected"
-                      + " %s or %s",
+                  "Unrecognized response type %s for method BatchCreateReadSessionStreams, expected %s or %s",
                   response == null ? "null" : response.getClass().getName(),
                   Storage.BatchCreateReadSessionStreamsResponse.class.getName(),
                   Exception.class.getName())));

diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java
index 814d5b73ff..098a1e7fa4 100644
--- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java
+++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java
@@ -174,8 +174,7 @@ public void batchCommitWriteStreams(
       responseObserver.onError(
           new IllegalArgumentException(
               String.format(
-                  "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or"
-                      + " %s",
+                  "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or %s",
                   response == null ? "null" : response.getClass().getName(),
                   BatchCommitWriteStreamsResponse.class.getName(),
                   Exception.class.getName())));

diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java
index 55258107b7..bf64f57698 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java
@@ -24,9 +24,10 @@
 * <pre>
  * Request message for `AppendRows`.
  *
- * Due to the nature of AppendRows being a bidirectional streaming RPC, certain
- * parts of the AppendRowsRequest need only be specified for the first request
- * sent each time the gRPC network connection is opened/reopened.
+ * Because AppendRows is a bidirectional streaming RPC, certain parts of the
+ * AppendRowsRequest need only be specified in the first request on a
+ * connection and again whenever the table destination changes. For the
+ * default stream, you can switch table destinations within one connection.
  *
  * The size of a single AppendRowsRequest must be less than 10 MB in size.
  * Requests larger than this return an error, typically `INVALID_ARGUMENT`.
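
The stub-settings hunk at the top of this diff adds RESOURCE_EXHAUSTED to the
generated retryable-code defaults, so transient quota pushback can be retried
rather than surfacing immediately. A minimal sketch of applying the same set of
codes through the public settings surface (the choice of
createWriteStreamSettings() as the call site is illustrative; the generated
mapping decides which methods pick up the defaults):

    import com.google.api.gax.rpc.StatusCode;
    import com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings;

    public class RetryCodesSketch {
      public static BigQueryWriteSettings build() throws java.io.IOException {
        BigQueryWriteSettings.Builder builder = BigQueryWriteSettings.newBuilder();
        // Same codes as the new RETRYABLE_CODE_DEFINITIONS entry above.
        builder
            .createWriteStreamSettings()
            .setRetryableCodes(
                StatusCode.Code.DEADLINE_EXCEEDED,
                StatusCode.Code.UNAVAILABLE,
                StatusCode.Code.RESOURCE_EXHAUSTED);
        return builder.build();
      }
    }
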
@@ -85,10 +86,9 @@ protected com.google.protobuf.MapField internalGetMapField(int number) {
    *
    *
    * 
-   * An enum to indicate how to interpret missing values. Missing values are
-   * fields present in user schema but missing in rows. A missing value can
-   * represent a NULL or a column default value defined in BigQuery table
-   * schema.
+   * An enum to indicate how to interpret missing values of fields that are
+   * present in the user schema but missing in rows. A missing value can represent
+   * a NULL or a column default value defined in the BigQuery table schema.
    * 
* * Protobuf enum {@code @@ -260,9 +260,14 @@ public interface ProtoDataOrBuilder * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -274,9 +279,14 @@ public interface ProtoDataOrBuilder * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -288,9 +298,14 @@ public interface ProtoDataOrBuilder * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -390,9 +405,14 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -407,9 +427,14 @@ public boolean hasWriterSchema() { * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -426,9 +451,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() { * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -915,9 +945,14 @@ public Builder mergeFrom( * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -931,9 +966,14 @@ public boolean hasWriterSchema() { * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -953,9 +993,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() { * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -977,9 +1022,14 @@ public Builder setWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchema * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -999,9 +1049,14 @@ public Builder setWriterSchema( * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1027,9 +1082,14 @@ public Builder mergeWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchem * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1048,9 +1108,14 @@ public Builder clearWriterSchema() { * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1064,9 +1129,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder getWriterSchemaB * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1084,9 +1154,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchema * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1440,10 +1515,14 @@ public RowsCase getRowsCase() { * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -1452,6 +1531,22 @@ public RowsCase getRowsCase() {
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in r4, so the write_stream field must be
+   * populated in all subsequent requests on this connection.
    * 
* * @@ -1476,10 +1571,14 @@ public java.lang.String getWriteStream() { * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -1488,6 +1587,22 @@ public java.lang.String getWriteStream() {
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in r4, so the write_stream field must be
+   * populated in all subsequent requests on this connection.
    * 
* * @@ -2248,9 +2363,10 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build *
    * Request message for `AppendRows`.
    *
-   * Due to the nature of AppendRows being a bidirectional streaming RPC, certain
-   * parts of the AppendRowsRequest need only be specified for the first request
-   * sent each time the gRPC network connection is opened/reopened.
+   * Because AppendRows is a bidirectional streaming RPC, certain parts of the
+   * AppendRowsRequest need only be specified in the first request on a
+   * connection and again whenever the table destination changes. For the
+   * default stream, you can switch table destinations within one connection.
    *
    * The size of a single AppendRowsRequest must be less than 10 MB in size.
    * Requests larger than this return an error, typically `INVALID_ARGUMENT`.
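
In the Java client, StreamWriter manages these per-connection fields for you. A
minimal sketch, assuming a generated message class RowMessage (hypothetical)
and the ProtoSchemaConverter helper shipped with the client:

    import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
    import com.google.cloud.bigquery.storage.v1.ProtoSchema;
    import com.google.cloud.bigquery.storage.v1.ProtoSchemaConverter;
    import com.google.cloud.bigquery.storage.v1.StreamWriter;

    try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
      // writer_schema rides on the first request; the writer re-sends it when
      // the connection reopens or the destination changes, per the rules above.
      ProtoSchema schema = ProtoSchemaConverter.convert(RowMessage.getDescriptor());
      StreamWriter writer =
          StreamWriter.newBuilder(
                  "projects/p/datasets/d/tables/t/streams/_default", client)
              .setWriterSchema(schema)
              .build();
      // writer.append(rows) then returns an ApiFuture<AppendRowsResponse>.
      writer.close();
    }
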
@@ -2563,10 +2679,14 @@ public Builder clearRows() {
      *
      *
      * 
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2575,6 +2695,22 @@ public Builder clearRows() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in r4, so the write_stream field must be
+     * populated in all subsequent requests on this connection.
      * 
* * @@ -2598,10 +2734,14 @@ public java.lang.String getWriteStream() { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2610,6 +2750,22 @@ public java.lang.String getWriteStream() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in r4, so the write_stream field must be
+     * populated in all subsequent requests on this connection.
      * 
* * @@ -2633,10 +2789,14 @@ public com.google.protobuf.ByteString getWriteStreamBytes() { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2645,6 +2805,22 @@ public com.google.protobuf.ByteString getWriteStreamBytes() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in r4, so the write_stream field must be
+     * populated in all subsequent requests on this connection.
      * 
* * @@ -2667,10 +2843,14 @@ public Builder setWriteStream(java.lang.String value) { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2679,6 +2859,22 @@ public Builder setWriteStream(java.lang.String value) {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in r4, so the write_stream field must be
+     * populated in all subsequent requests on this connection.
      * 
* * @@ -2697,10 +2893,14 @@ public Builder clearWriteStream() { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2709,6 +2909,22 @@ public Builder clearWriteStream() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in r4, so the write_stream field must be
+     * populated in all subsequent requests on this connection.
      * 
* * diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java index a9d1f10fe1..1f40b2ec71 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java @@ -27,10 +27,14 @@ public interface AppendRowsRequestOrBuilder * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -39,6 +43,22 @@ public interface AppendRowsRequestOrBuilder
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in r4, so the write_stream field must be
+   * populated in all subsequent requests on this connection.
    * 
* * @@ -52,10 +72,14 @@ public interface AppendRowsRequestOrBuilder * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -64,6 +88,22 @@ public interface AppendRowsRequestOrBuilder
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in r4, so the write_stream field must be
+   * populated in all subsequent requests on this connection.
    * 
* * diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java index 54711847d8..28eb0fd9f0 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java @@ -4342,10 +4342,10 @@ public long getEstimatedTotalBytesScanned() { * * *
-   * Output only. A pre-projected estimate of the total physical size (in bytes)
-   * of files this session will scan when all streams are completely consumed.
-   * This estimate does not depend on the selected columns and can be based on
-   * metadata from the table which might be incomplete or stale. Only set for
+   * Output only. A pre-projected estimate of the total physical size of files
+   * (in bytes) that this session will scan when all streams are consumed. This
+   * estimate is independent of the selected columns and can be based on
+   * incomplete or stale metadata from the table. This field is only set for
    * BigLake tables.
    * 
* @@ -7182,10 +7182,10 @@ public Builder clearEstimatedTotalBytesScanned() { * * *
-     * Output only. A pre-projected estimate of the total physical size (in bytes)
-     * of files this session will scan when all streams are completely consumed.
-     * This estimate does not depend on the selected columns and can be based on
-     * metadata from the table which might be incomplete or stale. Only set for
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table. This field is only set for
      * BigLake tables.
      * 
* @@ -7203,10 +7203,10 @@ public long getEstimatedTotalPhysicalFileSize() { * * *
-     * Output only. A pre-projected estimate of the total physical size (in bytes)
-     * of files this session will scan when all streams are completely consumed.
-     * This estimate does not depend on the selected columns and can be based on
-     * metadata from the table which might be incomplete or stale. Only set for
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table. This field is only set for
      * BigLake tables.
      * 
* @@ -7228,10 +7228,10 @@ public Builder setEstimatedTotalPhysicalFileSize(long value) { * * *
-     * Output only. A pre-projected estimate of the total physical size (in bytes)
-     * of files this session will scan when all streams are completely consumed.
-     * This estimate does not depend on the selected columns and can be based on
-     * metadata from the table which might be incomplete or stale. Only set for
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table. This field is only set for
      * BigLake tables.
      * 
* diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java index 85a8b45aa1..1fd051672d 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java @@ -435,10 +435,10 @@ public interface ReadSessionOrBuilder * * *
-   * Output only. A pre-projected estimate of the total physical size (in bytes)
-   * of files this session will scan when all streams are completely consumed.
-   * This estimate does not depend on the selected columns and can be based on
-   * metadata from the table which might be incomplete or stale. Only set for
+   * Output only. A pre-projected estimate of the total physical size of files
+   * (in bytes) that this session will scan when all streams are consumed. This
+   * estimate is independent of the selected columns and can be based on
+   * incomplete or stale metadata from the table. This field is only set for
    * BigLake tables.
    * 
* diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto index d28c36f43f..2959faaf0b 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto @@ -397,9 +397,10 @@ message CreateWriteStreamRequest { // Request message for `AppendRows`. // -// Due to the nature of AppendRows being a bidirectional streaming RPC, certain -// parts of the AppendRowsRequest need only be specified for the first request -// sent each time the gRPC network connection is opened/reopened. +// Because AppendRows is a bidirectional streaming RPC, certain parts of the +// AppendRowsRequest need only be specified for the first request before +// switching table destinations. You can also switch table destinations within +// the same connection for the default stream. // // The size of a single AppendRowsRequest must be less than 10 MB in size. // Requests larger than this return an error, typically `INVALID_ARGUMENT`. @@ -407,9 +408,14 @@ message AppendRowsRequest { // ProtoData contains the data rows and schema when constructing append // requests. message ProtoData { - // Proto schema used to serialize the data. This value only needs to be - // provided as part of the first request on a gRPC network connection, - // and will be ignored for subsequent requests on the connection. + // The protocol buffer schema used to serialize the data. Provide this value + // whenever: + // + // * You send the first request of an RPC connection. + // + // * You change the input schema. + // + // * You specify a new destination table. ProtoSchema writer_schema = 1; // Serialized row data in protobuf message format. @@ -419,10 +425,9 @@ message AppendRowsRequest { ProtoRows rows = 2; } - // An enum to indicate how to interpret missing values. Missing values are - // fields present in user schema but missing in rows. A missing value can - // represent a NULL or a column default value defined in BigQuery table - // schema. + // An enum to indicate how to interpret missing values of fields that are + // present in user schema but missing in rows. A missing value can represent a + // NULL or a column default value defined in BigQuery table schema. enum MissingValueInterpretation { // Invalid missing value interpretation. Requests with this value will be // rejected. @@ -436,10 +441,14 @@ message AppendRowsRequest { DEFAULT_VALUE = 2; } - // Required. The write_stream identifies the target of the append operation, - // and only needs to be specified as part of the first request on the gRPC - // connection. If provided for subsequent requests, it must match the value of - // the first request. + // Required. The write_stream identifies the append operation. It must be + // provided in the following scenarios: + // + // * In the first request to an AppendRows connection. + // + // * In all subsequent requests to an AppendRows connection, if you use the + // same connection to write to multiple tables or change the input schema for + // default streams. // // For explicitly created write streams, the format is: // @@ -448,6 +457,22 @@ message AppendRowsRequest { // For the special default stream, the format is: // // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`. 
+ // + // An example of a possible sequence of requests with write_stream fields + // within a single connection: + // + // * r1: {write_stream: stream_name_1} + // + // * r2: {write_stream: /*omit*/} + // + // * r3: {write_stream: /*omit*/} + // + // * r4: {write_stream: stream_name_2} + // + // * r5: {write_stream: stream_name_2} + // + // The destination changed in request_4, so the write_stream field must be + // populated in all subsequent requests in this stream. string write_stream = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto index 0a7c7c79c0..785c74f788 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto @@ -194,10 +194,10 @@ message ReadSession { int64 estimated_total_bytes_scanned = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. A pre-projected estimate of the total physical size (in bytes) - // of files this session will scan when all streams are completely consumed. - // This estimate does not depend on the selected columns and can be based on - // metadata from the table which might be incomplete or stale. Only set for + // Output only. A pre-projected estimate of the total physical size of files + // (in bytes) that this session will scan when all streams are consumed. This + // estimate is independent of the selected columns and can be based on + // incomplete or stale metadata from the table. This field is only set for // BigLake tables. int64 estimated_total_physical_file_size = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java index f0070660b2..9c058789f6 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java @@ -90,7 +90,6 @@ public interface TableReadOptionsOrBuilder * @return A list containing the selectedFields. */ java.util.List getSelectedFieldsList(); - /** * * @@ -149,7 +148,6 @@ public interface TableReadOptionsOrBuilder * @return The count of selectedFields. */ int getSelectedFieldsCount(); - /** * * @@ -209,7 +207,6 @@ public interface TableReadOptionsOrBuilder * @return The selectedFields at the given index. */ java.lang.String getSelectedFields(int index); - /** * * @@ -291,7 +288,6 @@ public interface TableReadOptionsOrBuilder * @return The rowRestriction. 
*/ java.lang.String getRowRestriction(); - /** * * @@ -424,7 +420,6 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { return selectedFields_; } - /** * * @@ -485,7 +480,6 @@ public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { public int getSelectedFieldsCount() { return selectedFields_.size(); } - /** * * @@ -547,7 +541,6 @@ public int getSelectedFieldsCount() { public java.lang.String getSelectedFields(int index) { return selectedFields_.get(index); } - /** * * @@ -614,7 +607,6 @@ public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { @SuppressWarnings("serial") private volatile java.lang.Object rowRestriction_ = ""; - /** * * @@ -647,7 +639,6 @@ public java.lang.String getRowRestriction() { return s; } } - /** * * @@ -1087,7 +1078,6 @@ private void ensureSelectedFieldsIsMutable() { } bitField0_ |= 0x00000001; } - /** * * @@ -1149,7 +1139,6 @@ public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { selectedFields_.makeImmutable(); return selectedFields_; } - /** * * @@ -1210,7 +1199,6 @@ public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { public int getSelectedFieldsCount() { return selectedFields_.size(); } - /** * * @@ -1272,7 +1260,6 @@ public int getSelectedFieldsCount() { public java.lang.String getSelectedFields(int index) { return selectedFields_.get(index); } - /** * * @@ -1334,7 +1321,6 @@ public java.lang.String getSelectedFields(int index) { public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { return selectedFields_.getByteString(index); } - /** * * @@ -1404,7 +1390,6 @@ public Builder setSelectedFields(int index, java.lang.String value) { onChanged(); return this; } - /** * * @@ -1473,7 +1458,6 @@ public Builder addSelectedFields(java.lang.String value) { onChanged(); return this; } - /** * * @@ -1539,7 +1523,6 @@ public Builder addAllSelectedFields(java.lang.Iterable values) onChanged(); return this; } - /** * * @@ -1604,7 +1587,6 @@ public Builder clearSelectedFields() { onChanged(); return this; } - /** * * @@ -1676,7 +1658,6 @@ public Builder addSelectedFieldsBytes(com.google.protobuf.ByteString value) { } private java.lang.Object rowRestriction_ = ""; - /** * * @@ -1708,7 +1689,6 @@ public java.lang.String getRowRestriction() { return (java.lang.String) ref; } } - /** * * @@ -1740,7 +1720,6 @@ public com.google.protobuf.ByteString getRowRestrictionBytes() { return (com.google.protobuf.ByteString) ref; } } - /** * * @@ -1771,7 +1750,6 @@ public Builder setRowRestriction(java.lang.String value) { onChanged(); return this; } - /** * * @@ -1798,7 +1776,6 @@ public Builder clearRowRestriction() { onChanged(); return this; } - /** * * diff --git a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java index a695486fc4..8713b6a98f 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java +++ b/proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java @@ -12454,7 +12454,6 @@ public Builder clearStatus() { : status_; } } - /** * * @@ -12487,7 +12486,6 @@ public Builder clearStatus() { com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus.Builder, 
com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatusOrBuilder> throttleStatusBuilder_; - /** * * @@ -12503,7 +12501,6 @@ public Builder clearStatus() { public boolean hasThrottleStatus() { return ((bitField0_ & 0x00000010) != 0); } - /** * * @@ -12526,7 +12523,6 @@ public com.google.cloud.bigquery.storage.v1beta1.Storage.ThrottleStatus getThrot return throttleStatusBuilder_.getMessage(); } } - /** * * @@ -12551,7 +12547,6 @@ public Builder setThrottleStatus( onChanged(); return this; } - /** * * @@ -12574,7 +12569,6 @@ public Builder setThrottleStatus( onChanged(); return this; } - /** * * @@ -12604,7 +12598,6 @@ public Builder mergeThrottleStatus( onChanged(); return this; } - /** * * @@ -12625,7 +12618,6 @@ public Builder clearThrottleStatus() { onChanged(); return this; } - /** * * @@ -12642,7 +12634,6 @@ public Builder clearThrottleStatus() { onChanged(); return getThrottleStatusFieldBuilder().getBuilder(); } - /** * * @@ -12664,7 +12655,6 @@ public Builder clearThrottleStatus() { : throttleStatus_; } } - /** * * diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java index e9d0a925a8..1fd91308f5 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java @@ -56,8 +56,8 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { static { java.lang.String[] descriptorData = { - "\n" - + "2google/cloud/bigquery/storage/v1beta2/stream.proto\022%google.cloud.bigquery.stor" + "\n2google/cloud/bigquery/storage/v1beta2/" + + "stream.proto\022%google.cloud.bigquery.stor" + "age.v1beta2\032\037google/api/field_behavior.p" + "roto\032\031google/api/resource.proto\0321google/" + "cloud/bigquery/storage/v1beta2/arrow.pro"
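
For completeness, the ReadSession wording changes in this diff concern two
output-only size estimates. Reading them back after creating a session looks
roughly like this (resource names are placeholders):

    import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
    import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest;
    import com.google.cloud.bigquery.storage.v1.DataFormat;
    import com.google.cloud.bigquery.storage.v1.ReadSession;

    try (BigQueryReadClient client = BigQueryReadClient.create()) {
      ReadSession session =
          client.createReadSession(
              CreateReadSessionRequest.newBuilder()
                  .setParent("projects/my-project")
                  .setReadSession(
                      ReadSession.newBuilder()
                          .setTable("projects/my-project/datasets/d/tables/t")
                          .setDataFormat(DataFormat.AVRO))
                  .setMaxStreamCount(1)
                  .build());
      long logicalBytes = session.getEstimatedTotalBytesScanned();
      // Per the reworded comment: remains 0 unless the table is a BigLake table.
      long physicalBytes = session.getEstimatedTotalPhysicalFileSize();
    }
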