google-cloud-bigtable-v2 0.7.1 → 0.8.0

@@ -209,8 +209,8 @@ module Google
  # Request message for Bigtable.MutateRow.
  # @!attribute [rw] table_name
  # @return [::String]
- # Required. The unique name of the table to which the mutation should be applied.
- # Values are of the form
+ # Required. The unique name of the table to which the mutation should be
+ # applied. Values are of the form
  # `projects/<project>/instances/<instance>/tables/<table>`.
  # @!attribute [rw] app_profile_id
  # @return [::String]
@@ -221,9 +221,9 @@ module Google
  # Required. The key of the row to which the mutation should be applied.
  # @!attribute [rw] mutations
  # @return [::Array<::Google::Cloud::Bigtable::V2::Mutation>]
- # Required. Changes to be atomically applied to the specified row. Entries are applied
- # in order, meaning that earlier mutations can be masked by later ones.
- # Must contain at least one entry and at most 100000.
+ # Required. Changes to be atomically applied to the specified row. Entries
+ # are applied in order, meaning that earlier mutations can be masked by later
+ # ones. Must contain at least one entry and at most 100000.
  class MutateRowRequest
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
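For reference, the call documented above looks roughly like this through the generated Ruby client. This is a minimal sketch, assuming the usual GAPIC surface (Google::Cloud::Bigtable::V2::Bigtable::Client with keyword arguments mirroring the request fields); the project, instance, table, family, and values are placeholders.

    require "google/cloud/bigtable/v2"

    # Minimal sketch: apply a single SetCell mutation to one row.
    client = Google::Cloud::Bigtable::V2::Bigtable::Client.new

    mutation = Google::Cloud::Bigtable::V2::Mutation.new(
      set_cell: {
        family_name:      "cf",
        column_qualifier: "greeting",
        timestamp_micros: -1,       # -1 lets the server assign the timestamp
        value:            "hello"
      }
    )

    table_name = "projects/my-project/instances/my-instance/tables/my-table"
    client.mutate_row table_name: table_name,
                      row_key:    "user#1",
                      mutations:  [mutation]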
@@ -238,7 +238,8 @@ module Google
  # Request message for BigtableService.MutateRows.
  # @!attribute [rw] table_name
  # @return [::String]
- # Required. The unique name of the table to which the mutations should be applied.
+ # Required. The unique name of the table to which the mutations should be
+ # applied.
  # @!attribute [rw] app_profile_id
  # @return [::String]
  # This value specifies routing for replication. If not specified, the
@@ -260,10 +261,9 @@ module Google
  # The key of the row to which the `mutations` should be applied.
  # @!attribute [rw] mutations
  # @return [::Array<::Google::Cloud::Bigtable::V2::Mutation>]
- # Required. Changes to be atomically applied to the specified row. Mutations are
- # applied in order, meaning that earlier mutations can be masked by
- # later ones.
- # You must specify at least one mutation.
+ # Required. Changes to be atomically applied to the specified row.
+ # Mutations are applied in order, meaning that earlier mutations can be
+ # masked by later ones. You must specify at least one mutation.
  class Entry
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
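A corresponding sketch for the batch path documented above, reusing the `client` and `table_name` placeholders from the previous example. It assumes `mutate_rows` is the generated server-streaming method; each response entry carries the index of the request entry plus a google.rpc.Status.

    # Sketch (assumed GAPIC surface): batch two rows in one MutateRows call.
    entry = ->(key, value) do
      Google::Cloud::Bigtable::V2::MutateRowsRequest::Entry.new(
        row_key:   key,
        mutations: [{ set_cell: { family_name: "cf", column_qualifier: "col",
                                  timestamp_micros: -1, value: value } }]
      )
    end

    stream = client.mutate_rows table_name: table_name,
                                entries:    [entry.call("row-1", "a"), entry.call("row-2", "b")]

    stream.each do |response|
      response.entries.each do |e|
        # Each entry reports the index into the request and a google.rpc.Status.
        warn "entry #{e.index} failed: #{e.status.message}" unless e.status.code.zero?
      end
    end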
@@ -298,9 +298,8 @@ module Google
  # Request message for Bigtable.CheckAndMutateRow.
  # @!attribute [rw] table_name
  # @return [::String]
- # Required. The unique name of the table to which the conditional mutation should be
- # applied.
- # Values are of the form
+ # Required. The unique name of the table to which the conditional mutation
+ # should be applied. Values are of the form
  # `projects/<project>/instances/<instance>/tables/<table>`.
  # @!attribute [rw] app_profile_id
  # @return [::String]
@@ -308,7 +307,8 @@ module Google
  # "default" application profile will be used.
  # @!attribute [rw] row_key
  # @return [::String]
- # Required. The key of the row to which the conditional mutation should be applied.
+ # Required. The key of the row to which the conditional mutation should be
+ # applied.
  # @!attribute [rw] predicate_filter
  # @return [::Google::Cloud::Bigtable::V2::RowFilter]
  # The filter to be applied to the contents of the specified row. Depending
@@ -347,8 +347,9 @@ module Google
  # Request message for client connection keep-alive and warming.
  # @!attribute [rw] name
  # @return [::String]
- # Required. The unique name of the instance to check permissions for as well as
- # respond. Values are of the form `projects/<project>/instances/<instance>`.
+ # Required. The unique name of the instance to check permissions for as well
+ # as respond. Values are of the form
+ # `projects/<project>/instances/<instance>`.
  # @!attribute [rw] app_profile_id
  # @return [::String]
  # This value specifies routing for replication. If not specified, the
@@ -367,9 +368,8 @@ module Google
  # Request message for Bigtable.ReadModifyWriteRow.
  # @!attribute [rw] table_name
  # @return [::String]
- # Required. The unique name of the table to which the read/modify/write rules should be
- # applied.
- # Values are of the form
+ # Required. The unique name of the table to which the read/modify/write rules
+ # should be applied. Values are of the form
  # `projects/<project>/instances/<instance>/tables/<table>`.
  # @!attribute [rw] app_profile_id
  # @return [::String]
@@ -377,12 +377,13 @@ module Google
  # "default" application profile will be used.
  # @!attribute [rw] row_key
  # @return [::String]
- # Required. The key of the row to which the read/modify/write rules should be applied.
+ # Required. The key of the row to which the read/modify/write rules should be
+ # applied.
  # @!attribute [rw] rules
  # @return [::Array<::Google::Cloud::Bigtable::V2::ReadModifyWriteRule>]
- # Required. Rules specifying how the specified row's contents are to be transformed
- # into writes. Entries are applied in order, meaning that earlier rules will
- # affect the results of later ones.
+ # Required. Rules specifying how the specified row's contents are to be
+ # transformed into writes. Entries are applied in order, meaning that earlier
+ # rules will affect the results of later ones.
  class ReadModifyWriteRowRequest
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
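A hedged sketch of the read/modify/write call documented above, again assuming the generated keyword-argument surface. It shows two rules applied in order (an increment followed by an append), with placeholder family and qualifier names and the `client`/`table_name` placeholders from the earlier sketches.

    # Sketch: atomically increment a counter and append to a cell in one row.
    increment = Google::Cloud::Bigtable::V2::ReadModifyWriteRule.new(
      family_name:      "stats",
      column_qualifier: "visits",
      increment_amount: 1
    )
    append = Google::Cloud::Bigtable::V2::ReadModifyWriteRule.new(
      family_name:      "stats",
      column_qualifier: "history",
      append_value:     "|visit"
    )

    response = client.read_modify_write_row table_name: table_name,
                                            row_key:    "user#1",
                                            rules:      [increment, append]
    response.row  # the new contents of the modified cells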
@@ -396,6 +397,239 @@ module Google
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
  end
+
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Request message for Bigtable.GenerateInitialChangeStreamPartitions.
+ # @!attribute [rw] table_name
+ # @return [::String]
+ # Required. The unique name of the table from which to get change stream
+ # partitions. Values are of the form
+ # `projects/<project>/instances/<instance>/tables/<table>`.
+ # Change streaming must be enabled on the table.
+ # @!attribute [rw] app_profile_id
+ # @return [::String]
+ # This value specifies routing for replication. If not specified, the
+ # "default" application profile will be used.
+ # Single cluster routing must be configured on the profile.
+ class GenerateInitialChangeStreamPartitionsRequest
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Response message for Bigtable.GenerateInitialChangeStreamPartitions.
+ # @!attribute [rw] partition
+ # @return [::Google::Cloud::Bigtable::V2::StreamPartition]
+ # A partition of the change stream.
+ class GenerateInitialChangeStreamPartitionsResponse
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Request message for Bigtable.ReadChangeStream.
+ # @!attribute [rw] table_name
+ # @return [::String]
+ # Required. The unique name of the table from which to read a change stream.
+ # Values are of the form
+ # `projects/<project>/instances/<instance>/tables/<table>`.
+ # Change streaming must be enabled on the table.
+ # @!attribute [rw] app_profile_id
+ # @return [::String]
+ # This value specifies routing for replication. If not specified, the
+ # "default" application profile will be used.
+ # Single cluster routing must be configured on the profile.
+ # @!attribute [rw] partition
+ # @return [::Google::Cloud::Bigtable::V2::StreamPartition]
+ # The partition to read changes from.
+ # @!attribute [rw] start_time
+ # @return [::Google::Protobuf::Timestamp]
+ # Start reading the stream at the specified timestamp. This timestamp must
+ # be within the change stream retention period, less than or equal to the
+ # current time, and after change stream creation, whichever is greater.
+ # This value is inclusive and will be truncated to microsecond granularity.
+ # @!attribute [rw] continuation_tokens
+ # @return [::Google::Cloud::Bigtable::V2::StreamContinuationTokens]
+ # Tokens that describe how to resume reading a stream where reading
+ # previously left off. If specified, changes will be read starting at that
+ # position. Tokens are delivered on the stream as part of `Heartbeat`
+ # and `CloseStream` messages.
+ #
+ # If a single token is provided, the token’s partition must exactly match
+ # the request’s partition. If multiple tokens are provided, as in the case
+ # of a partition merge, the union of the token partitions must exactly
+ # cover the request’s partition. Otherwise, INVALID_ARGUMENT will be
+ # returned.
+ # @!attribute [rw] end_time
+ # @return [::Google::Protobuf::Timestamp]
+ # If specified, OK will be returned when the stream advances beyond
+ # this time. Otherwise, changes will be continuously delivered on the stream.
+ # This value is inclusive and will be truncated to microsecond granularity.
+ # @!attribute [rw] heartbeat_duration
+ # @return [::Google::Protobuf::Duration]
+ # If specified, the duration between `Heartbeat` messages on the stream.
+ # Otherwise, defaults to 5 seconds.
+ class ReadChangeStreamRequest
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # Response message for Bigtable.ReadChangeStream.
+ # @!attribute [rw] data_change
+ # @return [::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse::DataChange]
+ # A mutation to the partition.
+ # @!attribute [rw] heartbeat
+ # @return [::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse::Heartbeat]
+ # A periodic heartbeat message.
+ # @!attribute [rw] close_stream
+ # @return [::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse::CloseStream]
+ # An indication that the stream should be closed.
+ class ReadChangeStreamResponse
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # A partial or complete mutation.
+ # @!attribute [rw] chunk_info
+ # @return [::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse::MutationChunk::ChunkInfo]
+ # If set, then the mutation is a `SetCell` with a chunked value across
+ # multiple messages.
+ # @!attribute [rw] mutation
+ # @return [::Google::Cloud::Bigtable::V2::Mutation]
+ # If this is a continuation of a chunked message (`chunked_value_offset` >
+ # 0), ignore all fields except the `SetCell`'s value and merge it with
+ # the previous message by concatenating the value fields.
+ class MutationChunk
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # Information about the chunking of this mutation.
+ # Only `SetCell` mutations can be chunked, and all chunks for a `SetCell`
+ # will be delivered contiguously with no other mutation types interleaved.
+ # @!attribute [rw] chunked_value_size
+ # @return [::Integer]
+ # The total value size of all the chunks that make up the `SetCell`.
+ # @!attribute [rw] chunked_value_offset
+ # @return [::Integer]
+ # The byte offset of this chunk into the total value size of the
+ # mutation.
+ # @!attribute [rw] last_chunk
+ # @return [::Boolean]
+ # When true, this is the last chunk of a chunked `SetCell`.
+ class ChunkInfo
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+
+ # A message corresponding to one or more mutations to the partition
+ # being streamed. A single logical `DataChange` message may also be split
+ # across a sequence of multiple individual messages. Messages other than
+ # the first in a sequence will only have the `type` and `chunks` fields
+ # populated, with the final message in the sequence also containing `done`
+ # set to true.
+ # @!attribute [rw] type
+ # @return [::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse::DataChange::Type]
+ # The type of the mutation.
+ # @!attribute [rw] source_cluster_id
+ # @return [::String]
+ # The cluster where the mutation was applied.
+ # Not set when `type` is `GARBAGE_COLLECTION`.
+ # @!attribute [rw] row_key
+ # @return [::String]
+ # The row key for all mutations that are part of this `DataChange`.
+ # If the `DataChange` is chunked across multiple messages, then this field
+ # will only be set for the first message.
+ # @!attribute [rw] commit_timestamp
+ # @return [::Google::Protobuf::Timestamp]
+ # The timestamp at which the mutation was applied on the Bigtable server.
+ # @!attribute [rw] tiebreaker
+ # @return [::Integer]
+ # A value that lets stream consumers reconstruct Bigtable's
+ # conflict resolution semantics.
+ # https://cloud.google.com/bigtable/docs/writes#conflict-resolution
+ # In the event that the same row key, column family, column qualifier,
+ # timestamp are modified on different clusters at the same
+ # `commit_timestamp`, the mutation with the larger `tiebreaker` will be the
+ # one chosen for the eventually consistent state of the system.
+ # @!attribute [rw] chunks
+ # @return [::Array<::Google::Cloud::Bigtable::V2::ReadChangeStreamResponse::MutationChunk>]
+ # The mutations associated with this change to the partition.
+ # May contain complete mutations or chunks of a multi-message chunked
+ # `DataChange` record.
+ # @!attribute [rw] done
+ # @return [::Boolean]
+ # When true, indicates that the entire `DataChange` has been read
+ # and the client can safely process the message.
+ # @!attribute [rw] token
+ # @return [::String]
+ # An encoded position for this stream's partition to restart reading from.
+ # This token is for the StreamPartition from the request.
+ # @!attribute [rw] estimated_low_watermark
+ # @return [::Google::Protobuf::Timestamp]
+ # An estimate of the commit timestamp that is usually lower than or equal
+ # to any timestamp for a record that will be delivered in the future on the
+ # stream. It is possible, under particular circumstances, that a future
+ # record has a timestamp lower than a previously seen timestamp. For
+ # an example usage see
+ # https://beam.apache.org/documentation/basics/#watermarks
+ class DataChange
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+
+ # The type of mutation.
+ module Type
+ # The type is unspecified.
+ TYPE_UNSPECIFIED = 0
+
+ # A user-initiated mutation.
+ USER = 1
+
+ # A system-initiated mutation as part of garbage collection.
+ # https://cloud.google.com/bigtable/docs/garbage-collection
+ GARBAGE_COLLECTION = 2
+
+ # This is a continuation of a multi-message change.
+ CONTINUATION = 3
+ end
+ end
+
+ # A periodic message with information that can be used to checkpoint
+ # the state of a stream.
+ # @!attribute [rw] continuation_token
+ # @return [::Google::Cloud::Bigtable::V2::StreamContinuationToken]
+ # A token that can be provided to a subsequent `ReadChangeStream` call
+ # to pick up reading at the current stream position.
+ # @!attribute [rw] estimated_low_watermark
+ # @return [::Google::Protobuf::Timestamp]
+ # An estimate of the commit timestamp that is usually lower than or equal
+ # to any timestamp for a record that will be delivered in the future on the
+ # stream. It is possible, under particular circumstances, that a future
+ # record has a timestamp lower than a previously seen timestamp. For
+ # an example usage see
+ # https://beam.apache.org/documentation/basics/#watermarks
+ class Heartbeat
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # A message indicating that the client should stop reading from the stream.
+ # If status is OK and `continuation_tokens` is empty, the stream has finished
+ # (for example if there was an `end_time` specified).
+ # If `continuation_tokens` is present, then a change in partitioning requires
+ # the client to open a new stream for each token to resume reading.
+ # @!attribute [rw] status
+ # @return [::Google::Rpc::Status]
+ # The status of the stream.
+ # @!attribute [rw] continuation_tokens
+ # @return [::Array<::Google::Cloud::Bigtable::V2::StreamContinuationToken>]
+ # If non-empty, contains the information needed to start reading the new
+ # partition(s) that contain segments of this partition's row range.
+ class CloseStream
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
  end
  end
  end
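The block above is the new change stream surface added in 0.8.0 (note the comments: it is intended for Apache Beam BigtableIO). A rough sketch of how the two calls fit together, assuming both are exposed as server-streaming methods on the generated client; the names and the sequential partition loop are illustrative only, since a real consumer such as Beam reads partitions in parallel and persists continuation tokens.

    # Sketch (assumed GAPIC surface; this API is aimed at Apache Beam BigtableIO).
    table = "projects/my-project/instances/my-instance/tables/my-table"

    partitions = client.generate_initial_change_stream_partitions table_name: table

    partitions.each do |part_response|
      stream = client.read_change_stream table_name: table,
                                         partition:  part_response.partition

      stream.each do |response|
        case
        when response.data_change
          change = response.data_change
          # Chunks of a large DataChange arrive across messages; `done` marks the last one.
          puts "#{change.type} on row #{change.row_key}" if change.done
        when response.heartbeat
          # Persist the continuation token so reading can resume later.
          token = response.heartbeat.continuation_token.token
        when response.close_stream
          # A non-empty continuation_tokens list means the partition was split or
          # merged; open a new stream per token to keep reading.
          break
        end
      end
    end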
@@ -550,6 +550,42 @@ module Google
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
  end
+
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # A partition of a change stream.
+ # @!attribute [rw] row_range
+ # @return [::Google::Cloud::Bigtable::V2::RowRange]
+ # The row range covered by this partition, specified by
+ # [`start_key_closed`, `end_key_open`).
+ class StreamPartition
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # The information required to continue reading the data from multiple
+ # `StreamPartitions` from where a previous read left off.
+ # @!attribute [rw] tokens
+ # @return [::Array<::Google::Cloud::Bigtable::V2::StreamContinuationToken>]
+ # List of continuation tokens.
+ class StreamContinuationTokens
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+
+ # NOTE: This API is intended to be used by Apache Beam BigtableIO.
+ # The information required to continue reading the data from a
+ # `StreamPartition` from where a previous read left off.
+ # @!attribute [rw] partition
+ # @return [::Google::Cloud::Bigtable::V2::StreamPartition]
+ # The partition that this token applies to.
+ # @!attribute [rw] token
+ # @return [::String]
+ # An encoded position in the stream to restart reading from.
+ class StreamContinuationToken
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
  end
  end
  end
@@ -0,0 +1,129 @@
+ # frozen_string_literal: true
+
+ # Copyright 2023 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+ module Protobuf
+ # A Timestamp represents a point in time independent of any time zone or local
+ # calendar, encoded as a count of seconds and fractions of seconds at
+ # nanosecond resolution. The count is relative to an epoch at UTC midnight on
+ # January 1, 1970, in the proleptic Gregorian calendar which extends the
+ # Gregorian calendar backwards to year one.
+ #
+ # All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
+ # second table is needed for interpretation, using a [24-hour linear
+ # smear](https://developers.google.com/time/smear).
+ #
+ # The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
+ # restricting to that range, we ensure that we can convert to and from [RFC
+ # 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
+ #
+ # # Examples
+ #
+ # Example 1: Compute Timestamp from POSIX `time()`.
+ #
+ # Timestamp timestamp;
+ # timestamp.set_seconds(time(NULL));
+ # timestamp.set_nanos(0);
+ #
+ # Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+ #
+ # struct timeval tv;
+ # gettimeofday(&tv, NULL);
+ #
+ # Timestamp timestamp;
+ # timestamp.set_seconds(tv.tv_sec);
+ # timestamp.set_nanos(tv.tv_usec * 1000);
+ #
+ # Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+ #
+ # FILETIME ft;
+ # GetSystemTimeAsFileTime(&ft);
+ # UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+ #
+ # // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+ # // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+ # Timestamp timestamp;
+ # timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+ # timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+ #
+ # Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+ #
+ # long millis = System.currentTimeMillis();
+ #
+ # Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+ # .setNanos((int) ((millis % 1000) * 1000000)).build();
+ #
+ #
+ # Example 5: Compute Timestamp from Java `Instant.now()`.
+ #
+ # Instant now = Instant.now();
+ #
+ # Timestamp timestamp =
+ # Timestamp.newBuilder().setSeconds(now.getEpochSecond())
+ # .setNanos(now.getNano()).build();
+ #
+ #
+ # Example 6: Compute Timestamp from current time in Python.
+ #
+ # timestamp = Timestamp()
+ # timestamp.GetCurrentTime()
+ #
+ # # JSON Mapping
+ #
+ # In JSON format, the Timestamp type is encoded as a string in the
+ # [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+ # format is "\\{year}-\\{month}-\\{day}T\\{hour}:\\{min}:\\{sec}[.\\{frac_sec}]Z"
+ # where \\{year} is always expressed using four digits while \\{month}, \\{day},
+ # \\{hour}, \\{min}, and \\{sec} are zero-padded to two digits each. The fractional
+ # seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+ # are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+ # is required. A proto3 JSON serializer should always use UTC (as indicated by
+ # "Z") when printing the Timestamp type and a proto3 JSON parser should be
+ # able to accept both UTC and other timezones (as indicated by an offset).
+ #
+ # For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+ # 01:30 UTC on January 15, 2017.
+ #
+ # In JavaScript, one can convert a Date object to this format using the
+ # standard
+ # [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+ # method. In Python, a standard `datetime.datetime` object can be converted
+ # to this format using
+ # [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
+ # the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
+ # the Joda Time's [`ISODateTimeFormat.dateTime()`](
+ # http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
+ # ) to obtain a formatter capable of generating timestamps in this format.
+ # @!attribute [rw] seconds
+ # @return [::Integer]
+ # Represents seconds of UTC time since Unix epoch
+ # 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ # 9999-12-31T23:59:59Z inclusive.
+ # @!attribute [rw] nanos
+ # @return [::Integer]
+ # Non-negative fractions of a second at nanosecond resolution. Negative
+ # second values with fractions must still have non-negative nanos values
+ # that count forward in time. Must be from 0 to 999,999,999
+ # inclusive.
+ class Timestamp
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
+ end
+ end
+ end
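The new proto_docs file above documents google.protobuf.Timestamp, which the change stream messages use for start_time, end_time, and the watermarks. The upstream comment shows C, Java, and Python examples; a Ruby equivalent is a short sketch, assuming the google-protobuf gem's well_known_types helpers are available.

    require "google/protobuf/well_known_types"

    # Sketch: build a Timestamp from a Ruby Time, e.g. for ReadChangeStreamRequest#start_time.
    now = Time.now.utc

    ts = Google::Protobuf::Timestamp.new(
      seconds: now.to_i,  # whole seconds since the Unix epoch
      nanos:   now.nsec   # fractional part, 0..999_999_999
    )

    ts.to_time  # => back to a Ruby Time (helper added by well_known_types)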
@@ -28,12 +28,14 @@ module Google
  # [API Design Guide](https://cloud.google.com/apis/design/errors).
  # @!attribute [rw] code
  # @return [::Integer]
- # The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+ # The status code, which should be an enum value of
+ # [google.rpc.Code][google.rpc.Code].
  # @!attribute [rw] message
  # @return [::String]
  # A developer-facing error message, which should be in English. Any
  # user-facing error message should be localized and sent in the
- # {::Google::Rpc::Status#details google.rpc.Status.details} field, or localized by the client.
+ # {::Google::Rpc::Status#details google.rpc.Status.details} field, or localized
+ # by the client.
  # @!attribute [rw] details
  # @return [::Array<::Google::Protobuf::Any>]
  # A list of messages that carry the error details. There is a common set of
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: google-cloud-bigtable-v2
  version: !ruby/object:Gem::Version
- version: 0.7.1
+ version: 0.8.0
  platform: ruby
  authors:
  - Google LLC
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-10-18 00:00:00.000000000 Z
+ date: 2023-02-23 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: gapic-common
@@ -16,7 +16,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '0.12'
+ version: 0.17.1
  - - "<"
  - !ruby/object:Gem::Version
  version: 2.a
@@ -26,7 +26,7 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '0.12'
+ version: 0.17.1
  - - "<"
  - !ruby/object:Gem::Version
  version: 2.a
@@ -50,14 +50,14 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: 1.26.1
+ version: 1.26.3
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: 1.26.1
+ version: 1.26.3
  - !ruby/object:Gem::Dependency
  name: minitest
  requirement: !ruby/object:Gem::Requirement
@@ -183,7 +183,9 @@ files:
  - lib/google/cloud/bigtable/v2/bigtable/paths.rb
  - lib/google/cloud/bigtable/v2/version.rb
  - proto_docs/README.md
+ - proto_docs/google/api/client.rb
  - proto_docs/google/api/field_behavior.rb
+ - proto_docs/google/api/launch_stage.rb
  - proto_docs/google/api/resource.rb
  - proto_docs/google/api/routing.rb
  - proto_docs/google/bigtable/v2/bigtable.rb
@@ -192,6 +194,7 @@ files:
  - proto_docs/google/bigtable/v2/response_params.rb
  - proto_docs/google/protobuf/any.rb
  - proto_docs/google/protobuf/duration.rb
+ - proto_docs/google/protobuf/timestamp.rb
  - proto_docs/google/protobuf/wrappers.rb
  - proto_docs/google/rpc/status.rb
  homepage: https://github.com/googleapis/google-cloud-ruby
@@ -213,7 +216,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.3.14
+ rubygems_version: 3.4.2
  signing_key:
  specification_version: 4
  summary: API Client library for the Cloud Bigtable V2 API