fluent-plugin-google-cloud 0.6.20 → 0.6.21

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 38cbf8464d2ca792e5d134d1edd66a2a4f6654ca
- data.tar.gz: 2d15be19df0ca83a865c0cea38a4199f6ef42b7e
+ metadata.gz: 33b71adc0c744f682e9f3548911ec4f550988864
+ data.tar.gz: 3c45d732c056e34e32f05f2708012f4b69205cca
  SHA512:
- metadata.gz: 04eaab1746f9d66c14cd402fb6f7960affbc2e36ac79ccecc84eb00c8af19e5793f7c591d94cfa1765f7a3f0d8e1807ecd24238a4209f99d29635a61a4793e68
- data.tar.gz: 6a692ee7e882ce8b36e119432229b4fcc3f7aa855c59f1e806943885c465165a10d664b0a42baac1577277039d94ddb0d8f05a7fa035196a55b894d8ad606d1c
+ metadata.gz: 9dab6cb5bcecc828bc025458f3adb04ba0690fcac1052c936a7e3beaebca7ea35de72295f694e73342c92a5cf1129be7bcc2e5040d405280bb23822c6f7521fb
+ data.tar.gz: 4ea689e785e16c15c4b30286203aaf54caa5e23aa1987a30252f75ec02278a41a3fa064170e4b5233de9f2104cdf1ae86f7012dd85c19f773c44e25034cfcf10
@@ -10,7 +10,7 @@ eos
  gem.homepage =
  'https://github.com/GoogleCloudPlatform/fluent-plugin-google-cloud'
  gem.license = 'Apache-2.0'
- gem.version = '0.6.20'
+ gem.version = '0.6.21'
  gem.authors = ['Ling Huang', 'Igor Peshansky']
  gem.email = ['stackdriver-agents@google.com']
  gem.required_ruby_version = Gem::Requirement.new('>= 2.2')
@@ -227,7 +227,7 @@ module Fluent
  Fluent::Plugin.register_output('google_cloud', self)

  PLUGIN_NAME = 'Fluentd Google Cloud Logging plugin'.freeze
- PLUGIN_VERSION = '0.6.20'.freeze
+ PLUGIN_VERSION = '0.6.21'.freeze

  # Name of the the Google cloud logging write scope.
  LOGGING_SCOPE = 'https://www.googleapis.com/auth/logging.write'.freeze
@@ -396,6 +396,9 @@ module Fluent
  # requests when talking to Stackdriver Logging API.
  config_param :split_logs_by_tag, :bool, :default => false

+ # Whether to attempt adjusting invalid log entry timestamps.
+ config_param :adjust_invalid_timestamps, :bool, :default => true
+
  # rubocop:enable Style/HashSyntax

  # TODO: Add a log_name config option rather than just using the tag?
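The new flag defaults to true, so existing deployments keep the current timestamp-adjustment behavior unless they opt out. As a rough illustration (the match pattern and surrounding directives are placeholders, not taken from this diff), disabling it in a Fluentd configuration could look like:

    <match **>
      @type google_cloud
      adjust_invalid_timestamps false
    </match>

This mirrors the NO_ADJUST_TIMESTAMPS_CONFIG snippet added to the test constants further down.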
@@ -792,6 +795,7 @@ module Fluent
  GRPC::PermissionDenied
  error_details_map = construct_error_details_map_grpc(gax_error)
  if error_details_map.empty?
+ increment_failed_requests_count(error.code)
  increment_dropped_entries_count(entries_count, error.code)
  @log.warn "Dropping #{entries_count} log message(s)",
  error: error.to_s, error_code: error.code.to_s
@@ -1249,27 +1253,10 @@ module Fluent
  # Examples:
  # "container.<container_id>" // Docker container.
  # "k8s_pod.<namespace_name>.<pod_name>" // GKE pod.
- if @enable_metadata_agent && local_resource_id
- @log.debug 'Calling metadata agent with local_resource_id: ' \
- "#{local_resource_id}."
- retrieved_resource = query_metadata_agent_for_monitored_resource(
+ if local_resource_id
+ converted_resource = monitored_resource_from_local_resource_id(
  local_resource_id)
- @log.debug 'Retrieved monitored resource from metadata agent: ' \
- "#{retrieved_resource.inspect}."
- if retrieved_resource
- resource = retrieved_resource
- # TODO(qingling128): Fix this temporary renaming from 'gke_container'
- # to 'container'.
- resource.type = 'container' if resource.type == 'gke_container'
- else
- # TODO(qingling128): This entire else clause is temporary before we
- # implement buffering and caching.
- @log.warn('Failed to retrieve monitored resource from Metadata' \
- " Agent with local_resource_id #{local_resource_id}.")
- constructed_k8s_resource = construct_k8s_resource_locally(
- local_resource_id)
- resource = constructed_k8s_resource if constructed_k8s_resource
- end
+ resource = converted_resource if converted_resource
  end

  # Once the resource type is settled down, determine the labels.
@@ -1349,6 +1336,32 @@ module Fluent
  [resource, common_labels]
  end

+ # Take a locally unique resource id and convert it to the globally unique
+ # monitored resource.
+ def monitored_resource_from_local_resource_id(local_resource_id)
+ return unless local_resource_id
+ if @enable_metadata_agent
+ @log.debug 'Calling metadata agent with local_resource_id: ' \
+ "#{local_resource_id}."
+ resource = query_metadata_agent_for_monitored_resource(
+ local_resource_id)
+ @log.debug 'Retrieved monitored resource from metadata agent: ' \
+ "#{resource.inspect}."
+ if resource
+ # TODO(qingling128): Fix this temporary renaming from 'gke_container'
+ # to 'container'.
+ resource.type = 'container' if resource.type == 'gke_container'
+ return resource
+ end
+ end
+ # Fall back to constructing monitored resource locally.
+ # TODO(qingling128): This entire else clause is temporary until we
+ # implement buffering and caching.
+ @log.debug('Failed to retrieve monitored resource from Metadata' \
+ " Agent with local_resource_id #{local_resource_id}.")
+ construct_k8s_resource_locally(local_resource_id)
+ end
+
  # Extract entry level monitored resource and common labels that should be
  # applied to individual entries.
  def determine_entry_level_monitored_resource_and_labels(
@@ -1578,22 +1591,22 @@ module Fluent
  ts_nanos
  end

- # Adjust timestamps from the future.
- # There are two cases:
- # 1. The parsed timestamp is later in the current year:
- # This can happen when system log lines from previous years are missing
- # the year, so the date parser assumes the current year.
- # We treat these lines as coming from last year. This could label
- # 2-year-old logs incorrectly, but this probably isn't super important.
- #
- # 2. The parsed timestamp is past the end of the current year:
- # Since the year is different from the current year, this isn't the
- # missing year in system logs. It is unlikely that users explicitly
- # write logs at a future date. This could result from an unsynchronized
- # clock on a VM, or some random value being parsed as the timestamp.
- # We reset the timestamp on those lines to the default value and let the
- # downstream API handle it.
- if timestamp
+ if @adjust_invalid_timestamps && timestamp
+ # Adjust timestamps from the future.
+ # There are two cases:
+ # 1. The parsed timestamp is later in the current year:
+ # This can happen when system log lines from previous years are missing
+ # the year, so the date parser assumes the current year.
+ # We treat these lines as coming from last year. This could label
+ # 2-year-old logs incorrectly, but this probably isn't super important.
+ #
+ # 2. The parsed timestamp is past the end of the current year:
+ # Since the year is different from the current year, this isn't the
+ # missing year in system logs. It is unlikely that users explicitly
+ # write logs at a future date. This could result from an unsynchronized
+ # clock on a VM, or some random value being parsed as the timestamp.
+ # We reset the timestamp on those lines to the default value and let the
+ # downstream API handle it.
  next_year = Time.mktime(current_time.year + 1)
  one_day_later = current_time.to_datetime.next_day.to_time
  if timestamp >= next_year # Case 2.
@@ -1605,6 +1618,7 @@ module Fluent
  # The value of ts_nanos should not change when subtracting a year.
  end
  end
+
  [ts_secs, ts_nanos]
  end
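For reference, the rules that @adjust_invalid_timestamps now guards can be summarized in a standalone Ruby sketch. This is a simplified paraphrase for illustration only (the helper name sketch_adjust_timestamp is made up, nanoseconds are ignored, and the plugin's actual code is the hunk above):

    require 'date'

    def sketch_adjust_timestamp(timestamp, current_time)
      next_year = Time.mktime(current_time.year + 1)
      one_day_later = current_time.to_datetime.next_day.to_time
      if timestamp >= next_year
        # Case 2: a different (future) year; reset to the default value.
        Time.at(0)
      elsif timestamp >= one_day_later
        # Case 1: later in the current year; assume the year was inferred
        # wrongly and treat the line as last year's.
        Time.mktime(timestamp.year - 1, timestamp.month, timestamp.day,
                    timestamp.hour, timestamp.min, timestamp.sec)
      else
        timestamp
      end
    end

    now = Time.mktime(2018, 6, 28)
    sketch_adjust_timestamp(Time.mktime(2018, 12, 31), now) # => Dec 31, 2017 (Case 1)
    sketch_adjust_timestamp(Time.mktime(2030, 1, 1), now)   # => Time.at(0)   (Case 2)

With adjust_invalid_timestamps false, both inputs would pass through unchanged, which is what the NO_ADJUST_TIMESTAMPS_CONFIG expectations in the updated test below assert.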
@@ -2172,6 +2186,9 @@ module Fluent
  error_details = ensure_array(gax_error.status_details)
  raise JSON::ParserError, 'The error details are empty.' if
  error_details.empty?
+ raise JSON::ParserError, 'No partial error info in error details.' unless
+ error_details[0].is_a?(
+ Google::Logging::V2::WriteLogEntriesPartialErrors)
  log_entry_errors = ensure_hash(error_details[0].log_entry_errors)
  log_entry_errors.each do |index, log_entry_error|
  error_key = [log_entry_error[:code], log_entry_error[:message]].freeze
@@ -724,58 +724,65 @@ module BaseTest
  one_second_into_next_year = next_year + 1
  one_day_into_next_year = next_year.to_date.next_day.to_time
  {
- Time.at(123_456.789) => Time.at(123_456.789),
- Time.at(0) => Time.at(0),
- current_time => current_time,
- one_second_before_next_year => adjusted_to_last_year,
- next_year => Time.at(0),
- one_second_into_next_year => Time.at(0),
- one_day_into_next_year => Time.at(0)
- }.each do |ts, adjusted_ts|
- expected_ts = []
- emit_index = 0
- setup_logging_stubs do
- @logs_sent = []
- d = create_driver
- # Test the "native" fluentd timestamp as well as our nanosecond tags.
- d.emit({ 'message' => log_entry(emit_index) }, ts.to_f)
- expected_ts.push(adjusted_ts)
- emit_index += 1
- d.emit('message' => log_entry(emit_index),
- 'timeNanos' => ts.tv_sec * 1_000_000_000 + ts.tv_nsec)
- expected_ts.push(adjusted_ts)
- emit_index += 1
- d.emit('message' => log_entry(emit_index),
- 'timestamp' => { 'seconds' => ts.tv_sec, 'nanos' => ts.tv_nsec })
- expected_ts.push(adjusted_ts)
- emit_index += 1
- d.emit('message' => log_entry(emit_index),
- 'timestampSeconds' => ts.tv_sec, 'timestampNanos' => ts.tv_nsec)
- expected_ts.push(adjusted_ts)
- emit_index += 1
- d.emit('message' => log_entry(emit_index),
- 'timestampSeconds' => ts.tv_sec.to_s,
- 'timestampNanos' => ts.tv_nsec.to_s)
- expected_ts.push(adjusted_ts)
- emit_index += 1
- d.run
- verify_index = 0
- verify_log_entries(emit_index, COMPUTE_PARAMS) do |entry, i|
- verify_default_log_entry_text(entry['textPayload'], i, entry)
- assert_equal_with_default entry['timestamp']['seconds'],
- expected_ts[verify_index].tv_sec, 0, entry
- assert_equal_with_default \
- entry['timestamp']['nanos'],
- expected_ts[verify_index].tv_nsec, 0, entry do
- # Fluentd v0.14 onwards supports nanosecond timestamp values.
- # Added in 600 ns delta to avoid flaky tests introduced
- # due to rounding error in double-precision floating-point numbers
- # (to account for the missing 9 bits of precision ~ 512 ns).
- # See http://wikipedia.org/wiki/Double-precision_floating-point_format.
- assert_in_delta expected_ts[verify_index].tv_nsec,
- entry['timestamp']['nanos'], 600, entry
+ APPLICATION_DEFAULT_CONFIG => {
+ Time.at(123_456.789) => Time.at(123_456.789),
+ Time.at(0) => Time.at(0),
+ current_time => current_time,
+ one_second_before_next_year => adjusted_to_last_year,
+ next_year => Time.at(0),
+ one_second_into_next_year => Time.at(0),
+ one_day_into_next_year => Time.at(0)
+ },
+ NO_ADJUST_TIMESTAMPS_CONFIG => {
+ Time.at(123_456.789) => Time.at(123_456.789),
+ Time.at(0) => Time.at(0),
+ current_time => current_time,
+ one_second_before_next_year => one_second_before_next_year,
+ next_year => next_year,
+ one_second_into_next_year => one_second_into_next_year,
+ one_day_into_next_year => one_day_into_next_year
+ }
+ }.each do |config, timestamps|
+ timestamps.each do |ts, expected_ts|
+ emit_index = 0
+ setup_logging_stubs do
+ @logs_sent = []
+ d = create_driver(config)
+ # Test the "native" fluentd timestamp as well as our nanosecond tags.
+ d.emit({ 'message' => log_entry(emit_index) }, ts.to_f)
+ emit_index += 1
+ d.emit('message' => log_entry(emit_index),
+ 'timeNanos' => ts.tv_sec * 1_000_000_000 + ts.tv_nsec)
+ emit_index += 1
+ d.emit('message' => log_entry(emit_index),
+ 'timestamp' => { 'seconds' => ts.tv_sec,
+ 'nanos' => ts.tv_nsec })
+ emit_index += 1
+ d.emit('message' => log_entry(emit_index),
+ 'timestampSeconds' => ts.tv_sec,
+ 'timestampNanos' => ts.tv_nsec)
+ emit_index += 1
+ d.emit('message' => log_entry(emit_index),
+ 'timestampSeconds' => ts.tv_sec.to_s,
+ 'timestampNanos' => ts.tv_nsec.to_s)
+ emit_index += 1
+ d.run
+ verify_log_entries(emit_index, COMPUTE_PARAMS) do |entry, i|
+ verify_default_log_entry_text(entry['textPayload'], i, entry)
+ assert_equal_with_default entry['timestamp']['seconds'],
+ expected_ts.tv_sec, 0, entry
+ assert_equal_with_default \
+ entry['timestamp']['nanos'],
+ expected_ts.tv_nsec, 0, entry do
+ # Fluentd v0.14 onwards supports nanosecond timestamp values.
+ # Added in 600 ns delta to avoid flaky tests introduced
+ # due to rounding error in double-precision floating-point numbers
+ # (to account for the missing 9 bits of precision ~ 512 ns).
+ # See http://wikipedia.org/wiki/Double-precision_floating-point_format.
+ assert_in_delta expected_ts.tv_nsec,
+ entry['timestamp']['nanos'], 600, entry
+ end
  end
- verify_index += 1
  end
  end
  end
@@ -1338,14 +1345,14 @@ module BaseTest
  setup_metadata_agent_stub: true,
  setup_k8s_stub: true,
  log_entry: k8s_container_log_entry(log_entry(0)),
- expected_params: COMPUTE_PARAMS
+ expected_params: K8S_CONTAINER_PARAMS_FROM_LOCAL
  },
  {
  config: APPLICATION_DEFAULT_CONFIG,
  setup_metadata_agent_stub: false,
  setup_k8s_stub: true,
  log_entry: k8s_container_log_entry(log_entry(0)),
- expected_params: COMPUTE_PARAMS
+ expected_params: K8S_CONTAINER_PARAMS_FROM_LOCAL
  },
  # When enable_metadata_agent is true.
  {
@@ -1409,7 +1416,7 @@ module BaseTest
  setup_metadata_agent_stub: true,
  setup_k8s_stub: true,
  log_entry: k8s_node_log_entry(log_entry(0)),
- expected_params: COMPUTE_PARAMS
+ expected_params: K8S_NODE_PARAMS_FROM_LOCAL
  },
  {
  config: ENABLE_METADATA_AGENT_CONFIG,
@@ -12,6 +12,36 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

+ # Add some helper methods to standard classes.
+ module Google
+ module Protobuf
+ Any.class_eval do
+ # TODO(igorpeshansky): Remove this once
+ # https://github.com/google/protobuf/pull/4719 gets released.
+ def self.pack(msg, type_url_prefix = 'type.googleapis.com/')
+ any = Google::Protobuf::Any.new
+ any.pack(msg, type_url_prefix)
+ any
+ end
+ end
+ end
+ end
+ String.class_eval do
+ def inspect_octal
+ specials = {
+ 'a' => '\\007',
+ 'b' => '\\010',
+ 'v' => '\\013',
+ 'f' => '\\014',
+ 'r' => '\\015'
+ }.freeze
+ inspect.gsub(/\\([abvfr])/) { specials[Regexp.last_match(1)] } \
+ .gsub(/\\x([0-9A-F][0-9A-F])/) do
+ format('\\%03o', Regexp.last_match(1).to_i(16))
+ end
+ end
+ end
+
  # Constants used by unit tests for Google Cloud Logging plugin.
  module Constants
  include Fluent::GoogleCloudOutput::ServiceConstants
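These two helpers let the constants below build gRPC metadata from real protobuf objects instead of the hand-maintained binary strings they replace: the class-level Any.pack shim wraps a message in a Google::Protobuf::Any, and String#inspect_octal renders a serialized payload with octal escapes for embedding in DebugInfo detail strings. A rough usage sketch, assuming the google-protobuf and googleapis-common-protos-types gems provide the requires shown and that the monkey patches above are loaded:

    require 'google/protobuf/well_known_types'
    require 'google/rpc/status_pb'

    # Wrap an arbitrary proto message in an Any via the class-level shim.
    status = Google::Rpc::Status.new(code: 3, message: 'bad request')
    any = Google::Protobuf::Any.pack(status)
    any.type_url            # => "type.googleapis.com/google.rpc.Status"
    any.value.inspect_octal # serialized payload rendered with octal escapes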
@@ -219,6 +249,10 @@ module Constants
  split_logs_by_tag true
  ).freeze

+ NO_ADJUST_TIMESTAMPS_CONFIG = %(
+ adjust_invalid_timestamps false
+ ).freeze
+
  ENABLE_PROMETHEUS_CONFIG = %(
  enable_monitoring true
  monitoring_type prometheus
@@ -809,45 +843,69 @@ module Constants
  }
  }.freeze

- # rubocop:disable Style/StringLiterals
- PARTIAL_SUCCESS_GRPC_METADATA = {
- 'google.logging.v2.writelogentriespartialerrors-bin' =>
- Google::Logging::V2::WriteLogEntriesPartialErrors.encode(
- Google::Logging::V2::WriteLogEntriesPartialErrors.new(
- log_entry_errors: {
- 0 => Google::Rpc::Status.new(
- code: GRPC::Core::StatusCodes::PERMISSION_DENIED,
- message: "User not authorized.",
- details: []),
- 1 => Google::Rpc::Status.new(
- code: GRPC::Core::StatusCodes::INVALID_ARGUMENT,
- message: "Log name contains illegal character :",
- details: []),
- 3 => Google::Rpc::Status.new(
- code: GRPC::Core::StatusCodes::INVALID_ARGUMENT,
- message: "Log name contains illegal character :",
- details: []) })),
- 'google.rpc.debuginfo-bin' =>
- "\x12\xA7\x03[ORIGINAL ERROR] generic::permission_denied: User not auth" \
- "orized. [google.rpc.error_details_ext] { message: \"User not authorize" \
- "d.\" details { type_url: \"type.googleapis.com/google.logging.v2.Write" \
- "LogEntriesPartialErrors\" value: \"\\n\\034\\010\\000\\022\\030\\010\\" \
- "007\\022\\024User not authorized.\\n-\\010\\001\\022)\\010\\003\\022%L" \
- "og name contains illegal character :\\n-\\010\\002\\022)\\010\\003\\02" \
- "2%Log name contains illegal character :\" } }",
- 'grpc-status-details-bin' =>
- "\b\a\x12\x14User not authorized.\x1A\xC2\x01\nBtype.googleapis.com/goo" \
- "gle.logging.v2.WriteLogEntriesPartialErrors\x12|\n\x1C\b\x00\x12\x18\b" \
- "\a\x12\x14User not authorized.\n-\b\x01\x12)\b\x03\x12%Log name contai" \
- "ns illegal character :\n-\b\x02\x12)\b\x03\x12%Log name contains illeg" \
- "al character :\x1A\xD7\x03\n(type.googleapis.com/google.rpc.DebugInfo" \
- "\x12\xAA\x03\x12\xA7\x03[ORIGINAL ERROR] generic::permission_denied: U" \
- "ser not authorized. [google.rpc.error_details_ext] { message: \"User n" \
- "ot authorized.\" details { type_url: \"type.googleapis.com/google.logg" \
- "ing.v2.WriteLogEntriesPartialErrors\" value: \"\\n\\034\\010\\000\\022" \
- "\\030\\010\\007\\022\\024User not authorized.\\n-\\010\\001\\022)\\010" \
- "\\003\\022%Log name contains illegal character :\\n-\\010\\002\\022)" \
- "\\010\\003\\022%Log name contains illegal character :\" } }"
+ PARTIAL_SUCCESS_GRPC_METADATA = begin
+ partial_errors = Google::Logging::V2::WriteLogEntriesPartialErrors.new(
+ log_entry_errors: {
+ 0 => Google::Rpc::Status.new(
+ code: GRPC::Core::StatusCodes::PERMISSION_DENIED,
+ message: 'User not authorized.',
+ details: []),
+ 1 => Google::Rpc::Status.new(
+ code: GRPC::Core::StatusCodes::INVALID_ARGUMENT,
+ message: 'Log name contains illegal character :',
+ details: []),
+ 3 => Google::Rpc::Status.new(
+ code: GRPC::Core::StatusCodes::INVALID_ARGUMENT,
+ message: 'Log name contains illegal character :',
+ details: [])
+ })
+ status = Google::Rpc::Status.new(
+ message: 'User not authorized.',
+ details: [Google::Protobuf::Any.pack(partial_errors)])
+ debug_info = Google::Rpc::DebugInfo.new(
+ detail: '[ORIGINAL ERROR] generic::permission_denied: User not' \
+ ' authorized. [google.rpc.error_details_ext] { message:' \
+ " #{status.message.inspect} details { type_url:" \
+ " #{status.details[0].type_url.inspect} value:" \
+ " #{status.details[0].value.inspect_octal} } }")
+ status_details = Google::Rpc::Status.new(
+ code: 7, message: 'User not authorized.',
+ details: [Google::Protobuf::Any.pack(partial_errors),
+ Google::Protobuf::Any.pack(debug_info)])
+ {
+ 'google.logging.v2.writelogentriespartialerrors-bin' =>
+ partial_errors.to_proto,
+ 'google.rpc.debuginfo-bin' => debug_info.to_proto,
+ 'grpc-status-details-bin' => status_details.to_proto
+ }.freeze
+ end
+
+ PARSE_ERROR_RESPONSE_BODY = {
+ 'error' => {
+ 'code' => 400,
+ 'message' => 'Request contains an invalid argument.',
+ 'status' => 'INVALID_ARGUMENT',
+ 'details' => [
+ {
+ '@type' => 'type.googleapis.com/google.rpc.DebugInfo',
+ 'detail' =>
+ '[ORIGINAL ERROR] RPC::CLIENT_ERROR: server could not parse' \
+ " request sent by client; initialization error is: ''"
+ }
+ ]
+ }
+ }
  }.freeze
- # rubocop:enable Style/StringLiterals
+
+ PARSE_ERROR_GRPC_METADATA = begin
+ debug_info = Google::Rpc::DebugInfo.new(
+ detail: '[ORIGINAL ERROR] RPC::CLIENT_ERROR: server could not parse' \
+ " request sent by client; initialization error is: ''")
+ status_details = Google::Rpc::Status.new(
+ code: 3, message: 'internal client error',
+ details: [Google::Protobuf::Any.pack(debug_info)])
+ {
+ 'google.rpc.debuginfo-bin' => debug_info.to_proto,
+ 'grpc-status-details-bin' => status_details.to_proto
+ }.freeze
+ end
  end
@@ -81,6 +81,31 @@ class GoogleCloudOutputTest < Test::Unit::TestCase
  assert_requested(:post, WRITE_LOG_ENTRIES_URI, times: 1)
  end

+ def test_non_api_error
+ setup_gce_metadata_stubs
+ setup_prometheus
+ # The API Client should not retry this and the plugin should consume
+ # the exception.
+ root_error_code = PARSE_ERROR_RESPONSE_BODY['error']['code']
+ stub_request(:post, WRITE_LOG_ENTRIES_URI)
+ .to_return(status: root_error_code,
+ body: PARSE_ERROR_RESPONSE_BODY.to_json)
+ d = create_driver(ENABLE_PROMETHEUS_CONFIG)
+ d.emit('message' => log_entry(0))
+ d.run
+ assert_prometheus_metric_value(
+ :stackdriver_successful_requests_count, 0, grpc: false, code: 200)
+ assert_prometheus_metric_value(
+ :stackdriver_failed_requests_count, 1, grpc: false, code: 400)
+ assert_prometheus_metric_value(
+ :stackdriver_ingested_entries_count, 0, grpc: false, code: 200)
+ assert_prometheus_metric_value(
+ :stackdriver_dropped_entries_count, 1, grpc: false, code: 400)
+ assert_prometheus_metric_value(
+ :stackdriver_retried_entries_count, 0, grpc: false)
+ assert_requested(:post, WRITE_LOG_ENTRIES_URI, times: 1)
+ end
+
  def test_server_error
  setup_gce_metadata_stubs
  # The API client should retry this once, then throw an exception which
@@ -97,6 +97,34 @@ class GoogleCloudOutputGRPCTest < Test::Unit::TestCase
  end
  end

+ def test_non_api_error
+ setup_gce_metadata_stubs
+ setup_prometheus
+ setup_logging_stubs(
+ GRPC::InvalidArgument.new('internal client error',
+ PARSE_ERROR_GRPC_METADATA)) do
+ # The API Client should not retry this and the plugin should consume
+ # the exception.
+ d = create_driver(ENABLE_PROMETHEUS_CONFIG)
+ d.emit('message' => log_entry(0))
+ d.run
+ assert_prometheus_metric_value(
+ :stackdriver_successful_requests_count, 0,
+ grpc: true, code: GRPC::Core::StatusCodes::OK)
+ assert_prometheus_metric_value(
+ :stackdriver_failed_requests_count, 1,
+ grpc: true, code: GRPC::Core::StatusCodes::INVALID_ARGUMENT)
+ assert_prometheus_metric_value(
+ :stackdriver_ingested_entries_count, 0,
+ grpc: true, code: GRPC::Core::StatusCodes::OK)
+ assert_prometheus_metric_value(
+ :stackdriver_dropped_entries_count, 1,
+ grpc: true, code: GRPC::Core::StatusCodes::INVALID_ARGUMENT)
+ assert_prometheus_metric_value(
+ :stackdriver_retried_entries_count, 0, grpc: true)
+ end
+ end
+
  def test_server_error
  setup_gce_metadata_stubs
  {
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: fluent-plugin-google-cloud
  version: !ruby/object:Gem::Version
- version: 0.6.20
+ version: 0.6.21
  platform: ruby
  authors:
  - Ling Huang
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2018-06-20 00:00:00.000000000 Z
+ date: 2018-06-28 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: fluentd