fluent-plugin-bigquery 3.1.0 → 3.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 52e15b9cc1e5fba553895298e0e1a4510b2c3be0e333a8c9853ef8fb9a30e721
- data.tar.gz: 9be1a5a48e75f63bd83c103111664690a7e8fa583dfda548a7da2dfd3437960f
+ metadata.gz: 2a8c0c40659fb474ce5c072f6aa8c9708c4f93755da5c2a763de19e9c3971b86
+ data.tar.gz: 2c9366bf88b9c7a6673c765bb6d57e06e24aae6dd0a731458470d122905086f7
  SHA512:
- metadata.gz: 8fd48a77fa9cf4b04706c4c3d041aa36ccc5011024fd6b37287c7ac661d0137458940e832410ae14a2385d77a0370908a22a6e856cbc9de4194da5a0866691ff
- data.tar.gz: aff96e78358ced9a0a213739e8968bc4caa65afa1915ba4bc1a4660161978418ced12dbdec539ef960967f628c8893fd821db28ffe4e4401fe22010e200934ee
+ metadata.gz: 558e461e2c19e70e1748185962f33e3b827c95c293afad90737a8cd9da57fea28bf2109a0660e0f112e7ae9ea60dd64f063960017123fcf84cce434bb7883c8f
+ data.tar.gz: aab29d009005ed142542397498da79a5b875dfae989c529aa102194fe142db7247cdf2034a774362cdb25603f0a000a23338306e859232e0981d33c8c928075c
data/.github/workflows/linux.yml CHANGED
@@ -9,9 +9,10 @@ jobs:
  fail-fast: false
  matrix:
  ruby:
- - 2.7
  - 3.0
  - 3.1
+ - 3.2
+ - 3.3
  os:
  - ubuntu-latest
  name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
@@ -25,6 +26,5 @@ jobs:
  CI: true
  run: |
  ruby -v
- gem install bundler rake
  bundle install --jobs 4 --retry 3
  bundle exec rake test
data/.github/workflows/windows.yml CHANGED
@@ -9,9 +9,10 @@ jobs:
  fail-fast: false
  matrix:
  ruby:
- - 2.7
  - 3.0
  - 3.1
+ - 3.2
+ - 3.3
  os:
  - windows-latest
  name: Ruby ${{ matrix.ruby }} unit testing on ${{ matrix.os }}
@@ -25,6 +26,5 @@ jobs:
  CI: true
  run: |
  ruby -v
- gem install bundler rake
  bundle install --jobs 4 --retry 3
  bundle exec rake test
data/.gitignore CHANGED
@@ -17,5 +17,8 @@ test/tmp
  test/version_tmp
  tmp
  script/
+ .idea/
 
  fluentd-0.12
+
+ integration/log
data/CHANGELOG.md ADDED
@@ -0,0 +1,8 @@
+ ## [v3.1.0](https://github.com/fluent-plugins-nursery/fluent-plugin-bigquery/compare/v3.0.1...v3.1.0) (2022-12-16)
+
+
+ ### Features
+
+ * Support GEOGRAPHY type field ([#201](https://github.com/fluent-plugins-nursery/fluent-plugin-bigquery/issues/201)) ([734faa9](https://github.com/fluent-plugins-nursery/fluent-plugin-bigquery/commit/734faa9adb7cec1ed579fc6a0bd9ce72d48b82d0))
+ * Support JSON type field ([#204](https://github.com/fluent-plugins-nursery/fluent-plugin-bigquery/issues/204)) ([ec62bfa](https://github.com/fluent-plugins-nursery/fluent-plugin-bigquery/commit/ec62bfa2f858feb440e8bb8e8f8d6b8689f709bb))
+
data/README.md CHANGED
@@ -30,7 +30,7 @@ If you use official alpine based fluentd docker image (https://github.com/fluent
  You need to install `bigdecimal` gem on your own dockerfile.
  Because alpine based image has only minimal ruby environment in order to reduce image size.
  And in most case, dependency to embedded gem is not written on gemspec.
- Because embbeded gem dependency sometimes restricts ruby environment.
+ Because embedded gem dependency sometimes restricts ruby environment.
 
  ## Configuration
 
@@ -40,12 +40,11 @@ Because embbeded gem dependency sometimes restricts ruby environment.
 
  | name | type | required? | placeholder? | default | description |
  | :-------------------------------------------- | :------------ | :----------- | :---------- | :------------------------- | :----------------------- |
- | auth_method | enum | yes | no | private_key | `private_key` or `json_key` or `compute_engine` or `application_default` |
+ | auth_method | enum | yes | no | private_key | `private_key` or `json_key` or `compute_engine` or `application_default` (GKE Workload Identity) |
  | email | string | yes (private_key) | no | nil | GCP Service Account Email |
  | private_key_path | string | yes (private_key) | no | nil | GCP Private Key file path |
  | private_key_passphrase | string | yes (private_key) | no | nil | GCP Private Key Passphrase |
  | json_key | string | yes (json_key) | no | nil | GCP JSON Key file path or JSON Key string |
- | location | string | no | no | nil | BigQuery Data Location. The geographic location of the job. Required except for US and EU. |
  | project | string | yes | yes | nil | |
  | dataset | string | yes | yes | nil | |
  | table | string | yes (either `tables`) | yes | nil | |
@@ -59,7 +58,7 @@ Because embbeded gem dependency sometimes restricts ruby environment.
  | schema_cache_expire | integer | no | no | 600 | Value is second. If current time is after expiration interval, re-fetch table schema definition. |
  | request_timeout_sec | integer | no | no | nil | Bigquery API response timeout |
  | request_open_timeout_sec | integer | no | no | 60 | Bigquery API connection, and request timeout. If you send big data to Bigquery, set large value. |
- | time_partitioning_type | enum | no (either day) | no | nil | Type of bigquery time partitioning feature. |
+ | time_partitioning_type | enum | no (either day or hour) | no | nil | Type of bigquery time partitioning feature. |
  | time_partitioning_field | string | no | no | nil | Field used to determine how to create a time-based partition. |
  | time_partitioning_expiration | time | no | no | nil | Expiration milliseconds for bigquery time partitioning. |
  | clustering_fields | array(string) | no | no | nil | One or more fields on which data should be clustered. The order of the specified columns determines the sort order of the data. |
@@ -194,15 +193,15 @@ For high rate inserts over streaming inserts, you should specify flush intervals
  ```apache
  <match dummy>
  @type bigquery_insert
-
+
  <buffer>
  flush_interval 0.1 # flush as frequent as possible
-
+
  total_limit_size 10g
-
+
  flush_thread_count 16
  </buffer>
-
+
  auth_method private_key # default
  email xxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxx@developer.gserviceaccount.com
  private_key_path /home/username/.keys/00000000000000000000000000000000-privatekey.p12
@@ -255,7 +254,7 @@ Important options for high rate events are:
  * threads for insert api calls in parallel
  * specify this option for 100 or more records per seconds
  * 10 or more threads seems good for inserts over internet
- * less threads may be good for Google Compute Engine instances (with low latency for BigQuery)
+ * fewer threads may be good for Google Compute Engine instances (with low latency for BigQuery)
  * `buffer/flush_interval`
  * interval between data flushes (default 0.25)
  * you can set subsecond values such as `0.15` on Fluentd v0.10.42 or later
@@ -294,7 +293,7 @@ There are four methods supported to fetch access token for the service account.
  1. Public-Private key pair of GCP(Google Cloud Platform)'s service account
  2. JSON key of GCP(Google Cloud Platform)'s service account
  3. Predefined access token (Compute Engine only)
- 4. Google application default credentials (http://goo.gl/IUuyuX)
+ 4. [Google application default credentials](https://cloud.google.com/docs/authentication/application-default-credentials) / GKE Workload Identity
 
  #### Public-Private key pair of GCP's service account
 
@@ -339,7 +338,7 @@ You need to only include `private_key` and `client_email` key from JSON key file
 
  #### Predefined access token (Compute Engine only)
 
- When you run fluentd on Googlce Compute Engine instance,
+ When you run fluentd on Google Compute Engine instance,
  you don't need to explicitly create a service account for fluentd.
  In this authentication method, you need to add the API scope "https://www.googleapis.com/auth/bigquery" to the scope list of your
  Compute Engine instance, then you can configure fluentd like this.
@@ -360,14 +359,16 @@ Compute Engine instance, then you can configure fluentd like this.
 
  #### Application default credentials
 
- The Application Default Credentials provide a simple way to get authorization credentials for use in calling Google APIs, which are described in detail at http://goo.gl/IUuyuX.
+ The Application Default Credentials provide a simple way to get authorization credentials for use in calling Google APIs, which are described in detail at https://cloud.google.com/docs/authentication/application-default-credentials.
+
+ **This is the method you should choose if you want to use Workload Identity on GKE**.
 
  In this authentication method, the credentials returned are determined by the environment the code is running in. Conditions are checked in the following order:credentials are get from following order.
 
  1. The environment variable `GOOGLE_APPLICATION_CREDENTIALS` is checked. If this variable is specified it should point to a JSON key file that defines the credentials.
- 2. The environment variable `GOOGLE_PRIVATE_KEY` and `GOOGLE_CLIENT_EMAIL` are checked. If this variables are specified `GOOGLE_PRIVATE_KEY` should point to `private_key`, `GOOGLE_CLIENT_EMAIL` should point to `client_email` in a JSON key.
- 3. Well known path is checked. If file is exists, the file used as a JSON key file. This path is `$HOME/.config/gcloud/application_default_credentials.json`.
- 4. System default path is checked. If file is exists, the file used as a JSON key file. This path is `/etc/google/auth/application_default_credentials.json`.
+ 2. The environment variable `GOOGLE_PRIVATE_KEY` and `GOOGLE_CLIENT_EMAIL` are checked. If these variables are specified `GOOGLE_PRIVATE_KEY` should point to `private_key`, `GOOGLE_CLIENT_EMAIL` should point to `client_email` in a JSON key.
+ 3. Well known path is checked. If the file exists, it is used as a JSON key file. This path is `$HOME/.config/gcloud/application_default_credentials.json`.
+ 4. System default path is checked. If the file exists, it is used as a JSON key file. This path is `/etc/google/auth/application_default_credentials.json`.
  5. If you are running in Google Compute Engine production, the built-in service account associated with the virtual machine instance will be used.
  6. If none of these conditions is true, an error will occur.
 
@@ -543,11 +544,11 @@ The second method is to specify a path to a BigQuery schema file instead of list
  @type bigquery_insert
 
  ...
-
+
  schema_path /path/to/httpd.schema
  </match>
  ```
- where /path/to/httpd.schema is a path to the JSON-encoded schema file which you used for creating the table on BigQuery. By using external schema file you are able to write full schema that does support NULLABLE/REQUIRED/REPEATED, this feature is really useful and adds full flexbility.
+ where /path/to/httpd.schema is a path to the JSON-encoded schema file which you used for creating the table on BigQuery. By using external schema file you are able to write full schema that does support NULLABLE/REQUIRED/REPEATED, this feature is really useful and adds full flexibility.
 
  The third method is to set `fetch_schema` to `true` to enable fetch a schema using BigQuery API. In this case, your fluent.conf looks like:
 
@@ -556,7 +557,7 @@ The third method is to set `fetch_schema` to `true` to enable fetch a schema usi
  @type bigquery_insert
 
  ...
-
+
  fetch_schema true
  # fetch_schema_table other_table # if you want to fetch schema from other table
  </match>
@@ -594,5 +595,5 @@ You can set `insert_id_field` option to specify the field to use as `insertId` p
  ## Authors
 
  * @tagomoris: First author, original version
- * KAIZEN platform Inc.: Maintener, Since 2014.08.19
+ * KAIZEN platform Inc.: Maintainer, Since 2014.08.19
  * @joker1007
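
For orientation, here is a minimal fluentd `<match>` sketch combining two of the options the README changes above touch: `auth_method application_default` (the Application Default Credentials / GKE Workload Identity path) and the newly allowed `time_partitioning_type hour`. The project, dataset, and table values are placeholders, not taken from the gem.

```apache
<match dummy>
  @type bigquery_insert

  auth_method application_default   # ADC / GKE Workload Identity

  project yourproject_id            # placeholder
  dataset yourdataset_id            # placeholder
  table   access_log                # placeholder

  time_partitioning_type hour       # day or hour as of this release
  time_partitioning_field time
  fetch_schema true
</match>
```
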
data/lib/fluent/plugin/bigquery/errors.rb CHANGED
@@ -5,6 +5,7 @@ module Fluent
  RETRYABLE_ERROR_REASON = %w(backendError internalError rateLimitExceeded tableUnavailable).freeze
  RETRYABLE_INSERT_ERRORS_REASON = %w(timeout backendError internalError rateLimitExceeded).freeze
  RETRYABLE_STATUS_CODE = [500, 502, 503, 504]
+ REGION_NOT_WRITABLE_MESSAGE = -"is not writable in the region"
 
  class << self
  # @param e [Google::Apis::Error]
@@ -19,6 +20,10 @@ module Fluent
 
  # @param e [Google::Apis::Error]
  def retryable_error?(e)
+ retryable_server_error?(e) || retryable_region_not_writable?(e)
+ end
+
+ def retryable_server_error?(e)
  e.is_a?(Google::Apis::ServerError) && RETRYABLE_STATUS_CODE.include?(e.status_code)
  end
 
@@ -30,6 +35,10 @@ module Fluent
  RETRYABLE_INSERT_ERRORS_REASON.include?(reason)
  end
 
+ def retryable_region_not_writable?(e)
+ e.is_a?(Google::Apis::ClientError) && e.status_code == 400 && e.message.include?(REGION_NOT_WRITABLE_MESSAGE)
+ end
+
  # Guard for instantiation
  private :new
  def inherited(subclass)
data/lib/fluent/plugin/bigquery/schema.rb CHANGED
@@ -116,6 +116,16 @@ module Fluent
  end
  end
 
+ class BigNumericFieldSchema < FieldSchema
+ def type
+ :bignumeric
+ end
+
+ def format_one(value, is_load: false)
+ value.to_s
+ end
+ end
+
  class BooleanFieldSchema < FieldSchema
  def type
  :boolean
@@ -200,6 +210,7 @@ module Fluent
  integer: IntegerFieldSchema,
  float: FloatFieldSchema,
  numeric: NumericFieldSchema,
+ bignumeric: BigNumericFieldSchema,
  boolean: BooleanFieldSchema,
  timestamp: TimestampFieldSchema,
  date: DateFieldSchema,
data/lib/fluent/plugin/bigquery/version.rb CHANGED
@@ -1,5 +1,5 @@
  module Fluent
  module BigQueryPlugin
- VERSION = "3.1.0".freeze
+ VERSION = "3.3.0".freeze
  end
  end
data/lib/fluent/plugin/bigquery/writer.rb CHANGED
@@ -101,6 +101,7 @@ module Fluent
  end
  end
  rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
+ log.debug "insert error: #{e.message}", status_code: e.respond_to?(:status_code) ? e.status_code : nil, reason: e.respond_to?(:reason) ? e.reason : nil
  error_data = { project_id: project, dataset: dataset, table: table_id, code: e.status_code, message: e.message }
  wrapped = Fluent::BigQuery::Error.wrap(e)
  if wrapped.retryable?
@@ -112,7 +113,7 @@ module Fluent
  raise wrapped
  end
 
- JobReference = Struct.new(:chunk_id, :chunk_id_hex, :project_id, :dataset_id, :table_id, :job_id) do
+ JobReference = Struct.new(:chunk_id, :chunk_id_hex, :project_id, :dataset_id, :table_id, :job_id, :location) do
  def as_hash(*keys)
  if keys.empty?
  to_h
@@ -161,7 +162,7 @@ module Fluent
  upload_source: upload_source,
  content_type: "application/octet-stream",
  )
- JobReference.new(chunk_id, chunk_id_hex, project, dataset, table_id, res.job_reference.job_id)
+ JobReference.new(chunk_id, chunk_id_hex, project, dataset, table_id, res.job_reference.job_id, res.job_reference.location)
  rescue Google::Apis::ServerError, Google::Apis::ClientError, Google::Apis::AuthorizationError => e
  log.error "job.load API", project_id: project, dataset: dataset, table: table_id, code: e.status_code, message: e.message
 
@@ -175,7 +176,7 @@ module Fluent
  def fetch_load_job(job_reference)
  project = job_reference.project_id
  job_id = job_reference.job_id
- location = @options[:location]
+ location = job_reference.location
 
  res = client.get_job(project, job_id, location: location)
  log.debug "load job fetched", id: job_id, state: res.status.state, **job_reference.as_hash(:project_id, :dataset_id, :table_id)
data/lib/fluent/plugin/out_bigquery_base.rb CHANGED
@@ -29,9 +29,6 @@ module Fluent
  config_param :private_key_path, :string, default: nil
  config_param :private_key_passphrase, :string, default: 'notasecret', secret: true
  config_param :json_key, default: nil, secret: true
- # The geographic location of the job. Required except for US and EU.
- # https://github.com/googleapis/google-api-ruby-client/blob/master/generated/google/apis/bigquery_v2/service.rb#L350
- config_param :location, :string, default: nil
 
  # see as simple reference
  # https://github.com/abronte/BigQuery/blob/master/lib/bigquery.rb
@@ -69,7 +66,7 @@ module Fluent
  config_param :request_open_timeout_sec, :time, default: 60
 
  ## Partitioning
- config_param :time_partitioning_type, :enum, list: [:day], default: nil
+ config_param :time_partitioning_type, :enum, list: [:day, :hour], default: nil
  config_param :time_partitioning_field, :string, default: nil
  config_param :time_partitioning_expiration, :time, default: nil
 
@@ -135,7 +132,6 @@ module Fluent
  private_key_path: @private_key_path, private_key_passphrase: @private_key_passphrase,
  email: @email,
  json_key: @json_key,
- location: @location,
  source_format: @source_format,
  skip_invalid_rows: @skip_invalid_rows,
  ignore_unknown_values: @ignore_unknown_values,
data/test/plugin/test_out_bigquery_load.rb CHANGED
@@ -65,7 +65,10 @@ class BigQueryLoadOutputTest < Test::Unit::TestCase
  }
  }
  }, upload_source: duck_type(:write, :sync, :rewind), content_type: "application/octet-stream") do
- stub!.job_reference.stub!.job_id { "dummy_job_id" }
+ stub!.job_reference.stub! do |s|
+ s.job_id { "dummy_job_id" }
+ s.location { "us" }
+ end
  end
  end
 
@@ -118,7 +121,10 @@ class BigQueryLoadOutputTest < Test::Unit::TestCase
  },
  job_reference: {project_id: 'yourproject_id', job_id: satisfy { |x| x =~ /fluentd_job_.*/}} ,
  }, upload_source: duck_type(:write, :sync, :rewind), content_type: "application/octet-stream") do
- stub!.job_reference.stub!.job_id { "dummy_job_id" }
+ stub!.job_reference.stub! do |s|
+ s.job_id { "dummy_job_id" }
+ s.location { "us" }
+ end
  end
  end
 
@@ -155,10 +161,13 @@ class BigQueryLoadOutputTest < Test::Unit::TestCase
  }
  }
  }, upload_source: duck_type(:write, :sync, :rewind), content_type: "application/octet-stream") do
- stub!.job_reference.stub!.job_id { "dummy_job_id" }
+ stub!.job_reference.stub! do |s|
+ s.job_id { "dummy_job_id" }
+ s.location { "us" }
+ end
  end
 
- mock(writer.client).get_job('yourproject_id', 'dummy_job_id', :location=>nil) do
+ mock(writer.client).get_job('yourproject_id', 'dummy_job_id', location: "us") do
  stub! do |s|
  s.id { 'dummy_job_id' }
  s.configuration.stub! do |_s|
@@ -238,10 +247,13 @@ class BigQueryLoadOutputTest < Test::Unit::TestCase
  }
  }
  }, upload_source: duck_type(:write, :sync, :rewind), content_type: "application/octet-stream") do
- stub!.job_reference.stub!.job_id { "dummy_job_id" }
+ stub!.job_reference.stub! do |s|
+ s.job_id { "dummy_job_id" }
+ s.location { "us" }
+ end
  end
 
- mock(writer.client).get_job('yourproject_id', 'dummy_job_id', :location=>nil) do
+ mock(writer.client).get_job('yourproject_id', 'dummy_job_id', location: "us") do
  stub! do |s|
  s.id { 'dummy_job_id' }
  s.configuration.stub! do |_s|
@@ -318,7 +330,10 @@ class BigQueryLoadOutputTest < Test::Unit::TestCase
  }
  }
  }, upload_source: duck_type(:write, :sync, :rewind), content_type: "application/octet-stream") do
- stub!.job_reference.stub!.job_id { "dummy_job_id" }
+ stub!.job_reference.stub! do |s|
+ s.job_id { "dummy_job_id" }
+ s.location { "us" }
+ end
  end
  end
 
data/test/plugin/test_record_schema.rb CHANGED
@@ -29,9 +29,14 @@ class RecordSchemaTest < Test::Unit::TestCase
  "mode" => "REPEATED"
  },
  {
- "name" => "utilisation",
+ "name" => "utilization",
  "type" => "NUMERIC",
  "mode" => "NULLABLE"
+ },
+ {
+ "name" => "bigutilization",
+ "type" => "BIGNUMERIC",
+ "mode" => "NULLABLE"
  }
  ]
  end
@@ -64,15 +69,20 @@ class RecordSchemaTest < Test::Unit::TestCase
  "mode" => "REPEATED"
  },
  {
- "name" => "utilisation",
+ "name" => "utilization",
  "type" => "NUMERIC",
  "mode" => "NULLABLE"
  },
+ {
+ "name" => "bigutilization",
+ "type" => "BIGNUMERIC",
+ "mode" => "NULLABLE"
+ },
  {
  "name" => "new_column",
  "type" => "STRING",
  "mode" => "REQUIRED"
- }
+ },
  ]
  end
 
@@ -104,9 +114,14 @@ class RecordSchemaTest < Test::Unit::TestCase
  "mode" => "REPEATED"
  },
  {
- "name" => "utilisation",
+ "name" => "utilization",
  "type" => "NUMERIC",
  "mode" => "NULLABLE"
+ },
+ {
+ "name" => "bigutilization",
+ "type" => "BIGNUMERIC",
+ "mode" => "NULLABLE"
  }
  ]
  end
@@ -157,12 +172,12 @@ class RecordSchemaTest < Test::Unit::TestCase
  time = Time.local(2016, 2, 7, 19, 0, 0).utc
 
  formatted = fields.format_one({
- "time" => time, "tty" => ["tty1", "tty2", "tty3"], "pwd" => "/home", "user" => {name: "joker1007", uid: 10000}, "argv" => ["foo", 42], "utilisation" => "0.837"
+ "time" => time, "tty" => ["tty1", "tty2", "tty3"], "pwd" => "/home", "user" => {name: "joker1007", uid: 10000}, "argv" => ["foo", 42], "utilization" => "0.837", "bigutilization" => "0.837"
  })
  assert_equal(
  formatted,
  {
- "time" => time.strftime("%Y-%m-%d %H:%M:%S.%6L %:z"), "tty" => MultiJson.dump(["tty1", "tty2", "tty3"]), "pwd" => "/home", "user" => MultiJson.dump({name: "joker1007", uid: 10000}), "argv" => ["foo", "42"], "utilisation" => "0.837"
+ "time" => time.strftime("%Y-%m-%d %H:%M:%S.%6L %:z"), "tty" => MultiJson.dump(["tty1", "tty2", "tty3"]), "pwd" => "/home", "user" => MultiJson.dump({name: "joker1007", uid: 10000}), "argv" => ["foo", "42"], "utilization" => "0.837", "bigutilization" => "0.837"
  }
  )
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: fluent-plugin-bigquery
  version: !ruby/object:Gem::Version
- version: 3.1.0
+ version: 3.3.0
  platform: ruby
  authors:
  - Naoya Ito
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2022-12-16 00:00:00.000000000 Z
+ date: 2024-11-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rake
@@ -142,6 +142,7 @@ files:
  - ".github/workflows/linux.yml"
  - ".github/workflows/windows.yml"
  - ".gitignore"
+ - CHANGELOG.md
  - Gemfile
  - LICENSE.txt
  - README.md
@@ -190,7 +191,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.3.7
+ rubygems_version: 3.3.27
  signing_key:
  specification_version: 4
  summary: Fluentd plugin to store data on Google BigQuery