influxdb-client 1.0.0.pre.191 → 1.1.0.pre.459

Sign up to get free protection for your applications and to get access to all the features.
@@ -45,6 +45,7 @@ module InfluxDB2
45
45
  @time = time
46
46
  @precision = precision
47
47
  end
48
+ attr_reader :precision
48
49
 
49
50
  # Create DataPoint instance from specified data.
50
51
  #
@@ -0,0 +1,93 @@
1
+ # The MIT License
2
+ #
3
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ # of this software and associated documentation files (the "Software"), to deal
5
+ # in the Software without restriction, including without limitation the rights
6
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ # copies of the Software, and to permit persons to whom the Software is
8
+ # furnished to do so, subject to the following conditions:
9
+ #
10
+ # The above copyright notice and this permission notice shall be included in
11
+ # all copies or substantial portions of the Software.
12
+ #
13
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ # THE SOFTWARE.
20
+ require_relative 'models/dialect'
21
+ require_relative 'models/query'
22
+ require_relative 'flux_csv_parser'
23
+ require 'json'
24
+
25
module InfluxDB2
  # QueryApi implements the InfluxDB 2.0 Query HTTP API endpoint (/api/v2/query).
  #
  class QueryApi < DefaultApi
    # Dialect used when the caller does not supply one:
    # annotated CSV with header row and datatype/group/default annotations.
    DEFAULT_DIALECT = InfluxDB2::Dialect.new(header: true, delimiter: ',', comment_prefix: '#',
                                             annotations: %w[datatype group default])

    # @param [Hash] options The options to be used by the client.
    def initialize(options:)
      super(options: options)
    end

    # Executes a Flux query and returns the raw (unparsed) response body.
    #
    # @param [Object] query the flux query to execute. The data could be represent by [String], [Query]
    # @param [String] org specifies the source organization
    # @return [String] result of query
    def query_raw(query: nil, org: nil, dialect: DEFAULT_DIALECT)
      _post_query(query: query, org: org, dialect: dialect).read_body
    end

    # Executes a Flux query and parses the annotated-CSV response.
    #
    # @param [Object] query the flux query to execute. The data could be represent by [String], [Query]
    # @param [String] org specifies the source organization
    # @return [Array] list of FluxTables which are matched the query
    def query(query: nil, org: nil, dialect: DEFAULT_DIALECT)
      csv = query_raw(query: query, org: org, dialect: dialect)
      csv_parser = InfluxDB2::FluxCsvParser.new(csv)

      csv_parser.parse
      csv_parser.tables
    end

    # Executes a Flux query and returns a streaming parser over the HTTP response.
    #
    # @param [Object] query the flux query to execute. The data could be represent by [String], [Query]
    # @param [String] org specifies the source organization
    # @return stream of Flux Records
    def query_stream(query: nil, org: nil, dialect: DEFAULT_DIALECT)
      http_response = _post_query(query: query, org: org, dialect: dialect)

      InfluxDB2::FluxCsvParser.new(http_response, stream: true)
    end

    private

    # POSTs the query payload to /api/v2/query; returns nil when there is nothing to send.
    def _post_query(query: nil, org: nil, dialect: DEFAULT_DIALECT)
      org_param = org || @options[:org]
      _check('org', org_param)

      payload = _generate_payload(query, dialect)
      return nil if payload.nil?

      uri = URI.parse(File.join(@options[:url], '/api/v2/query'))
      uri.query = URI.encode_www_form(org: org_param)

      _post(payload.to_body.to_json, uri)
    end

    # Normalizes the user-supplied query into a Query model; nil for nil/empty input.
    def _generate_payload(query, dialect)
      case query
      when nil
        nil
      when Query
        query
      when String
        query.empty? ? nil : Query.new(query: query, dialect: dialect, type: nil)
      end
    end
  end
end
@@ -19,5 +19,5 @@
19
19
  # THE SOFTWARE.
20
20
 
21
21
module InfluxDB2
  # Gem version, bumped for each release (Semantic Versioning).
  VERSION = '1.1.0'.freeze
end
@@ -0,0 +1,89 @@
1
+ # The MIT License
2
+ #
3
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ # of this software and associated documentation files (the "Software"), to deal
5
+ # in the Software without restriction, including without limitation the rights
6
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ # copies of the Software, and to permit persons to whom the Software is
8
+ # furnished to do so, subject to the following conditions:
9
+ #
10
+ # The above copyright notice and this permission notice shall be included in
11
+ # all copies or substantial portions of the Software.
12
+ #
13
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ # THE SOFTWARE.
20
+
21
module InfluxDB2
  # Worker for handling the write batching queue.
  #
  # Two background threads drain the queue: one on a fixed flush interval,
  # one whenever a full batch has accumulated. A single-token event queue
  # serializes access to the drain logic.
  class Worker
    # @param api_client the client used to perform writes; must expose
    #   #closed and #write_raw (presumably a WriteApi — confirm at call site)
    # @param write_options [WriteOptions] batching configuration
    def initialize(api_client, write_options)
      @api_client = api_client
      @write_options = write_options

      @queue = Queue.new
      # Single-token queue acting as a mutex: holding the token grants the
      # right to drain @queue.
      @queue_event = Queue.new
      @queue_event.push(true)

      @thread_flush = Thread.new do
        until api_client.closed
          # Float division: integer division would truncate sub-second
          # intervals (e.g. 500 ms) to 0 and turn this into a busy loop.
          sleep @write_options.flush_interval / 1_000.0
          check_background_queue
        end
      end

      @thread_size = Thread.new do
        until api_client.closed
          check_background_queue(size: true) if @queue.length >= @write_options.batch_size
          sleep 0.01
        end
      end
    end

    # Enqueues a BatchItem for later writing.
    def push(payload)
      @queue.push(payload)
    end

    # Drains the queue (up to batch_size items, or everything when
    # flush_all is true), groups the items by key, and writes each group.
    #
    # @param size [Boolean] when true, only flush if a full batch is queued
    # @param flush_all [Boolean] when true, drain the queue completely
    def check_background_queue(size: false, flush_all: false)
      @queue_event.pop
      data = {}
      points = 0

      if size && @queue.length < @write_options.batch_size
        @queue_event.push(true)
        return
      end

      while (flush_all || points < @write_options.batch_size) && !@queue.empty?
        begin
          item = @queue.pop(true)
          key = item.key
          data[key] = [] unless data.key?(key)
          data[key] << item.data
          points += 1
        rescue ThreadError
          # Queue was drained by a competing pop. Stop collecting, but fall
          # through so the data gathered so far is still written and the
          # event token is pushed back — an early `return` here would leak
          # the token and deadlock every subsequent flush.
          break
        end
      end

      write(data) unless data.values.flatten.empty?
      @queue_event.push(true)
    end

    # Flushes everything left in the queue (used on close).
    def flush_all
      check_background_queue(flush_all: true) unless @queue.empty?
    end

    # Writes each group of batched points in a single request.
    def write(data)
      data.each do |key, points|
        @api_client.write_raw(points.join("\n"), precision: key.precision, bucket: key.bucket, org: key.org)
      end
    end
  end
end
@@ -17,8 +17,31 @@
17
17
  # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
18
  # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
19
  # THE SOFTWARE.
20
+ require_relative 'worker'
20
21
 
21
22
  module InfluxDB2
23
module InfluxDB2
  # Write modes supported by the WriteApi.
  module WriteType
    SYNCHRONOUS = 1
    BATCHING = 2
  end

  # Creates write api configuration.
  #
  # @param write_type: methods of write (batching, asynchronous, synchronous)
  # @param batch_size: the number of data point to collect in batch
  # @param flush_interval: flush data at least in this interval
  class WriteOptions
    attr_reader :write_type, :batch_size, :flush_interval

    def initialize(write_type: WriteType::SYNCHRONOUS, batch_size: 1_000, flush_interval: 1_000)
      @write_type = write_type
      @batch_size = batch_size
      @flush_interval = flush_interval
    end
  end

  # Default configuration: every write is performed immediately.
  SYNCHRONOUS = InfluxDB2::WriteOptions.new(write_type: WriteType::SYNCHRONOUS)
end
44
+
22
45
  # Precision constants.
23
46
  #
24
47
  class WritePrecision
@@ -37,15 +60,15 @@ module InfluxDB2
37
60
 
38
61
  # Write time series data into InfluxDB.
39
62
  #
40
- class WriteApi
41
- DEFAULT_TIMEOUT = 10
42
- DEFAULT_REDIRECT_COUNT = 10
43
-
63
+ class WriteApi < DefaultApi
44
64
# @param [Hash] options The options to be used by the client.
# @param [WriteOptions] write_options Write api configuration (defaults to SYNCHRONOUS).
def initialize(options:, write_options: SYNCHRONOUS)
  super(options: options)
  @write_options = write_options
  # Flipped to true by #close!; background worker threads poll this flag.
  @closed = false
end
attr_reader :closed
49
72
 
50
73
  # Write data into specified Bucket.
51
74
  #
@@ -87,66 +110,108 @@ module InfluxDB2
87
110
  _check('bucket', bucket_param)
88
111
  _check('org', org_param)
89
112
 
90
- payload = _generate_payload(data)
113
+ payload = _generate_payload(data, bucket: bucket_param, org: org_param, precision: precision_param)
91
114
  return nil if payload.nil?
92
115
 
116
+ if WriteType::BATCHING == @write_options.write_type
117
+ _worker.push(payload)
118
+ else
119
+ write_raw(payload, precision: precision_param, bucket: bucket_param, org: org_param)
120
+ end
121
+ end
122
+
123
# Flushes any batched data and marks this API instance as closed.
#
# @return [ true ] Always true.
def close!
  batching_worker = _worker
  batching_worker.flush_all unless batching_worker.nil?
  @closed = true
  true
end
129
+
130
# Writes a raw line-protocol payload to /api/v2/write.
#
# @param [String] payload data as String
# @param [WritePrecision] precision The precision for the unix timestamps within the body line-protocol
# @param [String] bucket specifies the destination bucket for writes
# @param [String] org specifies the destination organization for writes
def write_raw(payload, precision: nil, bucket: nil, org: nil)
  precision_param = precision || @options[:precision]
  bucket_param = bucket || @options[:bucket]
  org_param = org || @options[:org]
  _check('precision', precision_param)
  _check('bucket', bucket_param)
  _check('org', org_param)

  # Only non-empty Strings are posted. The previous guard
  # (`unless payload.instance_of?(String) || payload.empty?`) let empty
  # strings through to the server and crashed on non-String payloads
  # (e.g. nil.empty? raises NoMethodError).
  return nil unless payload.instance_of?(String) && !payload.empty?

  uri = URI.parse(File.join(@options[:url], '/api/v2/write'))
  uri.query = URI.encode_www_form(bucket: bucket_param, org: org_param, precision: precision_param.to_s)

  _post(payload, uri)
end
98
149
 
99
- private
150
# A single entry in the batching queue: the payload plus the key it is
# grouped under when the queue is flushed.
class BatchItem
  attr_reader :key, :data

  # @param key [BatchItemKey] destination (bucket/org/precision) grouping key
  # @param data [String] line-protocol payload
  def initialize(key, data)
    @key = key
    @data = data
  end
end
100
158
 
101
- def _post(payload, uri, limit = @max_redirect_count)
102
- raise InfluxError.from_message("Too many HTTP redirects. Exceeded limit: #{@max_redirect_count}") if limit.zero?
103
-
104
- http = Net::HTTP.new(uri.host, uri.port)
105
- http.open_timeout = @options[:open_timeout] || DEFAULT_TIMEOUT
106
- http.write_timeout = @options[:write_timeout] || DEFAULT_TIMEOUT if Net::HTTP.method_defined? :write_timeout
107
- http.read_timeout = @options[:read_timeout] || DEFAULT_TIMEOUT
108
- http.use_ssl = @options[:use_ssl].nil? ? true : @options[:use_ssl]
109
-
110
- request = Net::HTTP::Post.new(uri.request_uri)
111
- request['Authorization'] = "Token #{@options[:token]}"
112
- request.body = payload
113
-
114
- begin
115
- response = http.request(request)
116
- case response
117
- when Net::HTTPSuccess then
118
- response
119
- when Net::HTTPRedirection then
120
- location = response['location']
121
- _post(payload, URI.parse(location), limit - 1)
122
- else
123
- raise InfluxError.from_response(response)
124
- end
125
- ensure
126
- http.finish if http.started?
159
# Identifies the destination of a batched write; items with equal keys are
# flushed together in a single request, so this class implements the full
# Hash-key contract (==, eql?, hash).
class BatchItemKey
  attr_reader :bucket, :org, :precision

  def initialize(bucket, org, precision = DEFAULT_WRITE_PRECISION)
    @bucket = bucket
    @org = org
    @precision = precision
  end

  # Keys are equal when bucket, org and precision all match.
  def ==(other)
    bucket == other.bucket && org == other.org && precision == other.precision
  end

  alias eql? ==

  # Combine the component hashes (XOR) so eql? keys hash identically.
  def hash
    bucket.hash ^ org.hash ^ precision.hash
  end
end
129
178
 
130
- def _check(key, value)
131
- raise ArgumentError, "The '#{key}' should be defined as argument or default option: #{@options}" if value.nil?
179
private

WORKER_MUTEX = Mutex.new

# Lazily creates the batching Worker (only in BATCHING mode; nil otherwise).
# Uses double-checked locking: the common path is lock-free, while the
# mutex guarantees at most one Worker is ever constructed.
def _worker
  return nil unless @write_options.write_type == WriteType::BATCHING
  return @worker if @worker

  WORKER_MUTEX.synchronize do
    # Re-check inside the lock: another thread may have assigned @worker
    # while this one was waiting on the mutex.
    @worker ||= Worker.new(self, @write_options)
  end
end
133
195
 
134
# Converts user-supplied data (Point / String / Hash / Enumerable) into a
# write payload: a line-protocol String, or a BatchItem in batching mode.
# Returns nil for nil or empty input.
def _generate_payload(data, precision: nil, bucket: nil, org: nil)
  return nil if data.nil?

  case data
  when Point
    _generate_payload(data.to_line_protocol, bucket: bucket, org: org,
                      precision: data.precision || DEFAULT_WRITE_PRECISION)
  when String
    return nil if data.empty?

    if @write_options.write_type == WriteType::BATCHING
      BatchItem.new(BatchItemKey.new(bucket, org, precision), data)
    else
      data
    end
  when Hash
    _generate_payload(Point.from_hash(data), bucket: bucket, org: org, precision: precision)
  else
    if data.respond_to?(:map)
      # NOTE(review): in BATCHING mode each element maps to a BatchItem, so
      # joining with "\n" relies on BatchItem#to_s — verify intended behavior.
      data.map { |item| _generate_payload(item, bucket: bucket, org: org, precision: precision) }
          .reject(&:nil?).join("\n".freeze)
    end
  end
end
@@ -0,0 +1,326 @@
1
+ # The MIT License
2
+ #
3
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ # of this software and associated documentation files (the "Software"), to deal
5
+ # in the Software without restriction, including without limitation the rights
6
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ # copies of the Software, and to permit persons to whom the Software is
8
+ # furnished to do so, subject to the following conditions:
9
+ #
10
+ # The above copyright notice and this permission notice shall be included in
11
+ # all copies or substantial portions of the Software.
12
+ #
13
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ # THE SOFTWARE.
20
+
21
+ require 'test_helper'
22
+
23
# Tests for InfluxDB2::FluxCsvParser — parsing annotated CSV query results.
class FluxCsvParserTest < MiniTest::Test
  def test_multiple_values
    data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,long,long,string\n" \
      "#group,false,false,true,true,true,true,true,true,false,false,false\n" \
      "#default,_result,,,,,,,,,,\n" \
      ",result,table,_start,_stop,_field,_measurement,host,region,_value2,value1,value_str\n" \
      ",,0,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,121,11,test\n" \
      ",,1,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,B,west,484,22,test\n" \
      ",,2,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,usage_system,cpu,A,west,1444,38,test\n" \
      ',,3,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,user_usage,cpu,A,west,2401,49,test'

    tables = InfluxDB2::FluxCsvParser.new(data).parse.tables

    headers = tables[0].columns
    assert_equal 11, headers.size

    group_flags = [false, false, true, true, true, true, true, true, false, false, false]
    _assert_columns(headers, values: group_flags)
    assert_equal 4, tables.size

    _assert_multiple_record(tables)
  end

  def test_parse_shortcut
    data = '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,' \
      "dateTime:RFC3339,long,string,string,string,boolean\n" \
      "#group,false,false,false,false,false,false,false,false,false,true\n" \
      "#default,_result,,,,,,,,,true\n" \
      ",result,table,_start,_stop,_time,_value,_field,_measurement,host,value\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,true\n"

    tables = InfluxDB2::FluxCsvParser.new(data).parse.tables

    assert_equal 1, tables.size
    assert_equal 1, tables[0].records.size

    record = tables[0].records[0]

    assert_equal _parse_time('1970-01-01T00:00:10Z'), record.start
    assert_equal _parse_time('1970-01-01T00:00:20Z'), record.stop
    assert_equal _parse_time('1970-01-01T00:00:10Z'), record.time
    assert_equal 10, record.value
    assert_equal 'free', record.field
    assert_equal 'mem', record.measurement
  end

  def test_mapping_boolean
    data = '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,' \
      "dateTime:RFC3339,long,string,string,string,boolean\n" \
      "#group,false,false,false,false,false,false,false,false,false,true\n" \
      "#default,_result,,,,,,,,,true\n" \
      ",result,table,_start,_stop,_time,_value,_field,_measurement,host,value\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,true\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,false\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,x\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,\n"

    rows = _first_table_records(data)

    assert_equal true, rows[0].values['value']
    assert_equal false, rows[1].values['value']
    # Unparseable boolean text maps to false; empty cell takes the
    # column default (true).
    assert_equal false, rows[2].values['value']
    assert_equal true, rows[3].values['value']
  end

  def test_mapping_unsigned_long
    data = '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,' \
      "dateTime:RFC3339,long,string,string,string,unsignedLong\n" \
      "#group,false,false,false,false,false,false,false,false,false,true\n" \
      "#default,_result,,,,,,,,,\n" \
      ",result,table,_start,_stop,_time,_value,_field,_measurement,host,value\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,17916881237904312345\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,\n"

    rows = _first_table_records(data)

    # Value exceeds 64-bit range — Ruby integers are arbitrary precision.
    assert_equal 17_916_881_237_904_312_345, rows[0].values['value']
    assert_nil rows[1].values['value']
  end

  def test_mapping_double
    data = '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,' \
      "dateTime:RFC3339,long,string,string,string,double\n" \
      "#group,false,false,false,false,false,false,false,false,false,true\n" \
      "#default,_result,,,,,,,,,\n" \
      ",result,table,_start,_stop,_time,_value,_field,_measurement,host,value\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,12.25\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,\n"

    rows = _first_table_records(data)

    assert_equal 12.25, rows[0].values['value']
    assert_nil rows[1].values['value']
  end

  def test_mapping_base64_binary
    binary_data = 'test value'
    encoded_data = Base64.encode64(binary_data)

    data = '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,' \
      "dateTime:RFC3339,long,string,string,string,base64Binary\n" \
      "#group,false,false,false,false,false,false,false,false,false,true\n" \
      "#default,_result,,,,,,,,,\n" \
      ",result,table,_start,_stop,_time,_value,_field,_measurement,host,value\n" \
      ',,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,' + encoded_data + "\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,\n"

    rows = _first_table_records(data)

    decoded = rows[0].values['value']
    assert !decoded.nil?
    assert_equal binary_data, decoded

    assert_nil rows[1].values['value']
  end

  def test_mapping_rfc3339
    data = '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,' \
      "dateTime:RFC3339,long,string,string,string,dateTime:RFC3339\n" \
      "#group,false,false,false,false,false,false,false,false,false,true\n" \
      "#default,_result,,,,,,,,,\n" \
      ",result,table,_start,_stop,_time,_value,_field,_measurement,host,value\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,1970-01-01T00:00:10Z\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,\n"

    rows = _first_table_records(data)

    assert_equal Time.parse('1970-01-01T00:00:10Z').to_datetime.rfc3339, rows[0].values['value']
    assert_nil rows[1].values['value']
  end

  def test_mapping_duration
    data = '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339' \
      ",dateTime:RFC3339,long,string,string,string,duration\n" \
      "#group,false,false,false,false,false,false,false,false,false,true\n" \
      "#default,_result,,,,,,,,,\n" \
      ",result,table,_start,_stop,_time,_value,_field,_measurement,host,value\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,125\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,\n"

    rows = _first_table_records(data)

    assert_equal 125, rows[0].values['value']
    assert_nil rows[1].values['value']
  end

  def test_group_key
    data = '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,' \
      "dateTime:RFC3339,long,string,string,string,duration\n" \
      "#group,false,false,false,false,true,false,false,false,false,true\n" \
      "#default,_result,,,,,,,,,\n" \
      ",result,table,_start,_stop,_time,_value,_field,_measurement,host,value\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,125\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,\n"

    tables = InfluxDB2::FluxCsvParser.new(data).parse.tables

    assert_equal 10, tables[0].columns.size
    assert_equal 2, tables[0].group_key.size
  end

  def test_unknown_type_as_string
    data = '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,' \
      "dateTime:RFC3339,long,string,string,string,unknown\n" \
      "#group,false,false,false,false,false,false,false,false,false,true\n" \
      "#default,_result,,,,,,,,,\n" \
      ",result,table,_start,_stop,_time,_value,_field,_measurement,host,value\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,12.25\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,\n"

    rows = _first_table_records(data)

    # Unknown datatypes are left as raw strings.
    assert_equal '12.25', rows[0].values['value']
    assert_nil rows[1].values['value']
  end

  private

  # Parses +data+ and returns the records of the first resulting table.
  def _first_table_records(data)
    InfluxDB2::FluxCsvParser.new(data).parse.tables[0].records
  end

  # Converts an RFC3339 string into the representation FluxRecord uses.
  def _parse_time(time)
    Time.parse(time).to_datetime.rfc3339
  end

  # Asserts selected values, the record's primary value, and the value count.
  def _assert_record(flux_record, values: nil, size: 0, value: nil)
    values.each_pair do |key, expected|
      assert_equal expected, flux_record.values[key]
    end

    if value.nil?
      assert_nil value
    else
      assert_equal value, flux_record.value
    end

    assert_equal size, flux_record.values.size
  end

  # Asserts the #group flag of each column against the expected +values+.
  def _assert_columns(column_headers, values: nil)
    values.zip(column_headers).each do |expected, header|
      assert_equal expected, header.group
    end
  end

  # Asserts that each table holds exactly one record with the expected values.
  def _assert_multiple_record(tables)
    expected_per_table = [
      { 'table' => 0, 'host' => 'A', 'region' => 'west', 'value1' => 11, '_value2' => 121,
        'value_str' => 'test' },
      { 'table' => 1, 'host' => 'B', 'region' => 'west', 'value1' => 22, '_value2' => 484,
        'value_str' => 'test' },
      { 'table' => 2, 'host' => 'A', 'region' => 'west', 'value1' => 38, '_value2' => 1444,
        'value_str' => 'test' },
      { 'table' => 3, 'host' => 'A', 'region' => 'west', 'value1' => 49, '_value2' => 2401,
        'value_str' => 'test' }
    ]

    expected_per_table.each_with_index do |values, index|
      table_records = tables[index].records
      assert_equal 1, table_records.size
      _assert_record(table_records[0], values: values, size: 11)
    end
  end
end
275
+
276
# Tests for error handling in InfluxDB2::FluxCsvParser.
class FluxCsvParserErrorTest < MiniTest::Test
  def test_error
    data = "#datatype,string,string\n" \
      "#group,true,true\n" \
      "#default,,\n" \
      ",error,reference\n" \
      ',failed to create physical plan: invalid time bounds from procedure from: bounds contain zero time,897'

    error = _parse_error(data, InfluxDB2::FluxQueryError)

    assert_equal 'failed to create physical plan: invalid time bounds from procedure from: bounds contain zero time',
                 error.message
    assert_equal 897, error.reference
  end

  def test_error_without_reference
    data = "#datatype,string,string\n" \
      "#group,true,true\n" \
      "#default,,\n" \
      ",error,reference\n" \
      ',failed to create physical plan: invalid time bounds from procedure from: bounds contain zero time,'

    error = _parse_error(data, InfluxDB2::FluxQueryError)

    assert_equal 'failed to create physical plan: invalid time bounds from procedure from: bounds contain zero time',
                 error.message
    # Missing reference column defaults to 0.
    assert_equal 0, error.reference
  end

  def test_without_table_definition
    data = ",result,table,_start,_stop,_time,_value,_field,_measurement,host,value\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,12.25\n" \
      ",,0,1970-01-01T00:00:10Z,1970-01-01T00:00:20Z,1970-01-01T00:00:10Z,10,free,mem,A,\n"

    error = _parse_error(data, InfluxDB2::FluxCsvParserError)

    assert_equal 'Unable to parse CSV response. FluxTable definition was not found.', error.message
  end

  private

  # Parses +data+ and returns the +error_class+ exception it raises.
  def _parse_error(data, error_class)
    assert_raises(error_class) do
      InfluxDB2::FluxCsvParser.new(data).parse
    end
  end
end