influxdb 0.1.9 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/.rubocop.yml +41 -0
  4. data/.travis.yml +3 -2
  5. data/Gemfile +7 -1
  6. data/README.md +218 -102
  7. data/Rakefile +2 -6
  8. data/lib/influxdb.rb +15 -5
  9. data/lib/influxdb/client.rb +38 -433
  10. data/lib/influxdb/client/http.rb +123 -0
  11. data/lib/influxdb/config.rb +66 -0
  12. data/lib/influxdb/errors.rb +8 -2
  13. data/lib/influxdb/{logger.rb → logging.rb} +6 -5
  14. data/lib/influxdb/max_queue.rb +2 -1
  15. data/lib/influxdb/point_value.rb +27 -25
  16. data/lib/influxdb/query/cluster.rb +17 -0
  17. data/lib/influxdb/query/continuous_query.rb +22 -0
  18. data/lib/influxdb/query/core.rb +110 -0
  19. data/lib/influxdb/query/database.rb +21 -0
  20. data/lib/influxdb/query/retention_policy.rb +26 -0
  21. data/lib/influxdb/query/user.rb +41 -0
  22. data/lib/influxdb/version.rb +2 -2
  23. data/lib/influxdb/writer/async.rb +115 -0
  24. data/lib/influxdb/writer/udp.rb +21 -0
  25. data/spec/influxdb/cases/async_client_spec.rb +33 -0
  26. data/spec/influxdb/cases/query_cluster_spec.rb +65 -0
  27. data/spec/influxdb/cases/query_continuous_query_spec.rb +82 -0
  28. data/spec/influxdb/cases/query_core.rb +34 -0
  29. data/spec/influxdb/cases/query_database_spec.rb +58 -0
  30. data/spec/influxdb/cases/query_retention_policy_spec.rb +84 -0
  31. data/spec/influxdb/cases/query_series_spec.rb +50 -0
  32. data/spec/influxdb/cases/query_shard_space_spec.rb +105 -0
  33. data/spec/influxdb/cases/query_shard_spec.rb +43 -0
  34. data/spec/influxdb/cases/query_user_spec.rb +127 -0
  35. data/spec/influxdb/cases/querying_spec.rb +149 -0
  36. data/spec/influxdb/cases/retry_requests_spec.rb +102 -0
  37. data/spec/influxdb/cases/udp_client_spec.rb +21 -0
  38. data/spec/influxdb/cases/write_points_spec.rb +140 -0
  39. data/spec/influxdb/client_spec.rb +37 -810
  40. data/spec/influxdb/config_spec.rb +118 -0
  41. data/spec/influxdb/{logger_spec.rb → logging_spec.rb} +4 -8
  42. data/spec/influxdb/max_queue_spec.rb +29 -0
  43. data/spec/influxdb/point_value_spec.rb +81 -14
  44. data/spec/influxdb/worker_spec.rb +8 -11
  45. data/spec/spec_helper.rb +7 -10
  46. metadata +65 -30
  47. data/lib/influxdb/udp_client.rb +0 -16
  48. data/lib/influxdb/worker.rb +0 -80
  49. data/spec/influxdb/udp_client_spec.rb +0 -33
  50. data/spec/influxdb_spec.rb +0 -4
  51. data/spec/max_queue_spec.rb +0 -32
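
The file list above amounts to a redesign of the client: the monolithic InfluxDB::Client is split into a Config object (lib/influxdb/config.rb), an HTTP layer (lib/influxdb/client/http.rb), query mixins under lib/influxdb/query/, and pluggable writers under lib/influxdb/writer/. A minimal usage sketch of how a 0.2.0 client is assembled, based on the new Client#initialize shown further down; the async: and udp: option names and the host/port values are assumptions carried over from the 0.1.9 options, not something this diff spells out:

  require "influxdb"

  # Default: the client itself acts as the writer and talks HTTP synchronously.
  influxdb = InfluxDB::Client.new "db", username: "username"

  # Per the new Client#initialize, an async config swaps in InfluxDB::Writer::Async
  # and a udp config swaps in InfluxDB::Writer::UDP (option names assumed from 0.1.9).
  async_client = InfluxDB::Client.new "db", async: true
  udp_client   = InfluxDB::Client.new "db", udp: { host: "localhost", port: 4444 }
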
data/Rakefile CHANGED
@@ -15,12 +15,8 @@ RSpec.configure do |config|
   config.formatter = :documentation
 end
 
-task :default => :spec
+task default: :spec
 
 task :console do
-  require 'irb'
-  require 'irb/completion'
-  require 'influxdb'
-  ARGV.clear
-  IRB.start
+  sh 'pry -r ./lib/influxdb.rb'
 end
data/lib/influxdb.rb CHANGED
@@ -1,9 +1,19 @@
 require "influxdb/version"
 require "influxdb/errors"
-require "influxdb/logger"
-
+require "influxdb/logging"
 require "influxdb/max_queue"
-require "influxdb/worker"
-require "influxdb/client"
-require "influxdb/udp_client"
 require "influxdb/point_value"
+require "influxdb/config"
+
+require "influxdb/writer/async"
+require "influxdb/writer/udp"
+
+require "influxdb/query/core"
+require "influxdb/query/cluster"
+require "influxdb/query/database"
+require "influxdb/query/user"
+require "influxdb/query/continuous_query"
+require "influxdb/query/retention_policy"
+
+require "influxdb/client/http"
+require "influxdb/client"
data/lib/influxdb/client.rb CHANGED
@@ -1,329 +1,70 @@
-require 'uri'
-require 'cgi'
-require 'net/http'
-require 'net/https'
 require 'json'
 require 'cause'
 
 module InfluxDB
-  class Client
-    attr_accessor :hosts,
-      :port,
-      :username,
-      :password,
-      :database,
-      :time_precision,
-      :auth_method,
-      :use_ssl,
-      :verify_ssl,
-      :ssl_ca_cert,
-      :stopped
+  # rubocop:disable Metrics/MethodLength
+  # rubocop:disable Metrics/AbcSize
 
-    attr_accessor :queue, :worker, :udp_client
+  # InfluxDB client class
+  class Client
+    attr_reader :config, :writer
 
     include InfluxDB::Logging
+    include InfluxDB::HTTP
+    include InfluxDB::Query::Core
+    include InfluxDB::Query::Cluster
+    include InfluxDB::Query::Database
+    include InfluxDB::Query::User
+    include InfluxDB::Query::ContinuousQuery
+    include InfluxDB::Query::RetentionPolicy
 
     # Initializes a new InfluxDB client
     #
     # === Examples:
     #
-    # InfluxDB::Client.new                                # connect to localhost using root/root
-    #                                                     # as the credentials and doesn't connect to a db
+    # # connect to localhost using root/root
+    # # as the credentials and doesn't connect to a db
+    #
+    # InfluxDB::Client.new
+    #
+    # # connect to localhost using root/root
+    # # as the credentials and 'db' as the db name
     #
-    # InfluxDB::Client.new 'db'                           # connect to localhost using root/root
-    #                                                     # as the credentials and 'db' as the db name
+    # InfluxDB::Client.new 'db'
     #
-    # InfluxDB::Client.new :username => 'username'        # override username, other defaults remain unchanged
+    # # override username, other defaults remain unchanged
     #
-    # Influxdb::Client.new 'db', :username => 'username'  # override username, use 'db' as the db name
+    # InfluxDB::Client.new username: 'username'
     #
-    # Influxdb::Client.new 'db', :path => '/prefix'       # use the specified path prefix when building the
-    #                                                     # url e.g.: /prefix/db/dbname...
+    # # override username, use 'db' as the db name
+    # Influxdb::Client.new 'db', username: 'username'
     #
     # === Valid options in hash
     #
     # +:host+:: the hostname to connect to
     # +:port+:: the port to connect to
+    # +:prefix+:: the specified path prefix when building the url e.g.: /prefix/db/dbname...
     # +:username+:: the username to use when executing commands
     # +:password+:: the password associated with the username
-    # +:use_ssl+:: use ssl to connect?
+    # +:use_ssl+:: use ssl to connect
     # +:verify_ssl+:: verify ssl server certificate?
-    # +:ssl_ca_cert+:: ssl CA certificate, chainfile or CA path. The system CA path is automatically included.
-    def initialize *args
-      @database = args.first if args.first.is_a? String
+    # +:ssl_ca_cert+:: ssl CA certificate, chainfile or CA path.
+    #                  The system CA path is automatically included
+    def initialize(*args)
       opts = args.last.is_a?(Hash) ? args.last : {}
-      @hosts = Array(opts[:hosts] || opts[:host] || ["localhost"])
-      @port = opts[:port] || 8086
-      @path = opts[:path] || ""
-      @username = opts[:username] || "root"
-      @password = opts[:password] || "root"
-      @auth_method = %w{params basic_auth}.include?(opts[:auth_method]) ? opts[:auth_method] : "params"
-      @use_ssl = opts[:use_ssl] || false
-      @verify_ssl = opts.fetch(:verify_ssl, true)
-      @ssl_ca_cert = opts[:ssl_ca_cert] || false
-      @time_precision = opts[:time_precision] || "s"
-      @initial_delay = opts[:initial_delay] || 0.01
-      @max_delay = opts[:max_delay] || 30
-      @open_timeout = opts[:write_timeout] || 5
-      @read_timeout = opts[:read_timeout] || 300
-      @async = opts[:async] || false
-      @retry = opts.fetch(:retry, nil)
-      @retry = case @retry
-      when Integer
-        @retry
-      when true, nil
-        -1
-      when false
-        0
-      end
-
-      @worker = InfluxDB::Worker.new(self) if @async
-      self.udp_client = opts[:udp] ? InfluxDB::UDPClient.new(opts[:udp][:host], opts[:udp][:port]) : nil
-
-      at_exit { stop! } if @retry > 0
-    end
-
-    def ping
-      get "/ping"
-    end
-
-    ## allow options, e.g. influxdb.create_database('foo', replicationFactor: 3)
-    def create_database(name, options = {})
-      url = full_url("/cluster/database_configs/#{name}")
-      data = JSON.generate(options)
-      post(url, data)
-    end
-
-    def delete_database(name)
-      delete full_url("/db/#{name}")
-    end
-
-    def get_database_list
-      get full_url("/db")
-    end
-
-    def authenticate_cluster_admin
-      get(full_url('/cluster_admins/authenticate'), true)
-    end
-
-    def create_cluster_admin(username, password)
-      url = full_url("/cluster_admins")
-      data = JSON.generate({:name => username, :password => password})
-      post(url, data)
-    end
-
-    def update_cluster_admin(username, password)
-      url = full_url("/cluster_admins/#{username}")
-      data = JSON.generate({:password => password})
-      post(url, data)
-    end
-
-    def delete_cluster_admin(username)
-      delete full_url("/cluster_admins/#{username}")
-    end
-
-    def get_cluster_admin_list
-      get full_url("/cluster_admins")
-    end
-
-    def authenticate_database_user(database)
-      get(full_url("/db/#{database}/authenticate"), true)
-    end
-
-    def create_database_user(database, username, password, options={})
-      url = full_url("/db/#{database}/users")
-      data = JSON.generate({:name => username, :password => password}.merge(options))
-      post(url, data)
-    end
-
-    def update_database_user(database, username, options = {})
-      url = full_url("/db/#{database}/users/#{username}")
-      data = JSON.generate(options)
-      post(url, data)
-    end
-
-    def delete_database_user(database, username)
-      delete full_url("/db/#{database}/users/#{username}")
-    end
-
-    def get_database_user_list(database)
-      get full_url("/db/#{database}/users")
-    end
-
-    def get_database_user_info(database, username)
-      get full_url("/db/#{database}/users/#{username}")
-    end
-
-    def alter_database_privilege(database, username, admin=true)
-      update_database_user(database, username, :admin => admin)
-    end
-
-    # NOTE: Only cluster admin can call this
-    def continuous_queries(database)
-      get full_url("/db/#{database}/continuous_queries")
-    end
-
-    def get_shard_list()
-      get full_url("/cluster/shards")
-    end
-
-    def delete_shard(shard_id, server_ids)
-      data = JSON.generate({"serverIds" => server_ids})
-      delete full_url("/cluster/shards/#{shard_id}"), data
-    end
-
-    # EXAMPLE:
-    #
-    # db.create_continuous_query(
-    #   "select mean(sys) as sys, mean(usr) as usr from cpu group by time(15m)",
-    #   "cpu.15m",
-    # )
-    #
-    # NOTE: Only cluster admin can call this
-    def create_continuous_query(query, name)
-      query("#{query} into #{name}")
-    end
-
-    # NOTE: Only cluster admin can call this
-    def get_continuous_query_list
-      query("list continuous queries")
-    end
-
-    # NOTE: Only cluster admin can call this
-    def delete_continuous_query(id)
-      query("drop continuous query #{id}")
-    end
-
-    def get_shard_space_list
-      get full_url("/cluster/shard_spaces")
-    end
-
-    def get_shard_space(database_name, shard_space_name)
-      get_shard_space_list.find do |shard_space|
-        shard_space["database"] == database_name &&
-          shard_space["name"] == shard_space_name
-      end
-    end
-
-    def create_shard_space(database_name, options = {})
-      url = full_url("/cluster/shard_spaces/#{database_name}")
-      data = JSON.generate(default_shard_space_options.merge(options))
-
-      post(url, data)
-    end
+      opts[:database] = args.first if args.first.is_a? String
+      @config = InfluxDB::Config.new(opts)
+      @stopped = false
 
-    def delete_shard_space(database_name, shard_space_name)
-      delete full_url("/cluster/shard_spaces/#{database_name}/#{shard_space_name}")
-    end
-
-    ## Get the shard space first, so the user doesn't have to specify the existing options
-    def update_shard_space(database_name, shard_space_name, options)
-      shard_space_options = get_shard_space(database_name, shard_space_name)
-      shard_space_options.delete("database")
-
-      url = full_url("/cluster/shard_spaces/#{database_name}/#{shard_space_name}")
-      data = JSON.generate(shard_space_options.merge(options))
-
-      post(url, data)
-    end
-
-    def default_shard_space_options
-      {
-        "name" => "default",
-        "regEx" => "/.*/",
-        "retentionPolicy" => "inf",
-        "shardDuration" => "7d",
-        "replicationFactor" => 1,
-        "split" => 1
-      }
-    end
-
-    def configure_database(database_name, options = {})
-      url = full_url("/cluster/database_configs/#{database_name}")
-      data = JSON.generate(default_database_configuration.merge(options))
+      @writer = self
 
-      post(url, data)
-    end
-
-    def default_database_configuration
-      {:spaces => [default_shard_space_options]}
-    end
-
-    def write_point(name, data, async=@async, time_precision=@time_precision)
-      write_points([{:name => name, :data => data}], async, time_precision)
-    end
-
-    # Example:
-    # db.write_points(
-    #   [
-    #     {
-    #       name: 'first_name',
-    #       data: {
-    #         value: 'val1'
-    #       }
-    #     },
-    #     {
-    #       name: 'first_name',
-    #       data: {
-    #         value: 'val1'
-    #       }
-    #     }
-    #   ]
-    # )
-    def write_points(name_data_hashes_array, async=@async, time_precision=@time_precision)
-
-      payloads = []
-      name_data_hashes_array.each do |attrs|
-        payloads << generate_payload(attrs[:name], attrs[:data])
+      if config.async?
+        @writer = InfluxDB::Writer::Async.new(self, config.async)
+      elsif config.udp?
+        @writer = InfluxDB::Writer::UDP.new(self, config.udp)
       end
 
-      if async
-        worker.push(payloads)
-      elsif udp_client
-        udp_client.send(payloads)
-      else
-        _write(payloads, time_precision)
-      end
-    end
-
-    def generate_payload(name, data)
-      data = data.is_a?(Array) ? data : [data]
-      columns = data.reduce(:merge).keys.sort {|a,b| a.to_s <=> b.to_s}
-      payload = {:name => name, :points => [], :columns => columns}
-
-      data.each do |point|
-        payload[:points] << columns.inject([]) do |array, column|
-          array << InfluxDB::PointValue.new(point[column]).dump
-        end
-      end
-
-      payload
-    end
-
-    def _write(payload, time_precision=@time_precision)
-      url = full_url("/db/#{@database}/series", :time_precision => time_precision)
-      data = JSON.generate(payload)
-      post(url, data)
-    end
-
-    def query(query, time_precision=@time_precision)
-      url = full_url("/db/#{@database}/series", :q => query, :time_precision => time_precision)
-      series = get(url)
-
-      if block_given?
-        series.each { |s| yield s['name'], denormalize_series(s) }
-      else
-        series.reduce({}) do |col, s|
-          name = s['name']
-          denormalized_series = denormalize_series s
-          col[name] = denormalized_series
-          col
-        end
-      end
-    end
-
-    def delete_series(series)
-      delete full_url("/db/#{@database}/series/#{series}")
+      at_exit { stop! } if config.retry > 0
     end
 
     def stop!
@@ -333,141 +74,5 @@ module InfluxDB
     def stopped?
       @stopped
     end
-
-    private
-
-    def full_url(path, params={})
-      unless basic_auth?
-        params[:u] = @username
-        params[:p] = @password
-      end
-
-      query = params.map { |k, v| [CGI.escape(k.to_s), "=", CGI.escape(v.to_s)].join }.join("&")
-
-      URI::Generic.build(:path => "#{@path}#{path}", :query => query).to_s
-    end
-
-    def basic_auth?
-      @auth_method == 'basic_auth'
-    end
-
-    def get(url, return_response = false)
-      connect_with_retry do |http|
-        request = Net::HTTP::Get.new(url)
-        request.basic_auth @username, @password if basic_auth?
-        response = http.request(request)
-        if response.kind_of? Net::HTTPSuccess
-          if return_response
-            return response
-          else
-            return JSON.parse(response.body)
-          end
-        elsif response.kind_of? Net::HTTPUnauthorized
-          raise InfluxDB::AuthenticationError.new response.body
-        else
-          raise InfluxDB::Error.new response.body
-        end
-      end
-    end
-
-    def post(url, data)
-      headers = {"Content-Type" => "application/json"}
-      connect_with_retry do |http|
-        request = Net::HTTP::Post.new(url, headers)
-        request.basic_auth @username, @password if basic_auth?
-        response = http.request(request, data)
-        if response.kind_of? Net::HTTPSuccess
-          return response
-        elsif response.kind_of? Net::HTTPUnauthorized
-          raise InfluxDB::AuthenticationError.new response.body
-        else
-          raise InfluxDB::Error.new response.body
-        end
-      end
-    end
-
-    def delete(url, data = nil)
-      headers = {"Content-Type" => "application/json"}
-      connect_with_retry do |http|
-        request = Net::HTTP::Delete.new(url, headers)
-        request.basic_auth @username, @password if basic_auth?
-        response = http.request(request, data)
-        if response.kind_of? Net::HTTPSuccess
-          return response
-        elsif response.kind_of? Net::HTTPUnauthorized
-          raise InfluxDB::AuthenticationError.new response.body
-        else
-          raise InfluxDB::Error.new response.body
-        end
-      end
-    end
-
-    def connect_with_retry(&block)
-      hosts = @hosts.dup
-      delay = @initial_delay
-      retry_count = 0
-
-      begin
-        hosts.push(host = hosts.shift)
-        http = Net::HTTP.new(host, @port)
-        http.open_timeout = @open_timeout
-        http.read_timeout = @read_timeout
-        http.use_ssl = @use_ssl
-        http.verify_mode = OpenSSL::SSL::VERIFY_NONE unless @verify_ssl
-
-        if @use_ssl
-          store = OpenSSL::X509::Store.new
-          store.set_default_paths
-          if @ssl_ca_cert
-            if File.directory?(@ssl_ca_cert)
-              store.add_path(@ssl_ca_cert)
-            else
-              store.add_file(@ssl_ca_cert)
-            end
-          end
-          http.cert_store = store
-        end
-
-        block.call(http)
-
-      rescue Timeout::Error, *InfluxDB::NET_HTTP_EXCEPTIONS => e
-        retry_count += 1
-        if (@retry == -1 or retry_count <= @retry) and !stopped?
-          log :error, "Failed to contact host #{host}: #{e.inspect} - retrying in #{delay}s."
-          log :info, "Queue size is #{@queue.length}." unless @queue.nil?
-          sleep delay
-          delay = [@max_delay, delay * 2].min
-          retry
-        else
-          raise InfluxDB::ConnectionError, "Tried #{retry_count-1} times to reconnect but failed."
-        end
-      ensure
-        http.finish if http.started?
-      end
-    end
-
-    def denormalize_series series
-      columns = series['columns']
-
-      h = Hash.new(-1)
-      columns = columns.map {|v| h[v] += 1; h[v] > 0 ? "#{v}~#{h[v]}" : v }
-
-      series['points'].map do |point|
-        decoded_point = point.map do |value|
-          InfluxDB::PointValue.new(value).load
-        end
-        Hash[columns.zip(decoded_point)]
-      end
-    end
-
-    WORKER_MUTEX = Mutex.new
-    def worker
-      return @worker if @worker
-      WORKER_MUTEX.synchronize do
-        # this return is necessary because the previous mutex holder might have already assigned the @worker
-        return @worker if @worker
-        @worker = InfluxDB::Worker.new(self)
-      end
-    end
   end
 end
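
The retry logic deleted here (Client#connect_with_retry) is worth reading before diving into the new lib/influxdb/client/http.rb: it rotates through the configured hosts and backs off exponentially between attempts. A condensed paraphrase of that removed behaviour, with a hypothetical helper name and simplified error handling rather than the 0.2.0 implementation:

  # Rotate through hosts, doubling the sleep after each failure up to max_delay;
  # a negative max_retries means "retry forever" (mirrors @retry == -1 above).
  def with_host_failover(hosts, initial_delay: 0.01, max_delay: 30, max_retries: -1)
    hosts = hosts.dup
    delay = initial_delay
    attempts = 0
    begin
      hosts.push(host = hosts.shift) # round-robin: requeue the host we are about to try
      yield host
    rescue Timeout::Error, SystemCallError
      attempts += 1
      raise if max_retries >= 0 && attempts > max_retries
      sleep delay
      delay = [max_delay, delay * 2].min
      retry
    end
  end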