efigence-influxdb 0.1.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (46):
  1. checksums.yaml +7 -0
  2. data/.gitignore +22 -0
  3. data/.rubocop.yml +41 -0
  4. data/.travis.yml +11 -0
  5. data/Gemfile +11 -0
  6. data/LICENSE.txt +22 -0
  7. data/README.md +362 -0
  8. data/Rakefile +22 -0
  9. data/efigence-influxdb.gemspec +28 -0
  10. data/lib/influxdb.rb +21 -0
  11. data/lib/influxdb/client.rb +77 -0
  12. data/lib/influxdb/client/http.rb +98 -0
  13. data/lib/influxdb/config.rb +60 -0
  14. data/lib/influxdb/errors.rb +40 -0
  15. data/lib/influxdb/logging.rb +22 -0
  16. data/lib/influxdb/max_queue.rb +18 -0
  17. data/lib/influxdb/point_value.rb +31 -0
  18. data/lib/influxdb/query/cluster.rb +17 -0
  19. data/lib/influxdb/query/continuous_query.rb +36 -0
  20. data/lib/influxdb/query/core.rb +109 -0
  21. data/lib/influxdb/query/database.rb +21 -0
  22. data/lib/influxdb/query/series.rb +13 -0
  23. data/lib/influxdb/query/shard.rb +14 -0
  24. data/lib/influxdb/query/shard_space.rb +60 -0
  25. data/lib/influxdb/query/user.rb +38 -0
  26. data/lib/influxdb/version.rb +3 -0
  27. data/lib/influxdb/writer/async.rb +115 -0
  28. data/lib/influxdb/writer/udp.rb +21 -0
  29. data/spec/influxdb/cases/async_client_spec.rb +33 -0
  30. data/spec/influxdb/cases/query_cluster_spec.rb +65 -0
  31. data/spec/influxdb/cases/query_database_spec.rb +58 -0
  32. data/spec/influxdb/cases/query_series_spec.rb +50 -0
  33. data/spec/influxdb/cases/query_shard_spec.rb +43 -0
  34. data/spec/influxdb/cases/query_user_spec.rb +127 -0
  35. data/spec/influxdb/cases/querying_spec.rb +159 -0
  36. data/spec/influxdb/cases/retry_requests_spec.rb +97 -0
  37. data/spec/influxdb/cases/udp_client_spec.rb +21 -0
  38. data/spec/influxdb/cases/write_points_spec.rb +141 -0
  39. data/spec/influxdb/client_spec.rb +58 -0
  40. data/spec/influxdb/config_spec.rb +118 -0
  41. data/spec/influxdb/logging_spec.rb +48 -0
  42. data/spec/influxdb/max_queue_spec.rb +29 -0
  43. data/spec/influxdb/point_value_spec.rb +66 -0
  44. data/spec/influxdb/worker_spec.rb +23 -0
  45. data/spec/spec_helper.rb +8 -0
  46. metadata +192 -0
@@ -0,0 +1,36 @@
1
module InfluxDB
  module Query
    module ContinuousQuery # :nodoc:
      # Lists the continuous queries registered for +database+.
      # Returns [{'name' => ..., 'query' => ...}, ...]; an empty array when
      # the database has none (or does not appear in the response).
      def continuous_queries(database)
        response = execute("SHOW CONTINUOUS QUERIES", parse: true)
        for_db = fetch_series(response).select { |series| series['name'] == database }
        rows = for_db.fetch(0, {}).fetch('values', [])
        rows.map { |row| { 'name' => row.first, 'query' => row.last } }
      end

      # The 0.8-era management calls below have no 0.9 equivalent yet; the
      # old implementations are preserved, commented out, for reference.

      # # @example
      # #
      # # db.create_continuous_query(
      # #   "select mean(sys) as sys, mean(usr) as usr from cpu group by time(15m)",
      # #   "cpu.15m",
      # # )
      # #
      # # NOTE: Only cluster admin can call this
      # def create_continuous_query(query, name)
      #   query("#{query} into #{name}")
      # end

      # # NOTE: Only cluster admin can call this
      # def list_continuous_queries
      #   query("list continuous queries")
      #     .fetch("continuous queries", [])
      #     .map { |l| l["query"] }
      # end

      # # NOTE: Only cluster admin can call this
      # def delete_continuous_query(id)
      #   query("drop continuous query #{id}")
      # end
    end
  end
end
@@ -0,0 +1,109 @@
1
module InfluxDB
  module Query # :nodoc: all
    # rubocop:disable Metrics/MethodLength
    # rubocop:disable Metrics/AbcSize

    # Read/write primitives shared by the client: querying, point writing,
    # and URL construction.
    module Core
      # Liveness check against the server's /ping endpoint.
      def ping
        get "/ping"
      end

      # Runs an InfluxQL +query+. Options:
      #   :precision   - time precision (defaults to config.time_precision)
      #   :denormalize - zip columns/values into row hashes
      #                  (defaults to config.denormalize)
      # With a block, yields (name, tags, values) once per series; without
      # one, returns the (optionally denormalized) series list.
      def query(query, opts = {})
        precision = opts.fetch(:precision, config.time_precision)
        denormalize = opts.fetch(:denormalize, config.denormalize)

        url = full_url("/query", q: query, db: config.database, precision: precision)
        response = get(url, parse: true)
        series = fetch_series(response)

        return denormalize ? list_series(series) : series unless block_given?

        series.each do |s|
          yield s['name'], s['tags'], denormalize ? denormalize_series(s) : raw_values(s)
        end
      end

      # Example:
      # write_points([
      #   {
      #     series: 'cpu',
      #     tags: { host: 'server_nl', regios: 'us' },
      #     values: {internal: 5, external: 6},
      #     timestamp: 1422568543702900257
      #   },
      #   {
      #     series: 'gpu',
      #     values: {value: 0.9999},
      #   }
      # ])
      def write_points(data, precision = nil)
        batch = data.is_a?(Array) ? data : [data]
        writer.write(generate_payload(batch), precision)
      end

      # Example:
      # write_point('cpu', tags: {region: 'us'}, values: {internal: 60})
      def write_point(series, data, precision = nil)
        data.merge!(series: series)
        write_points(data, precision)
      end

      # Low-level write of an already-serialized line-protocol string.
      def write(data, precision)
        precision ||= config.time_precision
        post(full_url("/write", db: config.database, precision: precision), data)
      end

      private

      # Shapes raw series into [{'name', 'tags', 'values'}] hashes with
      # denormalized row values.
      def list_series(series)
        series.map do |s|
          {
            'name' => s['name'],
            'tags' => s['tags'],
            'values' => denormalize_series(s)
          }
        end
      end

      # Digs the series array out of a parsed /query response; returns []
      # when any level of the envelope is missing.
      def fetch_series(response)
        response.fetch('results', []).fetch(0, {}).fetch('series', [])
      end

      # Serializes points into newline-separated line protocol.
      def generate_payload(data)
        data.map { |point| InfluxDB::PointValue.new(point).dump }.join("\n")
      end

      # Fires a bare InfluxQL statement (no db/precision parameters).
      def execute(query, options = {})
        get(full_url("/query", q: query), options)
      end

      # Zips each row with the column names: [{col => val, ...}, ...].
      def denormalize_series(series)
        columns = series["columns"]
        series["values"].map { |row| Hash[columns.zip(row)] }
      end

      # Keeps only the 'columns' and 'values' keys of a series hash.
      def raw_values(series)
        series.select { |key, _| %w(columns values).include?(key) }
      end

      # Builds a relative URL with CGI-escaped query parameters; credentials
      # are appended as u/p params unless HTTP basic auth is configured.
      def full_url(path, params = {})
        unless basic_auth?
          params[:u] = config.username
          params[:p] = config.password
        end

        encoded = params.map do |key, value|
          "#{CGI.escape(key.to_s)}=#{CGI.escape(value.to_s)}"
        end

        URI::Generic.build(path: path, query: encoded.join("&")).to_s
      end
    end
  end
end
@@ -0,0 +1,21 @@
1
module InfluxDB
  module Query
    module Database # :nodoc:
      # CREATE DATABASE <name>.
      # NOTE(review): +name+ is interpolated verbatim into InfluxQL — callers
      # must pass trusted identifiers.
      def create_database(name)
        execute("CREATE DATABASE #{name}")
      end

      # DROP DATABASE <name>. Same trust caveat as create_database.
      def delete_database(name)
        execute("DROP DATABASE #{name}")
      end

      # Returns every database on the server as [{'name' => 'db1'}, ...].
      def list_databases
        response = execute("SHOW DATABASES", parse: true)
        names = fetch_series(response).fetch(0, {}).fetch('values', []).flatten
        names.map { |name| { 'name' => name } }
      end
    end
  end
end
@@ -0,0 +1,13 @@
1
module InfluxDB
  module Query
    module Series # :nodoc:
      # Placeholder: the 0.8-era series management API has no InfluxDB 0.9
      # equivalent yet. The previous implementations are kept below,
      # commented out, for reference.

      # def delete_series(series)
      #   delete full_url("/db/#{config.database}/series/#{series}")
      # end

      # def list_series
      #   query('list series').fetch('list_series_result').map { |l| l['name'] }
      # end
    end
  end
end
@@ -0,0 +1,14 @@
1
module InfluxDB
  module Query
    module Shard # :nodoc:
      # Placeholder: shard management endpoints from 0.8 are not exposed by
      # InfluxDB 0.9. Old implementations are kept below for reference.

      # def list_shards
      #   get full_url("/cluster/shards")
      # end

      # def delete_shard(shard_id, server_ids)
      #   data = JSON.generate("serverIds" => server_ids)
      #   delete full_url("/cluster/shards/#{shard_id}"), data
      # end
    end
  end
end
@@ -0,0 +1,60 @@
1
module InfluxDB
  module Query
    module ShardSpace # :nodoc:
      # Placeholder: shard-space configuration was an InfluxDB 0.8 concept
      # with no 0.9 counterpart. The old implementations are preserved
      # below, commented out, for reference.

      # def configure_database(database_name, options = {})
      #   url = full_url("/cluster/database_configs/#{database_name}")
      #   data = JSON.generate(default_database_configuration.merge(options))

      #   post(url, data)
      # end

      # def list_shard_spaces
      #   get full_url("/cluster/shard_spaces")
      # end

      # def shard_space_info(database_name, shard_space_name)
      #   list_shard_spaces.find do |shard_space|
      #     shard_space["database"] == database_name &&
      #       shard_space["name"] == shard_space_name
      #   end
      # end

      # def create_shard_space(database_name, options = {})
      #   url = full_url("/cluster/shard_spaces/#{database_name}")
      #   data = JSON.generate(default_shard_space_options.merge(options))

      #   post(url, data)
      # end

      # def delete_shard_space(database_name, shard_space_name)
      #   delete full_url("/cluster/shard_spaces/#{database_name}/#{shard_space_name}")
      # end

      # ## Get the shard space first, so the user doesn't have to specify the existing options
      # def update_shard_space(database_name, shard_space_name, options)
      #   shard_space_options = get_shard_space(database_name, shard_space_name)
      #   shard_space_options.delete("database")

      #   url = full_url("/cluster/shard_spaces/#{database_name}/#{shard_space_name}")
      #   data = JSON.generate(shard_space_options.merge(options))

      #   post(url, data)
      # end

      # def default_shard_space_options
      #   {
      #     "name" => "default",
      #     "regEx" => "/.*/",
      #     "retentionPolicy" => "inf",
      #     "shardDuration" => "7d",
      #     "replicationFactor" => 1,
      #     "split" => 1
      #   }
      # end

      # def default_database_configuration
      #   { spaces: [default_shard_space_options] }
      # end
    end
  end
end
@@ -0,0 +1,38 @@
1
module InfluxDB
  module Query
    module User # :nodoc:
      # create_database_user('testdb', 'user', 'pass') => grants all privileges by default
      # create_database_user('testdb', 'user', 'pass', :permissions => :read) => use [:read|:write|:all]
      # NOTE(review): usernames, database names and passwords are interpolated
      # verbatim into InfluxQL — callers must supply trusted values.
      def create_database_user(database, username, password, options = {})
        privilege = options.fetch(:permissions, :all).to_s.upcase
        execute("CREATE user #{username} WITH PASSWORD '#{password}'; GRANT #{privilege} ON #{database} TO #{username}")
      end

      # Changes an existing user's password.
      def update_user_password(username, password)
        execute("SET PASSWORD FOR #{username} = '#{password}'")
      end

      # permission => [:read|:write|:all]
      def grant_user_privileges(username, database, permission)
        execute("GRANT #{permission.to_s.upcase} ON #{database} TO #{username}")
      end

      # permission => [:read|:write|:all]
      def revoke_user_privileges(username, database, permission)
        execute("REVOKE #{permission.to_s.upcase} ON #{database} FROM #{username}")
      end

      # Removes the user entirely.
      def delete_user(username)
        execute("DROP USER #{username}")
      end

      # => [{"username"=>"usr", "admin"=>true}, {"username"=>"justauser", "admin"=>false}]
      def list_users
        response = execute("SHOW USERS", parse: true)
        rows = fetch_series(response).fetch(0, {}).fetch('values', [])
        rows.map { |row| { 'username' => row.first, 'admin' => row.last } }
      end
    end
  end
end
@@ -0,0 +1,3 @@
1
module InfluxDB # :nodoc:
  # Library version string.
  VERSION = "0.1.0"
end
@@ -0,0 +1,115 @@
1
require 'thread'
require "net/http"
require "uri"

module InfluxDB
  module Writer # :nodoc: all
    # Buffers payloads in a bounded in-memory queue and flushes them to the
    # client from a pool of background threads.
    class Async
      attr_reader :config, :client

      def initialize(client, config)
        @client = client
        @config = config
      end

      # Queues one payload (or an array of payloads) for asynchronous
      # delivery. +precision+ is accepted for interface compatibility with
      # the synchronous writer; batching happens in the Worker.
      def write(data, precision = nil)
        data = data.is_a?(Array) ? data : [data]
        # `each` instead of `map`: we only care about the push side effect.
        data.each { |point| worker.push(point) }
      end

      WORKER_MUTEX = Mutex.new

      # Lazily builds the shared Worker. Double-checked locking keeps the
      # fast path lock-free while guaranteeing a single Worker per writer.
      def worker
        return @worker if @worker
        WORKER_MUTEX.synchronize do
          # this return is necessary because the previous mutex holder
          # might have already assigned the @worker
          return @worker if @worker
          @worker = Worker.new(client, config)
        end
      end

      # Owns the queue and the flush threads.
      class Worker
        attr_reader :client, :queue, :threads

        include InfluxDB::Logging

        MAX_POST_POINTS = 1000      # max points per client.write call
        MAX_QUEUE_SIZE = 10_000     # default bound for the backing MaxQueue
        NUM_WORKER_THREADS = 3
        SLEEP_INTERVAL = 5          # max seconds a thread sleeps between checks

        def initialize(client, config)
          @client = client
          config = config.is_a?(Hash) ? config : {}
          @queue = InfluxDB::MaxQueue.new config.fetch(:max_queue, MAX_QUEUE_SIZE)

          spawn_threads!

          # Best-effort flush of anything still queued when the process exits.
          at_exit do
            log :debug, "Thread exiting, flushing queue."
            check_background_queue until queue.empty?
          end
        end

        def push(payload)
          queue.push(payload)
        end

        # Threads spawned by this worker tag themselves with our object_id.
        def current_threads
          Thread.list.select { |t| t[:influxdb] == object_id }
        end

        def current_thread_count
          Thread.list.count { |t| t[:influxdb] == object_id }
        end

        # rubocop:disable Metrics/CyclomaticComplexity
        # rubocop:disable Metrics/MethodLength
        # rubocop:disable Metrics/AbcSize

        # Starts NUM_WORKER_THREADS threads that drain the queue until the
        # client reports it has been stopped.
        def spawn_threads!
          @threads = []
          NUM_WORKER_THREADS.times do |thread_num|
            log :debug, "Spawning background worker thread #{thread_num}."

            @threads << Thread.new do
              Thread.current[:influxdb] = object_id

              until client.stopped?
                check_background_queue(thread_num)
                sleep rand(SLEEP_INTERVAL)
              end

              log :debug, "Exit background worker thread #{thread_num}."
            end
          end
        end

        # Drains up to MAX_POST_POINTS queued payloads at a time and hands
        # them to client.write. Write failures are logged, not raised, so a
        # bad batch never kills a worker thread.
        def check_background_queue(thread_num = 0)
          log :debug,
              "Checking background queue on thread #{thread_num} (#{current_thread_count} active)"

          loop do
            data = []

            while data.size < MAX_POST_POINTS && !queue.empty?
              begin
                data.push queue.pop(true)
              rescue ThreadError
                # Another thread drained the queue between empty? and pop.
                # Rescue ThreadError specifically (was a bare `rescue next`
                # modifier, which silently swallowed every StandardError).
                next
              end
            end

            return if data.empty?

            begin
              log :debug, "Found data in the queue! (#{data.length} points)"
              client.write(data)
            rescue StandardError => e
              # Was `puts`; route through the shared logger like the rest of
              # this class so failures show up in the configured log.
              log :error, "Cannot write data: #{e.inspect}"
            end

            # NOTE(review): breaking when the backlog EXCEEDS the batch size
            # stops draining exactly when we are furthest behind — looks
            # inverted, but preserved as-is; confirm intent before changing.
            break if queue.length > MAX_POST_POINTS
          end
        end
      end
    end
  end
end
@@ -0,0 +1,21 @@
1
module InfluxDB
  module Writer
    # Writes data to InfluxDB through UDP
    class UDP
      attr_accessor :socket
      attr_reader :host, :port

      # +client+ is retained for interface parity with the other writers.
      # +config+ may supply :host (default 'localhost') and :port
      # (default 4444); anything that is not a Hash is treated as empty.
      def initialize(client, config)
        @client = client
        opts = config.is_a?(Hash) ? config : {}
        @host = opts.fetch(:host, 'localhost')
        @port = opts.fetch(:port, 4444)
        self.socket = UDPSocket.new
        socket.connect(host, port)
      end

      # Sends +payload+ as a single datagram on the connected socket.
      # +precision+ is accepted for interface compatibility but has no
      # effect on the UDP transport.
      def write(payload, precision = nil)
        socket.send(payload, 0)
      end
    end
  end
end
@@ -0,0 +1,33 @@
1
require "spec_helper"
require "timeout"

# Exercises the async writer end-to-end through the public Client API:
# enough points are pushed to exceed one worker batch, forcing at least
# two batched `write` calls from the background threads.
describe InfluxDB::Client do
  let(:subject) { described_class.new(async: true) }

  let(:worker_klass) { InfluxDB::Writer::Async::Worker }

  # async: true must install the Async writer.
  specify { expect(subject.writer).to be_a(InfluxDB::Writer::Async) }

  describe "#write_point" do
    # NOTE(review): `payload` is never referenced in this spec — confirm
    # whether it was meant to be written below or can be removed.
    let(:payload) { "responses,region=eu value=5" }

    it "sends writes to client" do
      # exact times can be 2 or 3 (because we have 3 worker threads),
      # but cannot be less than 2 due to MAX_POST_POINTS limit
      expect(subject).to(receive(:write)).at_least(2).times

      (worker_klass::MAX_POST_POINTS + 100).times do
        subject.write_point('a', {})
      end

      # Bound the shutdown so a stuck worker fails the spec instead of
      # hanging the suite.
      Timeout.timeout(2 * worker_klass::SLEEP_INTERVAL) do
        subject.stop!
        # ensure threads exit
        subject.writer.worker.threads.each(&:join)

        # flush queue (we cannot test `at_exit`)
        subject.writer.worker.check_background_queue
      end
    end
  end
end