blazer 2.4.0 → 2.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -236,3 +236,7 @@ h2 {
 .schema-table {
   max-width: 500px;
 }
+
+.selectize-dropdown .create {
+  padding: 5px 8px;
+}
@@ -179,6 +179,7 @@ module Blazer
       if params[:commit] == "Fork"
         @query = Blazer::Query.new
         @query.creator = blazer_user if @query.respond_to?(:creator)
+        @query.status = "active" if @query.respond_to?(:status)
       end
       unless @query.editable?(blazer_user)
         @query.errors.add(:base, "Sorry, permission denied")
@@ -9,7 +9,7 @@ module Blazer
     validates :statement, presence: true

     scope :active, -> { column_names.include?("status") ? where(status: "active") : all }
-    scope :named, -> { where("blazer_queries.name <> ''") }
+    scope :named, -> { where.not(name: "") }

     def to_param
       [id, name].compact.join("-").gsub("'", "").parameterize
@@ -42,9 +42,9 @@ module Blazer
               end
             end
           end
-          rescue => e
-            error = e.message
-          end
+        rescue => e
+          error = e.message
+        end

         [columns, rows, error]
       end
@@ -0,0 +1,45 @@
+module Blazer
+  module Adapters
+    class HiveAdapter < BaseAdapter
+      def run_statement(statement, comment)
+        columns = []
+        rows = []
+        error = nil
+
+        begin
+          result = client.execute("#{statement} /*#{comment}*/")
+          columns = result.any? ? result.first.keys : []
+          rows = result.map(&:values)
+        rescue => e
+          error = e.message
+        end
+
+        [columns, rows, error]
+      end
+
+      def tables
+        client.execute("SHOW TABLES").map { |r| r["tab_name"] }
+      end
+
+      def preview_statement
+        "SELECT * FROM {table} LIMIT 10"
+      end
+
+      protected
+
+      def client
+        @client ||= begin
+          uri = URI.parse(settings["url"])
+          Hexspace::Client.new(
+            host: uri.host,
+            port: uri.port,
+            username: uri.user,
+            password: uri.password,
+            database: uri.path.sub(/\A\//, ""),
+            mode: uri.scheme.to_sym
+          )
+        end
+      end
+    end
+  end
+end
@@ -0,0 +1,54 @@
+module Blazer
+  module Adapters
+    class IgniteAdapter < BaseAdapter
+      def run_statement(statement, comment)
+        columns = []
+        rows = []
+        error = nil
+
+        begin
+          result = client.query("#{statement} /*#{comment}*/", schema: default_schema, statement_type: :select, timeout: data_source.timeout)
+          columns = result.any? ? result.first.keys : []
+          rows = result.map(&:values)
+        rescue => e
+          error = e.message
+        end
+
+        [columns, rows, error]
+      end
+
+      def preview_statement
+        "SELECT * FROM {table} LIMIT 10"
+      end
+
+      def tables
+        sql = "SELECT table_schema, table_name FROM information_schema.tables WHERE table_schema NOT IN ('INFORMATION_SCHEMA', 'SYS')"
+        result = data_source.run_statement(sql)
+        result.rows.reject { |row| row[1].start_with?("__") }.map do |row|
+          (row[0] == default_schema ? row[1] : "#{row[0]}.#{row[1]}").downcase
+        end
+      end
+
+      # TODO figure out error
+      # Table `__T0` can be accessed only within Ignite query context.
+      # def schema
+      #   sql = "SELECT table_schema, table_name, column_name, data_type, ordinal_position FROM information_schema.columns WHERE table_schema NOT IN ('INFORMATION_SCHEMA', 'SYS')"
+      #   result = data_source.run_statement(sql)
+      #   result.rows.group_by { |r| [r[0], r[1]] }.map { |k, vs| {schema: k[0], table: k[1], columns: vs.sort_by { |v| v[2] }.map { |v| {name: v[2], data_type: v[3]} }} }.sort_by { |t| [t[:schema] == default_schema ? "" : t[:schema], t[:table]] }
+      # end

+      private
+
+      def default_schema
+        "PUBLIC"
+      end
+
+      def client
+        @client ||= begin
+          uri = URI(settings["url"])
+          Ignite::Client.new(host: uri.host, port: uri.port, username: uri.user, password: uri.password)
+        end
+      end
+    end
+  end
+end
@@ -0,0 +1,9 @@
+module Blazer
+  module Adapters
+    class SparkAdapter < HiveAdapter
+      def tables
+        client.execute("SHOW TABLES").map { |r| r["tableName"] }
+      end
+    end
+  end
+end
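The Hive adapter builds its Hexspace client from the data source's settings["url"], and the Spark adapter only overrides tables because Spark's SHOW TABLES returns a tableName column instead of tab_name. Since neither "hive" nor "spark" is auto-detected from the URL scheme (see the detect_adapter change further down), a data source would presumably declare the adapter explicitly. A minimal config/blazer.yml sketch, with placeholder host, credentials, and scheme (the scheme is handed to Hexspace as its mode, so it must be a value the Hexspace client accepts; none is shown in this diff):

    data_sources:
      warehouse:
        adapter: hive
        url: <mode>://user:password@hive-host:10000/default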
@@ -123,7 +123,7 @@ module Blazer
       end

       def supports_cohort_analysis?
-        postgresql?
+        postgresql? || mysql?
       end

       # TODO treat date columns as already in time zone
@@ -131,6 +131,27 @@ module Blazer
         raise "Cohort analysis not supported" unless supports_cohort_analysis?

         cohort_column = statement =~ /\bcohort_time\b/ ? "cohort_time" : "conversion_time"
+        tzname = Blazer.time_zone.tzinfo.name
+
+        if mysql?
+          time_sql = "CONVERT_TZ(cohorts.cohort_time, '+00:00', ?)"
+          case period
+          when "day"
+            date_sql = "CAST(DATE_FORMAT(#{time_sql}, '%Y-%m-%d') AS DATE)"
+            date_params = [tzname]
+          when "week"
+            date_sql = "CAST(DATE_FORMAT(#{time_sql} - INTERVAL ((5 + DAYOFWEEK(#{time_sql})) % 7) DAY, '%Y-%m-%d') AS DATE)"
+            date_params = [tzname, tzname]
+          else
+            date_sql = "CAST(DATE_FORMAT(#{time_sql}, '%Y-%m-01') AS DATE)"
+            date_params = [tzname]
+          end
+          bucket_sql = "CAST(CEIL(TIMESTAMPDIFF(SECOND, cohorts.cohort_time, query.conversion_time) / ?) AS INTEGER)"
+        else
+          date_sql = "date_trunc(?, cohorts.cohort_time::timestamptz AT TIME ZONE ?)::date"
+          date_params = [period, tzname]
+          bucket_sql = "CEIL(EXTRACT(EPOCH FROM query.conversion_time - cohorts.cohort_time) / ?)::int"
+        end

         # WITH not an optimization fence in Postgres 12+
         statement = <<~SQL
@@ -143,14 +164,14 @@ module Blazer
             GROUP BY 1
           )
           SELECT
-            date_trunc(?, cohorts.cohort_time::timestamptz AT TIME ZONE ?)::date AS period,
+            #{date_sql} AS period,
             0 AS bucket,
             COUNT(DISTINCT cohorts.user_id)
           FROM cohorts GROUP BY 1
           UNION ALL
           SELECT
-            date_trunc(?, cohorts.cohort_time::timestamptz AT TIME ZONE ?)::date AS period,
-            CEIL(EXTRACT(EPOCH FROM query.conversion_time - cohorts.cohort_time) / ?)::int AS bucket,
+            #{date_sql} AS period,
+            #{bucket_sql} AS bucket,
             COUNT(DISTINCT query.user_id)
           FROM cohorts INNER JOIN query ON query.user_id = cohorts.user_id
           WHERE query.conversion_time IS NOT NULL
@@ -158,8 +179,7 @@ module Blazer
           #{cohort_column == "conversion_time" ? "AND query.conversion_time != cohorts.cohort_time" : ""}
           GROUP BY 1, 2
         SQL
-        tzname = Blazer.time_zone.tzinfo.name
-        params = [statement, period, tzname, period, tzname, days.to_i * 86400]
+        params = [statement] + date_params + date_params + [days.to_i * 86400]
         connection_model.send(:sanitize_sql_array, params)
       end

@@ -206,6 +226,8 @@ module Blazer
             "public"
           elsif sqlserver?
             "dbo"
+          elsif connection_model.respond_to?(:connection_db_config)
+            connection_model.connection_db_config.database
           else
             connection_model.connection_config[:database]
           end
@@ -144,6 +144,7 @@ module Blazer

     def adapter_instance
       @adapter_instance ||= begin
+        # TODO add required settings to adapters
         unless settings["url"] || Rails.env.development? || ["bigquery", "athena", "snowflake", "salesforce"].include?(settings["adapter"])
           raise Blazer::Error, "Empty url for data source: #{id}"
         end
@@ -182,11 +183,12 @@ module Blazer
       Blazer::Result.new(self, columns, rows, error, nil, cache && !cache_data.nil?)
     end

+    # TODO check for adapter with same name, default to sql
     def detect_adapter
-      schema = settings["url"].to_s.split("://").first
-      case schema
-      when "mongodb", "presto", "cassandra"
-        schema
+      scheme = settings["url"].to_s.split("://").first
+      case scheme
+      when "mongodb", "presto", "cassandra", "ignite"
+        scheme
       else
         "sql"
       end
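Because detect_adapter now recognizes the ignite scheme, an Ignite data source can presumably be configured with only a URL and no explicit adapter key. A sketch of a config/blazer.yml entry with an illustrative host and port (the IgniteAdapter above reads just host, port, user, and password from the URL):

    data_sources:
      ignite:
        url: ignite://user:password@ignite-host:10800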
data/lib/blazer/result.rb CHANGED
@@ -174,7 +174,19 @@ module Blazer
     def anomaly?(series)
       series = series.reject { |v| v[0].nil? }.sort_by { |v| v[0] }

-      if Blazer.anomaly_checks == "trend"
+      case Blazer.anomaly_checks
+      when "prophet"
+        df = Rover::DataFrame.new(series[0..-2].map { |v| {"ds" => v[0], "y" => v[1]} })
+        m = Prophet.new(interval_width: 0.99)
+        m.logger.level = ::Logger::FATAL # no logging
+        m.fit(df)
+        future = Rover::DataFrame.new(series[-1..-1].map { |v| {"ds" => v[0]} })
+        forecast = m.predict(future).to_a[0]
+        lower = forecast["yhat_lower"]
+        upper = forecast["yhat_upper"]
+        value = series.last[1]
+        value < lower || value > upper
+      when "trend"
         anomalies = Trend.anomalies(Hash[series])
         anomalies.include?(series.last[0])
       else
@@ -1,3 +1,3 @@
 module Blazer
-  VERSION = "2.4.0"
+  VERSION = "2.4.4"
 end
data/lib/blazer.rb CHANGED
@@ -18,12 +18,15 @@ require "blazer/adapters/cassandra_adapter"
 require "blazer/adapters/drill_adapter"
 require "blazer/adapters/druid_adapter"
 require "blazer/adapters/elasticsearch_adapter"
+require "blazer/adapters/hive_adapter"
+require "blazer/adapters/ignite_adapter"
 require "blazer/adapters/influxdb_adapter"
 require "blazer/adapters/mongodb_adapter"
 require "blazer/adapters/neo4j_adapter"
 require "blazer/adapters/presto_adapter"
 require "blazer/adapters/salesforce_adapter"
 require "blazer/adapters/soda_adapter"
+require "blazer/adapters/spark_adapter"
 require "blazer/adapters/sql_adapter"
 require "blazer/adapters/snowflake_adapter"

@@ -239,11 +242,14 @@ Blazer.register_adapter "cassandra", Blazer::Adapters::CassandraAdapter
 Blazer.register_adapter "drill", Blazer::Adapters::DrillAdapter
 Blazer.register_adapter "druid", Blazer::Adapters::DruidAdapter
 Blazer.register_adapter "elasticsearch", Blazer::Adapters::ElasticsearchAdapter
+Blazer.register_adapter "hive", Blazer::Adapters::HiveAdapter
+Blazer.register_adapter "ignite", Blazer::Adapters::IgniteAdapter
 Blazer.register_adapter "influxdb", Blazer::Adapters::InfluxdbAdapter
 Blazer.register_adapter "neo4j", Blazer::Adapters::Neo4jAdapter
 Blazer.register_adapter "presto", Blazer::Adapters::PrestoAdapter
 Blazer.register_adapter "mongodb", Blazer::Adapters::MongodbAdapter
 Blazer.register_adapter "salesforce", Blazer::Adapters::SalesforceAdapter
 Blazer.register_adapter "soda", Blazer::Adapters::SodaAdapter
+Blazer.register_adapter "spark", Blazer::Adapters::SparkAdapter
 Blazer.register_adapter "sql", Blazer::Adapters::SqlAdapter
 Blazer.register_adapter "snowflake", Blazer::Adapters::SnowflakeAdapter
@@ -63,11 +63,11 @@ check_schedules:

 # enable anomaly detection
 # note: with trend, time series are sent to https://trendapi.org
-# anomaly_checks: trend / r
+# anomaly_checks: prophet / trend / r

 # enable forecasting
 # note: with trend, time series are sent to https://trendapi.org
-# forecasting: trend / prophet
+# forecasting: prophet / trend

 # enable map
 # mapbox_access_token: <%%= ENV["MAPBOX_ACCESS_TOKEN"] %>
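With the prophet branch added to Result#anomaly? above, anomaly detection can presumably be switched to Prophet by uncommenting the option in config/blazer.yml; the Prophet and Rover::DataFrame calls in the new code assume the prophet-rb gem (and the Rover data frame library it uses) is installed:

    # enable anomaly detection
    anomaly_checks: prophet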
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: blazer
 version: !ruby/object:Gem::Version
-  version: 2.4.0
+  version: 2.4.4
 platform: ruby
 authors:
 - Andrew Kane
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2020-12-15 00:00:00.000000000 Z
+date: 2021-09-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: railties
@@ -66,36 +66,8 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: 0.1.1
-- !ruby/object:Gem::Dependency
-  name: bundler
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :development
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-- !ruby/object:Gem::Dependency
-  name: rake
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :development
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
 description:
-email: andrew@chartkick.com
+email: andrew@ankane.org
 executables: []
 extensions: []
 extra_rdoc_files: []
@@ -194,6 +166,8 @@ files:
 - lib/blazer/adapters/drill_adapter.rb
 - lib/blazer/adapters/druid_adapter.rb
 - lib/blazer/adapters/elasticsearch_adapter.rb
+- lib/blazer/adapters/hive_adapter.rb
+- lib/blazer/adapters/ignite_adapter.rb
 - lib/blazer/adapters/influxdb_adapter.rb
 - lib/blazer/adapters/mongodb_adapter.rb
 - lib/blazer/adapters/neo4j_adapter.rb
@@ -201,6 +175,7 @@ files:
 - lib/blazer/adapters/salesforce_adapter.rb
 - lib/blazer/adapters/snowflake_adapter.rb
 - lib/blazer/adapters/soda_adapter.rb
+- lib/blazer/adapters/spark_adapter.rb
 - lib/blazer/adapters/sql_adapter.rb
 - lib/blazer/data_source.rb
 - lib/blazer/detect_anomalies.R
@@ -250,7 +225,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.1.4
+rubygems_version: 3.2.22
 signing_key:
 specification_version: 4
 summary: Explore your data with SQL. Easily create charts and dashboards, and share