sql-jarvis 1.8.0
- checksums.yaml +7 -0
- data/.gitignore +14 -0
- data/CHANGELOG.md +228 -0
- data/Gemfile +4 -0
- data/LICENSE.txt +22 -0
- data/README.md +775 -0
- data/Rakefile +1 -0
- data/app/assets/fonts/blazer/glyphicons-halflings-regular.eot +0 -0
- data/app/assets/fonts/blazer/glyphicons-halflings-regular.svg +288 -0
- data/app/assets/fonts/blazer/glyphicons-halflings-regular.ttf +0 -0
- data/app/assets/fonts/blazer/glyphicons-halflings-regular.woff +0 -0
- data/app/assets/fonts/blazer/glyphicons-halflings-regular.woff2 +0 -0
- data/app/assets/javascripts/blazer/Chart.js +14145 -0
- data/app/assets/javascripts/blazer/Sortable.js +1144 -0
- data/app/assets/javascripts/blazer/ace.js +6 -0
- data/app/assets/javascripts/blazer/ace/ace.js +11 -0
- data/app/assets/javascripts/blazer/ace/ext-language_tools.js +5 -0
- data/app/assets/javascripts/blazer/ace/mode-sql.js +1 -0
- data/app/assets/javascripts/blazer/ace/snippets/sql.js +1 -0
- data/app/assets/javascripts/blazer/ace/snippets/text.js +1 -0
- data/app/assets/javascripts/blazer/ace/theme-twilight.js +1 -0
- data/app/assets/javascripts/blazer/application.js +79 -0
- data/app/assets/javascripts/blazer/bootstrap.js +2366 -0
- data/app/assets/javascripts/blazer/chartkick.js +1693 -0
- data/app/assets/javascripts/blazer/daterangepicker.js +1505 -0
- data/app/assets/javascripts/blazer/fuzzysearch.js +24 -0
- data/app/assets/javascripts/blazer/highlight.pack.js +1 -0
- data/app/assets/javascripts/blazer/jquery.js +10308 -0
- data/app/assets/javascripts/blazer/jquery.stickytableheaders.js +263 -0
- data/app/assets/javascripts/blazer/jquery_ujs.js +469 -0
- data/app/assets/javascripts/blazer/moment-timezone.js +1007 -0
- data/app/assets/javascripts/blazer/moment.js +3043 -0
- data/app/assets/javascripts/blazer/queries.js +110 -0
- data/app/assets/javascripts/blazer/routes.js +23 -0
- data/app/assets/javascripts/blazer/selectize.js +3667 -0
- data/app/assets/javascripts/blazer/stupidtable.js +114 -0
- data/app/assets/javascripts/blazer/vue.js +7515 -0
- data/app/assets/stylesheets/blazer/application.css +198 -0
- data/app/assets/stylesheets/blazer/bootstrap.css.erb +6202 -0
- data/app/assets/stylesheets/blazer/daterangepicker-bs3.css +375 -0
- data/app/assets/stylesheets/blazer/github.css +125 -0
- data/app/assets/stylesheets/blazer/selectize.default.css +387 -0
- data/app/controllers/blazer/base_controller.rb +103 -0
- data/app/controllers/blazer/checks_controller.rb +56 -0
- data/app/controllers/blazer/dashboards_controller.rb +105 -0
- data/app/controllers/blazer/queries_controller.rb +325 -0
- data/app/helpers/blazer/base_helper.rb +57 -0
- data/app/mailers/blazer/check_mailer.rb +27 -0
- data/app/models/blazer/audit.rb +6 -0
- data/app/models/blazer/check.rb +95 -0
- data/app/models/blazer/connection.rb +5 -0
- data/app/models/blazer/dashboard.rb +13 -0
- data/app/models/blazer/dashboard_query.rb +9 -0
- data/app/models/blazer/query.rb +31 -0
- data/app/models/blazer/record.rb +5 -0
- data/app/views/blazer/_nav.html.erb +16 -0
- data/app/views/blazer/_variables.html.erb +102 -0
- data/app/views/blazer/check_mailer/failing_checks.html.erb +6 -0
- data/app/views/blazer/check_mailer/state_change.html.erb +47 -0
- data/app/views/blazer/checks/_form.html.erb +71 -0
- data/app/views/blazer/checks/edit.html.erb +1 -0
- data/app/views/blazer/checks/index.html.erb +40 -0
- data/app/views/blazer/checks/new.html.erb +1 -0
- data/app/views/blazer/dashboards/_form.html.erb +76 -0
- data/app/views/blazer/dashboards/edit.html.erb +1 -0
- data/app/views/blazer/dashboards/new.html.erb +1 -0
- data/app/views/blazer/dashboards/show.html.erb +47 -0
- data/app/views/blazer/queries/_form.html.erb +240 -0
- data/app/views/blazer/queries/edit.html.erb +2 -0
- data/app/views/blazer/queries/home.html.erb +152 -0
- data/app/views/blazer/queries/new.html.erb +2 -0
- data/app/views/blazer/queries/run.html.erb +163 -0
- data/app/views/blazer/queries/schema.html.erb +18 -0
- data/app/views/blazer/queries/show.html.erb +73 -0
- data/app/views/layouts/blazer/application.html.erb +24 -0
- data/blazer.gemspec +26 -0
- data/config/routes.rb +16 -0
- data/lib/blazer.rb +185 -0
- data/lib/blazer/adapters/athena_adapter.rb +128 -0
- data/lib/blazer/adapters/base_adapter.rb +53 -0
- data/lib/blazer/adapters/bigquery_adapter.rb +67 -0
- data/lib/blazer/adapters/drill_adapter.rb +28 -0
- data/lib/blazer/adapters/elasticsearch_adapter.rb +49 -0
- data/lib/blazer/adapters/mongodb_adapter.rb +39 -0
- data/lib/blazer/adapters/presto_adapter.rb +45 -0
- data/lib/blazer/adapters/sql_adapter.rb +182 -0
- data/lib/blazer/data_source.rb +193 -0
- data/lib/blazer/detect_anomalies.R +19 -0
- data/lib/blazer/engine.rb +47 -0
- data/lib/blazer/result.rb +170 -0
- data/lib/blazer/run_statement.rb +40 -0
- data/lib/blazer/run_statement_job.rb +21 -0
- data/lib/blazer/version.rb +3 -0
- data/lib/generators/blazer/install_generator.rb +39 -0
- data/lib/generators/blazer/templates/config.yml +62 -0
- data/lib/generators/blazer/templates/install.rb +45 -0
- data/lib/tasks/blazer.rake +10 -0
- metadata +211 -0
data/lib/blazer/adapters/athena_adapter.rb
@@ -0,0 +1,128 @@
module Blazer
  module Adapters
    class AthenaAdapter < BaseAdapter
      def run_statement(statement, comment)
        require "digest/md5"

        columns = []
        rows = []
        error = nil

        begin
          resp =
            client.start_query_execution(
              query_string: statement,
              # use token so we fetch cached results after query is run
              client_request_token: Digest::MD5.hexdigest(statement),
              query_execution_context: {
                database: database,
              },
              result_configuration: {
                output_location: settings["output_location"]
              }
            )
          query_execution_id = resp.query_execution_id

          timeout = data_source.timeout || 300
          stop_at = Time.now + timeout
          resp = nil

          begin
            resp = client.get_query_results(
              query_execution_id: query_execution_id
            )
          rescue Aws::Athena::Errors::InvalidRequestException => e
            if e.message != "Query has not yet finished. Current state: RUNNING"
              raise e
            end
            if Time.now < stop_at
              sleep(3)
              retry
            end
          end

          if resp && resp.result_set
            column_info = resp.result_set.result_set_metadata.column_info
            columns = column_info.map(&:name)
            column_types = column_info.map(&:type)

            untyped_rows = []

            # paginated
            resp.each do |page|
              untyped_rows.concat page.result_set.rows.map { |r| r.data.map(&:var_char_value) }
            end

            utc = ActiveSupport::TimeZone['Etc/UTC']

            rows = untyped_rows[1..-1] || []
            column_types.each_with_index do |ct, i|
              # TODO more column_types
              case ct
              when "timestamp"
                rows.each do |row|
                  row[i] = utc.parse(row[i])
                end
              when "date"
                rows.each do |row|
                  row[i] = Date.parse(row[i])
                end
              when "bigint"
                rows.each do |row|
                  row[i] = row[i].to_i
                end
              when "double"
                rows.each do |row|
                  row[i] = row[i].to_f
                end
              end
            end
          elsif resp
            error = fetch_error(query_execution_id)
          else
            error = Blazer::TIMEOUT_MESSAGE
          end
        rescue Aws::Athena::Errors::InvalidRequestException => e
          error = e.message
          if error == "Query did not finish successfully. Final query state: FAILED"
            error = fetch_error(query_execution_id)
          end
        end

        [columns, rows, error]
      end

      def tables
        glue.get_tables(database_name: database).table_list.map(&:name).sort
      end

      def schema
        glue.get_tables(database_name: database).table_list.map { |t| {table: t.name, columns: t.storage_descriptor.columns.map { |c| {name: c.name, data_type: c.type} }} }
      end

      def preview_statement
        "SELECT * FROM {table} LIMIT 10"
      end

      private

      def database
        @database ||= settings["database"] || "default"
      end

      def fetch_error(query_execution_id)
        client.get_query_execution(
          query_execution_id: query_execution_id
        ).query_execution.status.state_change_reason
      end

      def client
        @client ||= Aws::Athena::Client.new
      end

      def glue
        @glue ||= Aws::Glue::Client.new
      end
    end
  end
end
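For reference, a minimal sketch of driving this adapter directly is shown below. The stub data source, settings values, and query are illustrative assumptions; in the engine, Blazer::DataSource builds the data source from config/blazer.yml and supplies the comment itself.

# Sketch only; assumes Blazer plus the aws-sdk-athena and aws-sdk-glue gems are
# loaded and AWS credentials are configured in the environment.
require "ostruct"

# Hypothetical stand-in for Blazer::DataSource; only the fields the adapter
# reads are provided.
source = OpenStruct.new(
  settings: {
    "database" => "analytics",                          # placeholder Athena/Glue database
    "output_location" => "s3://example-bucket/athena/"  # placeholder results bucket
  },
  timeout: 300 # seconds to poll before giving up
)

adapter = Blazer::Adapters::AthenaAdapter.new(source)

# Every adapter returns the same triple: [columns, rows, error].
columns, rows, error = adapter.run_statement("SELECT 1 AS one", "run_id:demo")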
data/lib/blazer/adapters/base_adapter.rb
@@ -0,0 +1,53 @@
module Blazer
  module Adapters
    class BaseAdapter
      attr_reader :data_source

      def initialize(data_source)
        @data_source = data_source
      end

      def run_statement(statement, comment)
        # the one required method
      end

      def tables
        [] # optional, but nice to have
      end

      def schema
        [] # optional, but nice to have
      end

      def preview_statement
        "" # also optional, but nice to have
      end

      def reconnect
        # optional
      end

      def cost(statement)
        # optional
      end

      def explain(statement)
        # optional
      end

      def cancel(run_id)
        # optional
      end

      def cachable?(statement)
        true # optional
      end

      protected

      def settings
        @data_source.settings
      end
    end
  end
end
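This base class is effectively the adapter contract: implement run_statement and return [columns, rows, error]; everything else has a safe default. As an illustration, here is a hypothetical adapter that reads CSV files. It is not part of the gem, just a sketch of how little a custom adapter needs.

# Hypothetical example adapter (not shipped with the gem): treats the
# "statement" as a CSV filename under a directory given in the data source
# settings ("path" is an assumed settings key).
require "csv"

module Blazer
  module Adapters
    class CsvAdapter < BaseAdapter
      def run_statement(statement, comment)
        columns = []
        rows = []
        error = nil

        begin
          table = CSV.read(File.join(settings["path"], statement.strip), headers: true)
          columns = table.headers
          rows = table.map(&:fields)
        rescue => e
          error = e.message
        end

        [columns, rows, error]
      end

      def tables
        Dir[File.join(settings["path"], "*.csv")].map { |f| File.basename(f) }.sort
      end

      def preview_statement
        "{table}"
      end
    end
  end
end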
data/lib/blazer/adapters/bigquery_adapter.rb
@@ -0,0 +1,67 @@
module Blazer
  module Adapters
    class BigQueryAdapter < BaseAdapter
      def run_statement(statement, comment)
        columns = []
        rows = []
        error = nil

        begin
          options = {}
          options[:timeout] = data_source.timeout.to_i * 1000 if data_source.timeout
          results = bigquery.query(statement, options) # ms
          if results.complete?
            columns = results.first.keys.map(&:to_s) if results.size > 0
            rows = results.map(&:values)
          else
            error = Blazer::TIMEOUT_MESSAGE
          end
        rescue => e
          error = e.message
        end

        [columns, rows, error]
      end

      def tables
        table_refs.map { |t| "#{t.project_id}.#{t.dataset_id}.#{t.table_id}" }
      end

      def schema
        table_refs.map do |table_ref|
          {
            schema: table_ref.dataset_id,
            table: table_ref.table_id,
            columns: table_columns(table_ref)
          }
        end
      end

      def preview_statement
        "SELECT * FROM `{table}` LIMIT 10"
      end

      private

      def bigquery
        @bigquery ||= begin
          require "google/cloud/bigquery"
          Google::Cloud::Bigquery.new(
            project: settings["project"],
            keyfile: settings["keyfile"]
          )
        end
      end

      def table_refs
        bigquery.datasets.map(&:tables).flat_map { |table_list| table_list.map(&:table_ref) }
      end

      def table_columns(table_ref)
        schema = bigquery.service.get_table(table_ref.dataset_id, table_ref.table_id).schema
        return [] if schema.nil?
        schema.fields.map { |field| {name: field.name, data_type: field.type} }
      end
    end
  end
end
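The only settings this adapter reads are "project" and "keyfile", and it converts the data source timeout from seconds to the milliseconds the BigQuery client expects. A small illustration of the table naming and timeout conversion (ids are placeholders):

# Illustrative only: the fully qualified name #tables builds for each table
# ref, and the seconds-to-milliseconds conversion applied to the timeout.
require "ostruct"

t = OpenStruct.new(project_id: "my-project", dataset_id: "events", table_id: "pageviews")
"#{t.project_id}.#{t.dataset_id}.#{t.table_id}"   # => "my-project.events.pageviews"
# which fits preview_statement: SELECT * FROM `my-project.events.pageviews` LIMIT 10

timeout = 30          # data source timeout in seconds
timeout.to_i * 1000   # => 30000, the millisecond value passed to bigquery.query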
data/lib/blazer/adapters/drill_adapter.rb
@@ -0,0 +1,28 @@
module Blazer
  module Adapters
    class DrillAdapter < BaseAdapter
      def run_statement(statement, comment)
        columns = []
        rows = []
        error = nil

        begin
          # remove trailing semicolon
          response = drill.query(statement.sub(/;\s*\z/, ""))
          rows = response.map { |r| r.values }
          columns = rows.any? ? response.first.keys : []
        rescue => e
          error = e.message
        end

        [columns, rows, error]
      end

      private

      def drill
        @drill ||= ::Drill.new(url: settings["url"])
      end
    end
  end
end
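A quick illustration of the trailing-semicolon strip used above, plus a placeholder endpoint for the ::Drill client (which typically comes from the drill-sergeant gem):

# Illustrative only; the query and URL are placeholders.
"SELECT * FROM cp.`employee.json` LIMIT 5;".sub(/;\s*\z/, "")
# => "SELECT * FROM cp.`employee.json` LIMIT 5"
# Drill.new(url: "http://drill.example.internal:8047")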
data/lib/blazer/adapters/elasticsearch_adapter.rb
@@ -0,0 +1,49 @@
module Blazer
  module Adapters
    class ElasticsearchAdapter < BaseAdapter
      def run_statement(statement, comment)
        columns = []
        rows = []
        error = nil

        begin
          header, body = statement.gsub(/\/\/.+/, "").strip.split("\n", 2)
          body = JSON.parse(body)
          body["timeout"] ||= data_source.timeout if data_source.timeout
          response = client.msearch(body: [JSON.parse(header), body])["responses"].first
          if response["error"]
            error = response["error"]
          else
            hits = response["hits"]["hits"]
            source_keys = hits.flat_map { |r| r["_source"].keys }.uniq
            hit_keys = (hits.first.try(:keys) || []) - ["_source"]
            columns = source_keys + hit_keys
            rows =
              hits.map do |r|
                source = r["_source"]
                source_keys.map { |k| source[k] } + hit_keys.map { |k| r[k] }
              end
          end
        rescue => e
          error = e.message
        end

        [columns, rows, error]
      end

      def tables
        client.indices.get_aliases(name: "*").map { |k, v| [k, v["aliases"].keys] }.flatten.uniq.sort
      end

      def preview_statement
        %!// header\n{"index": "{table}"}\n\n// body\n{"query": {"match_all": {}}, "size": 10}!
      end

      protected

      def client
        @client ||= Elasticsearch::Client.new(url: settings["url"])
      end
    end
  end
end
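Statements for this adapter are two JSON documents — an msearch header and a search body — with // comment lines stripped before parsing, exactly the shape preview_statement shows. An example statement (the index name is a placeholder):

# Mirrors preview_statement; run_statement strips the // comments, parses both
# JSON documents, and sends them as a single msearch request.
statement = <<~ES
  // header
  {"index": "logs"}

  // body
  {"query": {"match_all": {}}, "size": 10}
ES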
data/lib/blazer/adapters/mongodb_adapter.rb
@@ -0,0 +1,39 @@
module Blazer
  module Adapters
    class MongodbAdapter < BaseAdapter
      def run_statement(statement, comment)
        columns = []
        rows = []
        error = nil

        begin
          documents = db.command({:$eval => "#{statement.strip}.toArray()"}).documents.first["retval"]
          columns = documents.flat_map { |r| r.keys }.uniq
          rows = documents.map { |r| columns.map { |c| r[c] } }
        rescue => e
          error = e.message
        end

        [columns, rows, error]
      end

      def tables
        db.collection_names
      end

      def preview_statement
        "db.{table}.find().limit(10)"
      end

      protected

      def client
        @client ||= Mongo::Client.new(settings["url"], connect_timeout: 1, socket_timeout: 1, server_selection_timeout: 1)
      end

      def db
        @db ||= client.database
      end
    end
  end
end
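Statements here are JavaScript expressions evaluated server-side; the adapter appends .toArray() and runs the expression through the (deprecated) $eval command, reading results from "retval". For example:

# Illustrative only: "users" is a placeholder collection.
statement = "db.users.find().limit(10)"
# => db.command({:$eval => "db.users.find().limit(10).toArray()"})
# Note: this requires server-side JavaScript to be enabled on the deployment.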
data/lib/blazer/adapters/presto_adapter.rb
@@ -0,0 +1,45 @@
module Blazer
  module Adapters
    class PrestoAdapter < BaseAdapter
      def run_statement(statement, comment)
        columns = []
        rows = []
        error = nil

        begin
          columns, rows = client.run("#{statement} /*#{comment}*/")
          columns = columns.map(&:name)
        rescue => e
          error = e.message
        end

        [columns, rows, error]
      end

      def tables
        _, rows = client.run("SHOW TABLES")
        rows.map(&:first)
      end

      def preview_statement
        "SELECT * FROM {table} LIMIT 10"
      end

      protected

      def client
        @client ||= begin
          uri = URI.parse(settings["url"])
          query = uri.query ? CGI::parse(uri.query) : {}
          Presto::Client.new(
            server: "#{uri.host}:#{uri.port}",
            catalog: uri.path.to_s.sub(/\A\//, ""),
            schema: query["schema"] || "public",
            user: uri.user,
            http_debug: false
          )
        end
      end
    end
  end
end
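The Presto client is configured entirely from the data source URL. How the pieces are derived (the URL itself is a made-up example; the parsing rules come from #client above):

# Illustrative only.
require "uri"
require "cgi"

uri = URI.parse("presto://blazer@presto.internal:8080/hive?schema=analytics")
uri.user                          # => "blazer"
"#{uri.host}:#{uri.port}"         # => "presto.internal:8080"  (server)
uri.path.to_s.sub(/\A\//, "")     # => "hive"                  (catalog)
CGI::parse(uri.query)["schema"]   # => ["analytics"]           (schema)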
data/lib/blazer/adapters/sql_adapter.rb
@@ -0,0 +1,182 @@
module Blazer
  module Adapters
    class SqlAdapter < BaseAdapter
      attr_reader :connection_model

      def initialize(data_source)
        super

        @connection_model =
          Class.new(Blazer::Connection) do
            def self.name
              "Blazer::Connection::Adapter#{object_id}"
            end
            establish_connection(data_source.settings["url"]) if data_source.settings["url"]
          end
      end

      def run_statement(statement, comment)
        columns = []
        rows = []
        error = nil

        begin
          in_transaction do
            set_timeout(data_source.timeout) if data_source.timeout

            result = select_all("#{statement} /*#{comment}*/")
            columns = result.columns
            cast_method = Rails::VERSION::MAJOR < 5 ? :type_cast : :cast_value
            result.rows.each do |untyped_row|
              rows << (result.column_types.empty? ? untyped_row : columns.each_with_index.map { |c, i| untyped_row[i] ? result.column_types[c].send(cast_method, untyped_row[i]) : untyped_row[i] })
            end
          end
        rescue => e
          error = e.message.sub(/.+ERROR: /, "")
          error = Blazer::TIMEOUT_MESSAGE if Blazer::TIMEOUT_ERRORS.any? { |e| error.include?(e) }
          reconnect if error.include?("PG::ConnectionBad")
        end

        [columns, rows, error]
      end

      def tables
        result = data_source.run_statement(connection_model.send(:sanitize_sql_array, ["SELECT table_name FROM information_schema.tables WHERE table_schema IN (?) ORDER BY table_name", schemas]), refresh_cache: true)
        result.rows.map(&:first)
      end

      def schema
        result = data_source.run_statement(connection_model.send(:sanitize_sql_array, ["SELECT table_schema, table_name, column_name, data_type, ordinal_position FROM information_schema.columns WHERE table_schema IN (?) ORDER BY 1, 2", schemas]))
        result.rows.group_by { |r| [r[0], r[1]] }.map { |k, vs| {schema: k[0], table: k[1], columns: vs.sort_by { |v| v[2] }.map { |v| {name: v[2], data_type: v[3]} }} }
      end

      def preview_statement
        if postgresql?
          "SELECT * FROM \"{table}\" LIMIT 10"
        elsif sqlserver?
          "SELECT TOP (10) * FROM {table}"
        else
          "SELECT * FROM {table} LIMIT 10"
        end
      end

      def reconnect
        connection_model.establish_connection(settings["url"])
      end

      def cost(statement)
        result = explain(statement)
        if sqlserver?
          result["TotalSubtreeCost"]
        else
          match = /cost=\d+\.\d+..(\d+\.\d+) /.match(result)
          match[1] if match
        end
      end

      def explain(statement)
        if postgresql? || redshift?
          select_all("EXPLAIN #{statement}").rows.first.first
        elsif sqlserver?
          begin
            execute("SET SHOWPLAN_ALL ON")
            result = select_all(statement).each.first
          ensure
            execute("SET SHOWPLAN_ALL OFF")
          end
          result
        end
      rescue
        nil
      end

      def cancel(run_id)
        if postgresql?
          select_all("SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE pid <> pg_backend_pid() AND query LIKE '%,run_id:#{run_id}%'")
        elsif redshift?
          first_row = select_all("SELECT pid FROM stv_recents WHERE status = 'Running' AND query LIKE '%,run_id:#{run_id}%'").first
          if first_row
            select_all("CANCEL #{first_row["pid"].to_i}")
          end
        end
      end

      def cachable?(statement)
        !%w[CREATE ALTER UPDATE INSERT DELETE].include?(statement.split.first.to_s.upcase)
      end

      protected

      def select_all(statement)
        connection_model.connection.select_all(statement)
      end

      # separate from select_all to prevent mysql error
      def execute(statement)
        connection_model.connection.execute(statement)
      end

      def postgresql?
        ["PostgreSQL", "PostGIS"].include?(adapter_name)
      end

      def redshift?
        ["Redshift"].include?(adapter_name)
      end

      def mysql?
        ["MySQL", "Mysql2", "Mysql2Spatial"].include?(adapter_name)
      end

      def sqlserver?
        ["SQLServer", "tinytds", "mssql"].include?(adapter_name)
      end

      def adapter_name
        # prevent bad data source from taking down queries/new
        connection_model.connection.adapter_name rescue nil
      end

      def schemas
        settings["schemas"] || [connection_model.connection_config[:schema] || default_schema]
      end

      def default_schema
        if postgresql? || redshift?
          "public"
        elsif sqlserver?
          "dbo"
        else
          connection_model.connection_config[:database]
        end
      end

      def set_timeout(timeout)
        if postgresql? || redshift?
          execute("SET #{use_transaction? ? "LOCAL " : ""}statement_timeout = #{timeout.to_i * 1000}")
        elsif mysql?
          execute("SET max_execution_time = #{timeout.to_i * 1000}")
        else
          raise Blazer::TimeoutNotSupported, "Timeout not supported for #{adapter_name} adapter"
        end
      end

      def use_transaction?
        settings.key?("use_transaction") ? settings["use_transaction"] : true
      end

      def in_transaction
        connection_model.connection_pool.with_connection do
          if use_transaction?
            connection_model.transaction do
              yield
              raise ActiveRecord::Rollback
            end
          else
            yield
          end
        end
      end
    end
  end
end
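Two details worth noting in this adapter: every statement runs inside a transaction that is rolled back afterwards (unless use_transaction is disabled), and the comment appended to the SQL must contain ",run_id:<id>" for #cancel to locate the running query. A sketch of that round trip, inferred from the code above (in the engine, Blazer::DataSource builds the comment; the URL and values below are placeholders):

# Sketch only; assumes a Rails app with Blazer installed, since the adapter
# builds its connection class from Blazer::Connection.
require "ostruct"
require "securerandom"

source = OpenStruct.new(
  settings: { "url" => "postgres://user:pass@localhost:5432/app_db" }, # placeholder
  timeout: 15 # seconds, applied as statement_timeout on Postgres/Redshift
)
adapter = Blazer::Adapters::SqlAdapter.new(source)

run_id = SecureRandom.uuid
comment = "blazer,run_id:#{run_id}" # hypothetical comment; must include ,run_id:

columns, rows, error = adapter.run_statement("SELECT 1 AS one", comment)

# From another process or thread, the same run_id can cancel the query:
adapter.cancel(run_id)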