mysql_genius-core 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +21 -0
- data/lib/mysql_genius/core/ai/client.rb +99 -0
- data/lib/mysql_genius/core/ai/config.rb +43 -0
- data/lib/mysql_genius/core/ai/optimization.rb +81 -0
- data/lib/mysql_genius/core/ai/suggestion.rb +76 -0
- data/lib/mysql_genius/core/analysis/duplicate_indexes.rb +83 -0
- data/lib/mysql_genius/core/analysis/query_stats.rb +103 -0
- data/lib/mysql_genius/core/analysis/server_overview.rb +124 -0
- data/lib/mysql_genius/core/analysis/table_sizes.rb +66 -0
- data/lib/mysql_genius/core/analysis/unused_indexes.rb +57 -0
- data/lib/mysql_genius/core/column_definition.rb +30 -0
- data/lib/mysql_genius/core/connection/fake_adapter.rb +110 -0
- data/lib/mysql_genius/core/connection.rb +36 -0
- data/lib/mysql_genius/core/execution_result.rb +27 -0
- data/lib/mysql_genius/core/index_definition.rb +23 -0
- data/lib/mysql_genius/core/query_explainer.rb +60 -0
- data/lib/mysql_genius/core/query_runner/config.rb +21 -0
- data/lib/mysql_genius/core/query_runner.rb +94 -0
- data/lib/mysql_genius/core/result.rb +43 -0
- data/lib/mysql_genius/core/server_info.rb +33 -0
- data/lib/mysql_genius/core/sql_validator.rb +59 -0
- data/lib/mysql_genius/core/version.rb +7 -0
- data/lib/mysql_genius/core.rb +35 -0
- data/mysql_genius-core.gemspec +34 -0
- metadata +73 -0
checksums.yaml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
---
|
|
2
|
+
SHA256:
|
|
3
|
+
metadata.gz: 70f7ef02105463808683263b268e297b98907545c80b8379a36517b8a2f74a76
|
|
4
|
+
data.tar.gz: b6c41d695b0cc433dac7e625d08c15a470da36304ccf54eb27be66be0e79118d
|
|
5
|
+
SHA512:
|
|
6
|
+
metadata.gz: 4c93fad1632ccd9fe8859b40880fad565e43467ae761bb550972a2af750a43636e22779a2e34b93678e16f6c268b1a950693a668b22e21f8822504abe42057e3
|
|
7
|
+
data.tar.gz: 2c7c33af4765ab2eb520de0f5a95442abfe5b7b8875f4d0ba31cf887c7f227070cb74ab9339faa394c3033ddc87dc959dfc2cf14e16ef5a244cee80dd5c590e9
|
data/CHANGELOG.md
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
# Changelog
|
|
2
|
+
|
|
3
|
+
## 0.4.0
|
|
4
|
+
|
|
5
|
+
First published release of `mysql_genius-core`. This gem is the Rails-free foundation library for `mysql_genius` and will be the shared core for the forthcoming `mysql_genius-desktop` standalone app. From 0.4.0 onward, `mysql_genius-core` and `mysql_genius` release in lockstep under matching version numbers.
|
|
6
|
+
|
|
7
|
+
### Added
|
|
8
|
+
- `MysqlGenius::Core::Connection` — connection contract with `ActiveRecordAdapter` (used by the Rails engine) and `FakeAdapter` (used in specs).
|
|
9
|
+
- `MysqlGenius::Core::SqlValidator` — SELECT-only validation, blocked-table enforcement, row-limit application.
|
|
10
|
+
- `MysqlGenius::Core::Ai::{Client, Suggestion, Optimization}` — AI service layer taking an explicit `Core::Ai::Config` instead of reading global configuration.
|
|
11
|
+
- `MysqlGenius::Core::Result`, `ColumnDefinition`, `IndexDefinition`, `ServerInfo` — value objects returned by adapters and analyses.
|
|
12
|
+
- `MysqlGenius::Core::Analysis::TableSizes` — queries `information_schema.tables` + per-table `COUNT(*)` with size/row/fragmentation metadata.
|
|
13
|
+
- `MysqlGenius::Core::Analysis::DuplicateIndexes` — detects left-prefix covering across indexes per table.
|
|
14
|
+
- `MysqlGenius::Core::Analysis::QueryStats` — reads `performance_schema.events_statements_summary_by_digest` with sort + limit.
|
|
15
|
+
- `MysqlGenius::Core::Analysis::UnusedIndexes` — reads `performance_schema.table_io_waits_summary_by_index_usage` JOINed with `information_schema.tables`.
|
|
16
|
+
- `MysqlGenius::Core::Analysis::ServerOverview` — reads `SHOW GLOBAL STATUS` / `SHOW GLOBAL VARIABLES` / `SELECT VERSION()`, computes derived metrics.
|
|
17
|
+
- `MysqlGenius::Core::ExecutionResult` — immutable value object for `QueryRunner`'s return.
|
|
18
|
+
- `MysqlGenius::Core::QueryRunner` + `QueryRunner::Config` — owns validation, row-limit/timeout-hint application, execution, column masking. Returns `ExecutionResult` or raises `Rejected` / `Timeout`.
|
|
19
|
+
- `MysqlGenius::Core::QueryExplainer` — owns EXPLAIN with optional validation-skipping. Returns `Core::Result` or raises `Rejected` / `Truncated`.
|
|
20
|
+
|
|
21
|
+
MariaDB vs MySQL is detected at runtime so timeout hints use the correct syntax (`SET STATEMENT max_statement_time` vs `MAX_EXECUTION_TIME` optimizer hint).
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
# frozen_string_literal: true

module MysqlGenius
  module Core
    module Ai
      # HTTP client for OpenAI-compatible chat completion APIs.
      #
      # Construct with a Core::Ai::Config; call #chat with a messages array.
      # When the config carries an injected `client` callable, HTTP is
      # bypassed entirely and the callable's return value is used as-is.
      class Client
        class NotConfigured < Core::Error; end
        class ApiError < Core::Error; end
        class TooManyRedirects < Core::Error; end

        MAX_REDIRECTS = 3

        # config - a Core::Ai::Config (endpoint, api_key, model, auth_style,
        #          optional client callable)
        def initialize(config)
          @config = config
        end

        # Sends a chat-completion request and returns the parsed JSON content
        # of the first choice.
        #
        # messages    - Array of { role:, content: } hashes
        # temperature - sampling temperature (default 0 for determinism)
        #
        # Raises NotConfigured when neither an injected client nor
        # endpoint + api_key are available, ApiError on API-reported errors
        # or malformed responses, TooManyRedirects on redirect loops.
        def chat(messages:, temperature: 0)
          if @config.client
            return @config.client.call(messages: messages, temperature: temperature)
          end

          raise NotConfigured, "AI is not configured" unless @config.enabled?

          body = {
            messages: messages,
            response_format: { type: "json_object" },
            temperature: temperature,
          }
          body[:model] = @config.model if @config.model && !@config.model.empty?

          response = post_with_redirects(URI(@config.endpoint), body.to_json)
          parsed = parse_response_body(response.body)

          if (error = parsed["error"])
            # Error payloads are usually {"error" => {"message" => ...}}, but
            # some providers return a bare string. The previous
            # `error["message"]` on a String did a substring lookup instead of
            # a key lookup, so handle both shapes explicitly.
            message = error.is_a?(Hash) ? (error["message"] || error) : error
            raise ApiError, "AI API error: #{message}"
          end

          content = parsed.dig("choices", 0, "message", "content")
          raise ApiError, "No content in AI response" if content.nil?

          parse_json_content(content)
        end

        private

        # Parses the raw HTTP response body. A non-JSON body (e.g. an HTML
        # error page from a proxy) becomes an ApiError rather than leaking
        # JSON::ParserError to callers.
        def parse_response_body(raw)
          JSON.parse(raw)
        rescue JSON::ParserError
          raise ApiError, "Non-JSON response from AI API: #{raw.to_s[0, 200]}"
        end

        # Best-effort JSON extraction from model output: tries the content
        # verbatim, then with markdown code fences stripped, then falls back
        # to wrapping the raw text so callers always receive a Hash.
        def parse_json_content(content)
          JSON.parse(content)
        rescue JSON::ParserError
          stripped = content.to_s
            .gsub(/\A\s*```(?:json)?\s*/i, "")
            .gsub(/\s*```\s*\z/, "")
            .strip
          begin
            JSON.parse(stripped)
          rescue JSON::ParserError
            { "raw" => content.to_s }
          end
        end

        # POSTs `body` (a JSON string) to `uri`, following up to
        # MAX_REDIRECTS redirects. Relative Location headers are resolved
        # against the current request URI (URI() alone raised on them).
        def post_with_redirects(uri, body, redirects = 0)
          raise TooManyRedirects, "Too many redirects" if redirects > MAX_REDIRECTS

          http = Net::HTTP.new(uri.host, uri.port)
          http.use_ssl = uri.scheme == "https"
          if http.use_ssl?
            http.verify_mode = OpenSSL::SSL::VERIFY_PEER
            cert_file = ENV["SSL_CERT_FILE"] || OpenSSL::X509::DEFAULT_CERT_FILE
            http.ca_file = cert_file if File.exist?(cert_file)
          end
          http.open_timeout = 10
          http.read_timeout = 60

          request = Net::HTTP::Post.new(uri)
          request["Content-Type"] = "application/json"
          if @config.auth_style == :bearer
            request["Authorization"] = "Bearer #{@config.api_key}"
          else
            # Azure-style header auth.
            request["api-key"] = @config.api_key
          end
          request.body = body

          response = http.request(request)

          if response.is_a?(Net::HTTPRedirection)
            post_with_redirects(URI.join(uri.to_s, response["location"]), body, redirects + 1)
          else
            response
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
# frozen_string_literal: true

module MysqlGenius
  module Core
    module Ai
      # Immutable keyword-init value object carrying every AI setting a
      # Client needs. It is passed explicitly to each AI service
      # constructor; there is no module-level global configuration.
      #
      # Fields:
      #   client         - optional callable; when set, bypasses HTTP.
      #                    Signature: #call(messages:, temperature:) -> Hash
      #   endpoint       - HTTPS URL of the chat completions endpoint
      #   api_key        - API key (sent as Bearer or api-key header)
      #   model          - model name included in the request body
      #   auth_style     - :bearer or :api_key
      #   system_context - optional domain context string that services
      #                    append to their system prompts
      Config = Struct.new(
        :client,
        :endpoint,
        :api_key,
        :model,
        :auth_style,
        :system_context,
        keyword_init: true,
      ) do
        # Freeze on construction so a Config can be shared safely.
        def initialize(*)
          super
          freeze
        end

        # True when AI calls can be made: either an injected client is
        # present, or both endpoint and api_key are non-blank.
        def enabled?
          return true if client

          [endpoint, api_key].all? { |value| !value.nil? && !value.to_s.empty? }
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
# frozen_string_literal: true

module MysqlGenius
  module Core
    module Ai
      # Asks the AI client for optimization suggestions given a SQL query
      # and its EXPLAIN output.
      #
      # Construct with:
      #   connection - a Core::Connection implementation
      #   client     - a Core::Ai::Client
      #   config     - the Core::Ai::Config
      #
      # Call:
      #   .call(sql, explain_rows, allowed_tables)
      #     explain_rows - Array of arrays OR a pre-formatted String
      #     -> Hash with "suggestions" key
      class Optimization
        def initialize(connection, client, config)
          @connection = connection
          @client = client
          @config = config
        end

        # Builds the system/user message pair and delegates to the client.
        def call(sql, explain_rows, allowed_tables)
          schema = build_schema_description(allowed_tables)

          @client.chat(
            messages: [
              { role: "system", content: system_prompt(schema) },
              { role: "user", content: user_prompt(sql, explain_rows) },
            ],
          )
        end

        private

        # Instruction prompt including the schema of allowed tables and the
        # required JSON response shape.
        def system_prompt(schema_description)
          <<~PROMPT
            You are a MySQL query optimization expert. Given a SQL query and its EXPLAIN output, analyze the query execution plan and provide actionable optimization suggestions.

            Available schema:
            #{schema_description}

            Respond with JSON:
            {
              "suggestions": "Markdown-formatted analysis and suggestions. Include: 1) Summary of current execution plan (scan types, rows examined). 2) Specific recommendations such as indexes to add (provide exact CREATE INDEX statements), query rewrites, or structural changes. 3) Expected impact of each suggestion."
            }
          PROMPT
        end

        # Pairs the raw SQL with its formatted EXPLAIN rows.
        def user_prompt(sql, explain_rows)
          <<~PROMPT
            SQL Query:
            #{sql}

            EXPLAIN Output:
            #{format_explain(explain_rows)}
          PROMPT
        end

        # Accepts either a pre-formatted String or an array of row arrays,
        # which are rendered as pipe-separated lines.
        def format_explain(explain_rows)
          case explain_rows
          when String then explain_rows
          else explain_rows.map { |row| row.join(" | ") }.join("\n")
          end
        end

        # One line per allowed table that actually exists on the connection:
        # "table: col (type), ..." plus an index summary line when present.
        def build_schema_description(allowed_tables)
          allowed_tables.each_with_object([]) do |table, parts|
            next unless @connection.tables.include?(table)

            column_list = @connection.columns_for(table)
              .map { |col| "#{col.name} (#{col.type})" }
              .join(", ")
            index_descriptions = @connection.indexes_for(table).map do |index|
              "#{index.name}: [#{index.columns.join(", ")}]#{" UNIQUE" if index.unique}"
            end

            entry = "#{table}: #{column_list}"
            entry += "\n Indexes: #{index_descriptions.join("; ")}" unless index_descriptions.empty?
            parts << entry
          end.join("\n")
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
# frozen_string_literal: true

module MysqlGenius
  module Core
    module Ai
      # Turns a natural-language prompt + a list of allowed tables into
      # a SELECT query via the AI client.
      #
      # Construct with:
      #   connection - a Core::Connection implementation
      #   client     - a Core::Ai::Client (pre-built with the same config)
      #   config     - the Core::Ai::Config (used for system_context)
      #
      # Call:
      #   .call(user_prompt, allowed_tables) -> Hash with "sql" and "explanation"
      class Suggestion
        def initialize(connection, client, config)
          @connection = connection
          @client = client
          @config = config
        end

        # Builds the system/user message pair and delegates to the client.
        def call(user_prompt, allowed_tables)
          schema = build_schema_description(allowed_tables)

          @client.chat(
            messages: [
              { role: "system", content: system_prompt(schema) },
              { role: "user", content: user_prompt },
            ],
          )
        end

        private

        # Assembles the system prompt from three segments: base role,
        # optional domain context from the config, and the rules/schema
        # section with the required JSON response shape.
        def system_prompt(schema_description)
          segments = [<<~PROMPT]
            You are a SQL query assistant for a MySQL database.
          PROMPT

          context = @config.system_context
          if context && !context.empty?
            segments << <<~PROMPT

              Domain context:
              #{context}
            PROMPT
          end

          segments << <<~PROMPT

            Rules:
            - Only generate SELECT statements. Never generate INSERT, UPDATE, DELETE, or any other mutation.
            - Only reference the tables and columns listed in the schema below. Do not guess or invent column names.
            - Use backticks for table and column names.
            - Include a LIMIT 100 unless the user specifies otherwise.

            Available schema:
            #{schema_description}

            Respond with JSON: {"sql": "the SQL query", "explanation": "brief explanation of what the query does"}
          PROMPT

          segments.join
        end

        # One "table: col (type), ..." line per allowed table that actually
        # exists on the connection; unknown tables are silently skipped.
        def build_schema_description(allowed_tables)
          allowed_tables.each_with_object([]) do |table, parts|
            next unless @connection.tables.include?(table)

            column_list = @connection.columns_for(table)
              .map { |col| "#{col.name} (#{col.type})" }
              .join(", ")
            parts << "#{table}: #{column_list}"
          end.join("\n")
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "set"

module MysqlGenius
  module Core
    module Analysis
      # Detects indexes whose columns are a left-prefix of another index on
      # the same table (meaning the shorter index is redundant — the longer
      # one can satisfy the same queries). Preserves unique indexes: a unique
      # index is never flagged as redundant when only covered by a non-unique
      # index.
      #
      # Takes a Core::Connection plus a list of tables to exclude from the
      # scan. Returns an array of hashes describing each duplicate pair, with
      # the (duplicate_index, covered_by_index) pair deduplicated across
      # symmetrical relationships.
      class DuplicateIndexes
        def initialize(connection, blocked_tables:)
          @connection = connection
          @blocked_tables = blocked_tables
        end

        # Scans every queryable table's indexes pairwise and returns the
        # deduplicated list of redundant-index records.
        def call
          found = []

          queryable_tables.each do |table|
            indexes = @connection.indexes_for(table)
            next if indexes.size < 2

            indexes.permutation(2) do |(redundant, covering)|
              next if redundant.name == covering.name
              next unless covers?(covering, redundant)

              found << {
                table: table,
                duplicate_index: redundant.name,
                duplicate_columns: redundant.columns,
                covered_by_index: covering.name,
                covered_by_columns: covering.columns,
                unique: redundant.unique,
              }
            end
          end

          deduplicate(found)
        end

        private

        # All connection tables minus the blocked list.
        def queryable_tables
          @connection.tables - @blocked_tables
        end

        # True if `covering` covers `candidate` (candidate's columns are a
        # left-prefix of covering's columns). Protects unique indexes from
        # being covered by non-unique ones.
        def covers?(covering, candidate)
          prefix_length = candidate.columns.size
          return false if prefix_length > covering.columns.size
          return false if candidate.unique && !covering.unique

          covering.columns.first(prefix_length) == candidate.columns
        end

        # Keeps only the first record for each unordered
        # (duplicate, covered_by) pair per table. Set#add? returns nil on
        # repeats, so select naturally drops them.
        def deduplicate(duplicates)
          seen = Set.new
          duplicates.select do |dup|
            key = [dup[:table], *[dup[:duplicate_index], dup[:covered_by_index]].sort].join(":")
            seen.add?(key)
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
# frozen_string_literal: true

module MysqlGenius
  module Core
    module Analysis
      # Queries performance_schema.events_statements_summary_by_digest for
      # the top statements by a given sort dimension, excluding noise
      # (internal schema queries, EXPLAIN, SHOW, SET STATEMENT, etc.).
      # Returns an array of per-digest hashes with call counts, timing
      # percentiles, row examine/sent ratios, and temp-table metadata.
      #
      # If performance_schema is not enabled, the underlying exec_query
      # call will raise — the caller decides how to render that.
      class QueryStats
        VALID_SORTS = ["total_time", "avg_time", "calls", "rows_examined"].freeze
        MAX_LIMIT = 50

        def initialize(connection)
          @connection = connection
        end

        # sort  - one of VALID_SORTS (unknown values fall back to total_time)
        # limit - clamped to 1..MAX_LIMIT
        def call(sort: "total_time", limit: MAX_LIMIT)
          order_clause = order_clause_for(sort)
          effective_limit = limit.to_i.clamp(1, MAX_LIMIT)

          rows = @connection.exec_query(build_sql(order_clause, effective_limit)).to_hashes
          rows.map { |row| transform(row) }
        end

        private

        # Maps the public sort key to its performance_schema column; any
        # unrecognized key sorts by total time.
        def order_clause_for(sort)
          column =
            case sort
            when "avg_time" then "AVG_TIMER_WAIT"
            when "calls" then "COUNT_STAR"
            when "rows_examined" then "SUM_ROWS_EXAMINED"
            else "SUM_TIMER_WAIT"
            end

          "#{column} DESC"
        end

        # Digest summary query with noise filters; timer waits are in
        # picoseconds, so / 1e9 converts to milliseconds.
        def build_sql(order_clause, limit)
          <<~SQL
            SELECT
              DIGEST_TEXT,
              COUNT_STAR AS calls,
              ROUND(SUM_TIMER_WAIT / 1000000000, 1) AS total_time_ms,
              ROUND(AVG_TIMER_WAIT / 1000000000, 1) AS avg_time_ms,
              ROUND(MAX_TIMER_WAIT / 1000000000, 1) AS max_time_ms,
              SUM_ROWS_EXAMINED AS rows_examined,
              SUM_ROWS_SENT AS rows_sent,
              SUM_CREATED_TMP_DISK_TABLES AS tmp_disk_tables,
              SUM_SORT_ROWS AS sort_rows,
              FIRST_SEEN,
              LAST_SEEN
            FROM performance_schema.events_statements_summary_by_digest
            WHERE SCHEMA_NAME = #{@connection.quote(@connection.current_database)}
              AND DIGEST_TEXT IS NOT NULL
              AND DIGEST_TEXT NOT LIKE 'EXPLAIN%'
              AND DIGEST_TEXT NOT LIKE '%`information_schema`%'
              AND DIGEST_TEXT NOT LIKE '%`performance_schema`%'
              AND DIGEST_TEXT NOT LIKE '%information_schema.%'
              AND DIGEST_TEXT NOT LIKE '%performance_schema.%'
              AND DIGEST_TEXT NOT LIKE 'SHOW %'
              AND DIGEST_TEXT NOT LIKE 'SET STATEMENT %'
              AND DIGEST_TEXT NOT LIKE 'SELECT VERSION ( )%'
              AND DIGEST_TEXT NOT LIKE 'SELECT @@%'
            ORDER BY #{order_clause}
            LIMIT #{limit}
          SQL
        end

        # Normalizes one digest row into a symbol-keyed hash with derived
        # fields (truncated SQL, examined/sent ratio).
        def transform(row)
          digest        = pick(row, "DIGEST_TEXT", "digest_text").to_s
          calls         = pick(row, "calls", "CALLS").to_i
          rows_examined = pick(row, "rows_examined", "ROWS_EXAMINED").to_i
          rows_sent     = pick(row, "rows_sent", "ROWS_SENT").to_i

          {
            sql: truncate(digest, 500),
            calls: calls,
            total_time_ms: (row["total_time_ms"] || 0).to_f,
            avg_time_ms: (row["avg_time_ms"] || 0).to_f,
            max_time_ms: (row["max_time_ms"] || 0).to_f,
            rows_examined: rows_examined,
            rows_sent: rows_sent,
            rows_ratio: rows_sent.positive? ? (rows_examined.to_f / rows_sent).round(1) : 0,
            tmp_disk_tables: pick(row, "tmp_disk_tables", "TMP_DISK_TABLES").to_i,
            sort_rows: pick(row, "sort_rows", "SORT_ROWS").to_i,
            first_seen: row["FIRST_SEEN"] || row["first_seen"],
            last_seen: row["LAST_SEEN"] || row["last_seen"],
          }
        end

        # First non-nil value among the given keys — adapters differ in
        # whether result column names come back upper- or lower-cased.
        def pick(row, *keys)
          keys.each do |key|
            value = row[key]
            return value unless value.nil?
          end
          nil
        end

        # Shortens long digests, replacing the tail with an ellipsis.
        def truncate(string, max)
          string.length > max ? "#{string[0, max - 3]}..." : string
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
# frozen_string_literal: true

module MysqlGenius
  module Core
    module Analysis
      # Collects a dashboard-worthy snapshot of server state by combining
      # SHOW GLOBAL STATUS, SHOW GLOBAL VARIABLES, and SELECT VERSION().
      # Computes derived metrics (uptime formatting, connection usage
      # percentage, buffer pool hit rate, tmp-disk percentage, QPS).
      #
      # Returns a nested hash with four top-level sections: server,
      # connections, innodb, queries. Errors propagate; the caller
      # decides how to render failure.
      class ServerOverview
        def initialize(connection)
          @connection = connection
        end

        # Runs the three server queries and assembles the four sections.
        def call
          status = load_status
          vars = load_variables
          version = @connection.select_value("SELECT VERSION()")

          uptime_seconds = status["Uptime"].to_i
          {
            server: server_block(version, uptime_seconds),
            connections: connections_block(status, vars),
            innodb: innodb_block(status, vars),
            queries: queries_block(status, uptime_seconds),
          }
        end

        private

        def load_status
          name_value_map("SHOW GLOBAL STATUS")
        end

        def load_variables
          name_value_map("SHOW GLOBAL VARIABLES")
        end

        # Runs a SHOW-style query and folds its Variable_name/Value rows
        # into a plain { name => value } string Hash. (load_status and
        # load_variables previously duplicated this loop line-for-line.)
        # Both key capitalizations are handled because adapters differ in
        # result-column casing.
        def name_value_map(sql)
          @connection.exec_query(sql).to_hashes.each_with_object({}) do |row, acc|
            name = (row["Variable_name"] || row["variable_name"]).to_s
            acc[name] = (row["Value"] || row["value"]).to_s
          end
        end

        # Version string plus human-readable uptime ("Nd Nh Nm").
        def server_block(version, uptime_seconds)
          days = uptime_seconds / 86_400
          hours = (uptime_seconds % 86_400) / 3600
          minutes = (uptime_seconds % 3600) / 60

          {
            version: version,
            uptime: "#{days}d #{hours}h #{minutes}m",
            uptime_seconds: uptime_seconds,
          }
        end

        # Connection counters plus current usage as a percentage of
        # max_connections (0 when max_connections is unknown/zero).
        def connections_block(status, vars)
          max_conn = vars["max_connections"].to_i
          current_conn = status["Threads_connected"].to_i
          usage_pct = max_conn.positive? ? ((current_conn.to_f / max_conn) * 100).round(1) : 0

          {
            max: max_conn,
            current: current_conn,
            usage_pct: usage_pct,
            threads_running: status["Threads_running"].to_i,
            threads_cached: status["Threads_cached"].to_i,
            threads_created: status["Threads_created"].to_i,
            aborted_connects: status["Aborted_connects"].to_i,
            aborted_clients: status["Aborted_clients"].to_i,
            max_used: status["Max_used_connections"].to_i,
          }
        end

        # Buffer pool size/health plus row-lock contention counters.
        # Hit rate = (logical reads - disk reads) / logical reads.
        def innodb_block(status, vars)
          buffer_pool_bytes = vars["innodb_buffer_pool_size"].to_i
          buffer_pool_mb = (buffer_pool_bytes / 1024.0 / 1024.0).round(1)

          reads = status["Innodb_buffer_pool_read_requests"].to_f
          disk_reads = status["Innodb_buffer_pool_reads"].to_f
          hit_rate = reads.positive? ? (((reads - disk_reads) / reads) * 100).round(2) : 0

          {
            buffer_pool_mb: buffer_pool_mb,
            buffer_pool_hit_rate: hit_rate,
            buffer_pool_pages_dirty: status["Innodb_buffer_pool_pages_dirty"].to_i,
            buffer_pool_pages_free: status["Innodb_buffer_pool_pages_free"].to_i,
            buffer_pool_pages_total: status["Innodb_buffer_pool_pages_total"].to_i,
            row_lock_waits: status["Innodb_row_lock_waits"].to_i,
            row_lock_time_ms: status["Innodb_row_lock_time"].to_f.round(0),
          }
        end

        # Query throughput and temp-table pressure. QPS is averaged over
        # the whole uptime (0 when uptime is unknown/zero).
        def queries_block(status, uptime_seconds)
          tmp_tables = status["Created_tmp_tables"].to_i
          tmp_disk_tables = status["Created_tmp_disk_tables"].to_i
          tmp_disk_pct = tmp_tables.positive? ? ((tmp_disk_tables.to_f / tmp_tables) * 100).round(1) : 0

          questions = status["Questions"].to_i
          qps = uptime_seconds.positive? ? (questions.to_f / uptime_seconds).round(1) : 0

          {
            questions: questions,
            qps: qps,
            slow_queries: status["Slow_queries"].to_i,
            tmp_tables: tmp_tables,
            tmp_disk_tables: tmp_disk_tables,
            tmp_disk_pct: tmp_disk_pct,
            select_full_join: status["Select_full_join"].to_i,
            sort_merge_passes: status["Sort_merge_passes"].to_i,
          }
        end
      end
    end
  end
end
|