mysql_genius 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.github/workflows/ci.yml +53 -0
- data/.gitignore +11 -0
- data/.rspec +3 -0
- data/CHANGELOG.md +13 -0
- data/Gemfile +15 -0
- data/LICENSE.txt +21 -0
- data/README.md +295 -0
- data/Rakefile +6 -0
- data/app/controllers/concerns/mysql_genius/ai_features.rb +360 -0
- data/app/controllers/concerns/mysql_genius/database_analysis.rb +259 -0
- data/app/controllers/concerns/mysql_genius/query_execution.rb +129 -0
- data/app/controllers/mysql_genius/base_controller.rb +18 -0
- data/app/controllers/mysql_genius/queries_controller.rb +54 -0
- data/app/services/mysql_genius/ai_client.rb +84 -0
- data/app/services/mysql_genius/ai_optimization_service.rb +56 -0
- data/app/services/mysql_genius/ai_suggestion_service.rb +56 -0
- data/app/views/layouts/mysql_genius/application.html.erb +116 -0
- data/app/views/mysql_genius/queries/_shared_results.html.erb +56 -0
- data/app/views/mysql_genius/queries/_tab_ai_tools.html.erb +43 -0
- data/app/views/mysql_genius/queries/_tab_duplicate_indexes.html.erb +24 -0
- data/app/views/mysql_genius/queries/_tab_query_stats.html.erb +36 -0
- data/app/views/mysql_genius/queries/_tab_server.html.erb +54 -0
- data/app/views/mysql_genius/queries/_tab_slow_queries.html.erb +17 -0
- data/app/views/mysql_genius/queries/_tab_sql_query.html.erb +40 -0
- data/app/views/mysql_genius/queries/_tab_table_sizes.html.erb +31 -0
- data/app/views/mysql_genius/queries/_tab_unused_indexes.html.erb +25 -0
- data/app/views/mysql_genius/queries/_tab_visual_builder.html.erb +61 -0
- data/app/views/mysql_genius/queries/index.html.erb +1185 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/config/routes.rb +24 -0
- data/docs/screenshots/ai_tools.png +0 -0
- data/docs/screenshots/duplicate_indexes.png +0 -0
- data/docs/screenshots/query_stats.png +0 -0
- data/docs/screenshots/server.png +0 -0
- data/docs/screenshots/sql_query.png +0 -0
- data/docs/screenshots/table_sizes.png +0 -0
- data/docs/screenshots/visual_builder.png +0 -0
- data/lib/mysql_genius/configuration.rb +96 -0
- data/lib/mysql_genius/engine.rb +12 -0
- data/lib/mysql_genius/slow_query_monitor.rb +38 -0
- data/lib/mysql_genius/sql_validator.rb +55 -0
- data/lib/mysql_genius/version.rb +3 -0
- data/lib/mysql_genius.rb +23 -0
- data/mysql_genius.gemspec +34 -0
- metadata +122 -0
|
@@ -0,0 +1,360 @@
|
|
|
1
|
+
module MysqlGenius
|
|
2
|
+
module AiFeatures
|
|
3
|
+
extend ActiveSupport::Concern
|
|
4
|
+
|
|
5
|
+
# POST action: turn a natural-language prompt into a SQL suggestion.
#
# Reads params[:prompt], asks AiSuggestionService for SQL limited to the
# queryable tables, sanitizes the model's SQL before echoing it, and renders
# { sql:, explanation: } as JSON. Any failure renders a JSON error payload.
def suggest
  # Use the shared ai_not_configured helper so the 404 response is consistent
  # with the other AI actions in this concern (describe_query, schema_review, ...).
  return ai_not_configured unless mysql_genius_config.ai_enabled?

  prompt = params[:prompt].to_s.strip
  return render json: { error: "Please describe what you want to query." }, status: :unprocessable_entity if prompt.blank?

  result = AiSuggestionService.new.call(prompt, queryable_tables)
  # Defensive: never trust model output — sanitize before returning it to the UI.
  sql = sanitize_ai_sql(result["sql"].to_s)
  render json: { sql: sql, explanation: result["explanation"] }
rescue StandardError => e
  render json: { error: "AI suggestion failed: #{e.message}" }, status: :unprocessable_entity
end
|
|
19
|
+
|
|
20
|
+
# POST action: send a query plus its EXPLAIN output to the AI optimization
# service and render the service's JSON result.
#
# Expects params:
#   sql          - the SQL statement to optimize (required)
#   explain_rows - EXPLAIN output rows (required; hash-like or array rows)
def optimize
  # Use the shared ai_not_configured helper so the 404 response is consistent
  # with the other AI actions in this concern.
  return ai_not_configured unless mysql_genius_config.ai_enabled?

  sql = params[:sql].to_s.strip
  # Normalize EXPLAIN rows: accept hash-like rows (take their values) or
  # plain arrays; Array() also tolerates a nil/missing param.
  explain_rows = Array(params[:explain_rows]).map { |row| row.respond_to?(:values) ? row.values : Array(row) }

  if sql.blank? || explain_rows.blank?
    return render json: { error: "SQL and EXPLAIN output are required." }, status: :unprocessable_entity
  end

  result = AiOptimizationService.new.call(sql, explain_rows, queryable_tables)
  render json: result
rescue StandardError => e
  render json: { error: "Optimization failed: #{e.message}" }, status: :unprocessable_entity
end
|
|
37
|
+
|
|
38
|
+
# POST action: explain a SQL statement in plain English via the AI client.
# Renders whatever JSON the model returns (expected: {"explanation": ...}).
def describe_query
  return ai_not_configured unless mysql_genius_config.ai_enabled?
  sql = params[:sql].to_s.strip
  return render json: { error: "SQL is required." }, status: :unprocessable_entity if sql.blank?

  # Note the `<<~PROMPT },` form: the heredoc body follows the hash literal
  # but is attached to the :content value of the system message.
  messages = [
    { role: "system", content: <<~PROMPT },
      You are a MySQL query explainer. Given a SQL query, explain in plain English:
      1. What the query does (tables involved, joins, filters, aggregations)
      2. How data flows through the query
      3. Any subtle behaviors (implicit type casts, NULL handling in NOT IN, DISTINCT effects, etc.)
      4. Potential performance concerns visible from the SQL structure alone
      #{ai_domain_context}
      Respond with JSON: {"explanation": "your plain-English explanation using markdown formatting"}
    PROMPT
    { role: "user", content: sql }
  ]

  result = AiClient.new.chat(messages: messages)
  render json: result
rescue StandardError => e
  render json: { error: "Explanation failed: #{e.message}" }, status: :unprocessable_entity
end
|
|
61
|
+
|
|
62
|
+
# POST action: AI review of table schemas for anti-patterns.
# params[:table] limits the review to one table; otherwise the first 20
# queryable tables are described and submitted.
def schema_review
  return ai_not_configured unless mysql_genius_config.ai_enabled?
  table = params[:table].to_s.strip
  connection = ActiveRecord::Base.connection

  tables_to_review = table.present? ? [table] : queryable_tables.first(20)
  # Build a compact text description (columns, indexes, approx row count)
  # for each table that actually exists; unknown names are skipped via `next`.
  schema_desc = tables_to_review.map do |t|
    next unless connection.tables.include?(t)
    cols = connection.columns(t).map { |c| "#{c.name} #{c.sql_type}#{c.null ? '' : ' NOT NULL'}#{c.default ? " DEFAULT #{c.default}" : ''}" }
    indexes = connection.indexes(t).map { |idx| "#{idx.unique ? 'UNIQUE ' : ''}INDEX #{idx.name} (#{idx.columns.join(', ')})" }
    # TABLE_ROWS from information_schema is an estimate for InnoDB, hence "~".
    row_count = connection.exec_query("SELECT TABLE_ROWS FROM information_schema.tables WHERE table_schema = #{connection.quote(connection.current_database)} AND table_name = #{connection.quote(t)}").rows.first&.first
    "Table: #{t} (~#{row_count} rows)\nColumns: #{cols.join(', ')}\nIndexes: #{indexes.any? ? indexes.join(', ') : 'NONE'}"
  end.compact.join("\n\n")

  messages = [
    { role: "system", content: <<~PROMPT },
      You are a MySQL schema reviewer. Analyze the following schema and identify anti-patterns and improvement opportunities. Look for:
      - Inappropriate column types (VARCHAR(255) for short values, TEXT where VARCHAR suffices, INT for booleans)
      - Missing indexes on foreign key columns or frequently filtered columns
      - Missing NOT NULL constraints where NULLs are unlikely
      - ENUM columns that should be lookup tables
      - Missing created_at/updated_at timestamps
      - Tables without a PRIMARY KEY
      - Overly wide indexes or redundant indexes
      - Column naming inconsistencies
      #{ai_domain_context}
      Respond with JSON: {"findings": "markdown-formatted findings organized by severity (Critical, Warning, Suggestion). Include specific ALTER TABLE statements where applicable."}
    PROMPT
    { role: "user", content: schema_desc }
  ]

  result = AiClient.new.chat(messages: messages)
  render json: result
rescue StandardError => e
  render json: { error: "Schema review failed: #{e.message}" }, status: :unprocessable_entity
end
|
|
98
|
+
|
|
99
|
+
# POST action: ask the AI to rewrite params[:sql] without anti-patterns.
# The schema of the referenced tables is included so the rewrite can pick
# concrete columns. Renders the model's JSON (original/rewritten/changes).
def rewrite_query
  return ai_not_configured unless mysql_genius_config.ai_enabled?
  sql = params[:sql].to_s.strip
  return render json: { error: "SQL is required." }, status: :unprocessable_entity if sql.blank?

  # Only the tables referenced by the query are described (see helper below).
  schema = build_schema_for_query(sql)

  messages = [
    { role: "system", content: <<~PROMPT },
      You are a MySQL query rewrite expert. Analyze the SQL for anti-patterns and suggest a rewritten version. Look for:
      - SELECT * when specific columns would suffice
      - Correlated subqueries that could be JOINs
      - OR conditions preventing index use (suggest UNION ALL)
      - LIKE '%prefix' patterns (leading wildcard)
      - Implicit type conversions in WHERE clauses
      - NOT IN with NULLable columns (suggest NOT EXISTS)
      - ORDER BY on non-indexed columns with LIMIT
      - Unnecessary DISTINCT
      - Functions on indexed columns in WHERE (e.g., DATE(created_at) instead of range)

      Available schema:
      #{schema}
      #{ai_domain_context}

      Respond with JSON: {"original": "the original SQL", "rewritten": "the improved SQL", "changes": "markdown list of each change and why it helps"}
    PROMPT
    { role: "user", content: sql }
  ]

  result = AiClient.new.chat(messages: messages)
  render json: result
rescue StandardError => e
  render json: { error: "Rewrite failed: #{e.message}" }, status: :unprocessable_entity
end
|
|
133
|
+
|
|
134
|
+
# POST action: suggest indexes for params[:sql] given its EXPLAIN output.
# Supplements the prompt with existing indexes and per-column cardinality
# from information_schema.STATISTICS for every table referenced by the query.
def index_advisor
  return ai_not_configured unless mysql_genius_config.ai_enabled?
  sql = params[:sql].to_s.strip
  # Same row normalization as #optimize: hash-like rows -> values, else arrays.
  explain_rows = Array(params[:explain_rows]).map { |row| row.respond_to?(:values) ? row.values : Array(row) }
  return render json: { error: "SQL and EXPLAIN output are required." }, status: :unprocessable_entity if sql.blank? || explain_rows.blank?

  connection = ActiveRecord::Base.connection
  tables_in_query = SqlValidator.extract_table_references(sql, connection)

  index_detail = tables_in_query.map do |t|
    indexes = connection.indexes(t).map { |idx| "#{idx.unique ? 'UNIQUE ' : ''}INDEX #{idx.name} (#{idx.columns.join(', ')})" }
    # Cardinality per index column, ordered so composite indexes read in
    # column position order (SEQ_IN_INDEX).
    stats = connection.exec_query("SELECT INDEX_NAME, COLUMN_NAME, CARDINALITY, SEQ_IN_INDEX FROM information_schema.STATISTICS WHERE TABLE_SCHEMA = #{connection.quote(connection.current_database)} AND TABLE_NAME = #{connection.quote(t)} ORDER BY INDEX_NAME, SEQ_IN_INDEX")
    cardinality = stats.rows.map { |r| "#{r[0]}.#{r[1]}: cardinality=#{r[2]}" }.join(", ")
    row_count = connection.exec_query("SELECT TABLE_ROWS FROM information_schema.tables WHERE table_schema = #{connection.quote(connection.current_database)} AND table_name = #{connection.quote(t)}").rows.first&.first
    "Table: #{t} (~#{row_count} rows)\nIndexes: #{indexes.any? ? indexes.join('; ') : 'NONE'}\nCardinality: #{cardinality}"
  end.join("\n\n")

  messages = [
    { role: "system", content: <<~PROMPT },
      You are a MySQL index advisor. Given a query, its EXPLAIN output, and current index/cardinality information, suggest optimal indexes. Consider:
      - Composite index column ordering (most selective first, or matching query order)
      - Covering indexes to avoid table lookups
      - Partial indexes for long string columns
      - Write-side costs (if this is a high-write table, note the INSERT/UPDATE overhead)
      - Whether existing indexes could be extended rather than creating new ones
      #{ai_domain_context}

      Respond with JSON: {"indexes": "markdown-formatted recommendations with exact CREATE INDEX statements, rationale for column ordering, and estimated impact. Include any indexes that should be DROPPED as part of the change."}
    PROMPT
    { role: "user", content: "Query:\n#{sql}\n\nEXPLAIN:\n#{explain_rows.map { |r| r.join(' | ') }.join("\n")}\n\nCurrent Indexes:\n#{index_detail}" }
  ]

  result = AiClient.new.chat(messages: messages)
  render json: result
rescue StandardError => e
  render json: { error: "Index advisor failed: #{e.message}" }, status: :unprocessable_entity
end
|
|
171
|
+
|
|
172
|
+
# POST action: AI health report over recent slow queries (from Redis, if
# configured) and the top digests from performance_schema.
def anomaly_detection
  return ai_not_configured unless mysql_genius_config.ai_enabled?
  connection = ActiveRecord::Base.connection

  # Gather recent slow queries captured by SlowQueryMonitor (Redis list).
  slow_data = []
  if mysql_genius_config.redis_url
    redis = Redis.new(url: mysql_genius_config.redis_url)
    raw = redis.lrange(SlowQueryMonitor.redis_key, 0, 99)
    # Best-effort parse: malformed entries become nil and are dropped.
    slow_data = raw.map { |e| JSON.parse(e) rescue nil }.compact
  end

  # Gather top query stats by total wait time; timer values are picoseconds,
  # so /1e9 yields milliseconds.
  stats = []
  begin
    results = connection.exec_query(<<~SQL)
      SELECT DIGEST_TEXT, COUNT_STAR AS calls,
             ROUND(SUM_TIMER_WAIT / 1000000000, 1) AS total_time_ms,
             ROUND(AVG_TIMER_WAIT / 1000000000, 1) AS avg_time_ms,
             SUM_ROWS_EXAMINED AS rows_examined, SUM_ROWS_SENT AS rows_sent,
             FIRST_SEEN, LAST_SEEN
      FROM performance_schema.events_statements_summary_by_digest
      WHERE SCHEMA_NAME = #{connection.quote(connection.current_database)}
        AND DIGEST_TEXT IS NOT NULL
      ORDER BY SUM_TIMER_WAIT DESC LIMIT 30
    SQL
    stats = results.rows.map { |r| { sql: r[0].to_s.truncate(200), calls: r[1], total_ms: r[2], avg_ms: r[3], rows_examined: r[4], rows_sent: r[5], first_seen: r[6], last_seen: r[7] } }
  rescue
    # performance_schema may not be available
  end

  slow_summary = slow_data.first(50).map { |q| "#{q['duration_ms']}ms @ #{q['timestamp']}: #{q['sql'].to_s.truncate(150)}" }.join("\n")
  stats_summary = stats.map { |q| "calls=#{q[:calls]} avg=#{q[:avg_ms]}ms total=#{q[:total_ms]}ms exam=#{q[:rows_examined]} sent=#{q[:rows_sent]}: #{q[:sql]}" }.join("\n")

  messages = [
    { role: "system", content: <<~PROMPT },
      You are a MySQL query anomaly detector. Analyze the following query data and identify:
      1. Queries with degrading performance (high avg time relative to complexity)
      2. N+1 query patterns (same template called many times in short windows)
      3. Full table scans (rows_examined >> rows_sent)
      4. Sudden new query patterns that may indicate code changes
      5. Queries creating excessive temp tables or sorts
      #{ai_domain_context}

      Respond with JSON: {"report": "markdown-formatted health report organized by severity. For each finding, explain the issue, affected query, and recommended fix."}
    PROMPT
    { role: "user", content: "Recent Slow Queries (last #{slow_data.size}):\n#{slow_summary.presence || 'None captured'}\n\nTop Queries by Total Time:\n#{stats_summary.presence || 'Not available'}" }
  ]

  result = AiClient.new.chat(messages: messages)
  render json: result
rescue StandardError => e
  render json: { error: "Anomaly detection failed: #{e.message}" }, status: :unprocessable_entity
end
|
|
226
|
+
|
|
227
|
+
# POST action: "why is the database slow right now?" — snapshots PROCESSLIST,
# key status counters, InnoDB engine status, and recent slow queries, then
# asks the AI for a root-cause diagnosis.
def root_cause
  return ai_not_configured unless mysql_genius_config.ai_enabled?
  connection = ActiveRecord::Base.connection

  # PROCESSLIST: one line per session, SQL truncated to keep the prompt small.
  processlist = connection.exec_query("SHOW FULL PROCESSLIST")
  process_info = processlist.rows.map { |r| "ID=#{r[0]} User=#{r[1]} Host=#{r[2]} DB=#{r[3]} Command=#{r[4]} Time=#{r[5]}s State=#{r[6]} SQL=#{r[7].to_s.truncate(200)}" }.join("\n")

  # Key status variables; keys are fetched case-insensitively because adapters
  # differ in result-column casing.
  status_rows = connection.exec_query("SHOW GLOBAL STATUS")
  status = {}
  status_rows.each { |r| status[(r["Variable_name"] || r["variable_name"]).to_s] = (r["Value"] || r["value"]).to_s }

  key_stats = %w[Threads_connected Threads_running Innodb_row_lock_waits Innodb_row_lock_current_waits
                 Innodb_buffer_pool_reads Innodb_buffer_pool_read_requests Slow_queries Created_tmp_disk_tables
                 Connections Aborted_connects].map { |k| "#{k}=#{status[k]}" }.join(", ")

  # InnoDB status (truncated); may require privileges, so failure is tolerated.
  innodb_status = ""
  begin
    result = connection.exec_query("SHOW ENGINE INNODB STATUS")
    innodb_status = result.rows.first&.last.to_s.truncate(3000)
  rescue
  end

  # Recent slow queries from the Redis-backed monitor, if configured.
  slow_summary = ""
  if mysql_genius_config.redis_url
    redis = Redis.new(url: mysql_genius_config.redis_url)
    raw = redis.lrange(SlowQueryMonitor.redis_key, 0, 19)
    slows = raw.map { |e| JSON.parse(e) rescue nil }.compact
    slow_summary = slows.map { |q| "#{q['duration_ms']}ms: #{q['sql'].to_s.truncate(150)}" }.join("\n")
  end

  messages = [
    { role: "system", content: <<~PROMPT },
      You are a MySQL incident responder. The user is asking "why is the database slow right now?" Analyze the provided data and give a root cause diagnosis. Consider:
      - Lock contention (row locks, metadata locks, table locks)
      - Long-running queries blocking others
      - Connection exhaustion
      - Buffer pool thrashing (low hit rate)
      - Disk I/O saturation
      - Replication lag
      - Unusual query patterns
      #{ai_domain_context}

      Respond with JSON: {"diagnosis": "markdown-formatted root cause analysis. Start with a 1-2 sentence summary, then detailed findings. Include specific actionable steps to resolve the issue."}
    PROMPT
    { role: "user", content: "PROCESSLIST:\n#{process_info}\n\nKey Status:\n#{key_stats}\n\nInnoDB Status (excerpt):\n#{innodb_status.presence || 'Not available'}\n\nRecent Slow Queries:\n#{slow_summary.presence || 'None captured'}" }
  ]

  result = AiClient.new.chat(messages: messages)
  render json: result
rescue StandardError => e
  render json: { error: "Root cause analysis failed: #{e.message}" }, status: :unprocessable_entity
end
|
|
283
|
+
|
|
284
|
+
# POST action: assess the operational risk of params[:migration] (Rails DSL
# or raw DDL) against live table sizes and active query patterns.
def migration_risk
  return ai_not_configured unless mysql_genius_config.ai_enabled?
  migration_sql = params[:migration].to_s.strip
  return render json: { error: "Migration SQL or Ruby code is required." }, status: :unprocessable_entity if migration_sql.blank?

  connection = ActiveRecord::Base.connection

  # Try to identify tables mentioned in the migration: first the Rails
  # migration DSL methods, then raw ALTER TABLE statements.
  table_names = migration_sql.scan(/(?:create_table|add_column|remove_column|add_index|remove_index|rename_column|change_column|alter\s+table)\s+[:\"]?(\w+)/i).flatten.uniq
  table_names += migration_sql.scan(/ALTER\s+TABLE\s+`?(\w+)`?/i).flatten

  table_info = table_names.uniq.map do |t|
    next unless connection.tables.include?(t)
    row_count = connection.exec_query("SELECT TABLE_ROWS FROM information_schema.tables WHERE table_schema = #{connection.quote(connection.current_database)} AND table_name = #{connection.quote(t)}").rows.first&.first
    indexes = connection.indexes(t).map { |idx| "#{idx.name} (#{idx.columns.join(', ')})" }
    "Table: #{t} (~#{row_count} rows, #{indexes.size} indexes)"
  end.compact.join("\n")

  # Current active queries on those tables (best effort; performance_schema
  # may be unavailable, hence the swallowed rescue).
  active = ""
  begin
    results = connection.exec_query(<<~SQL)
      SELECT DIGEST_TEXT, COUNT_STAR AS calls, ROUND(AVG_TIMER_WAIT / 1000000000, 1) AS avg_ms
      FROM performance_schema.events_statements_summary_by_digest
      WHERE SCHEMA_NAME = #{connection.quote(connection.current_database)}
        AND DIGEST_TEXT IS NOT NULL
        AND COUNT_STAR > 10
      ORDER BY COUNT_STAR DESC LIMIT 20
    SQL
    # Substring match of table name inside the digest text — coarse but cheap.
    matching = results.rows.select { |r| table_names.any? { |t| r[0].to_s.downcase.include?(t.downcase) } }
    active = matching.map { |r| "calls=#{r[1]} avg=#{r[2]}ms: #{r[0].to_s.truncate(200)}" }.join("\n")
  rescue
  end

  messages = [
    { role: "system", content: <<~PROMPT },
      You are a MySQL migration risk assessor. Given a Rails migration or DDL, evaluate:
      1. Will this lock the table? For how long given the row count?
      2. Is this safe to run during traffic, or does it need a maintenance window?
      3. Should pt-online-schema-change or gh-ost be used instead?
      4. Will it break or degrade any of the active queries against this table?
      5. Are there any data loss risks?
      6. What is the recommended deployment strategy?
      #{ai_domain_context}

      Respond with JSON: {"risk_level": "low|medium|high|critical", "assessment": "markdown-formatted risk assessment with specific recommendations and estimated lock duration"}
    PROMPT
    { role: "user", content: "Migration:\n#{migration_sql}\n\nAffected Tables:\n#{table_info.presence || 'Could not determine'}\n\nActive Queries on These Tables:\n#{active.presence || 'None found or performance_schema unavailable'}" }
  ]

  result = AiClient.new.chat(messages: messages)
  render json: result
rescue StandardError => e
  render json: { error: "Migration risk assessment failed: #{e.message}" }, status: :unprocessable_entity
end
|
|
339
|
+
|
|
340
|
+
private
|
|
341
|
+
|
|
342
|
+
# Shared 404 response for every AI action when AI is not configured.
def ai_not_configured
  payload = { error: "AI features are not configured." }
  render json: payload, status: :not_found
end
|
|
345
|
+
|
|
346
|
+
# Optional app-specific context appended to every AI system prompt.
# Returns "" when no context is configured.
def ai_domain_context
  context = mysql_genius_config.ai_system_context
  return "" if context.blank?

  "\nDomain context:\n#{context}"
end
|
|
350
|
+
|
|
351
|
+
# Builds a one-line-per-table schema description ("table: col (type), ...")
# covering only the tables referenced by +sql+.
def build_schema_for_query(sql)
  conn = ActiveRecord::Base.connection
  descriptions = SqlValidator.extract_table_references(sql, conn).map do |table|
    column_list = conn.columns(table).map { |col| "#{col.name} (#{col.type})" }.join(', ')
    "#{table}: #{column_list}"
  end
  descriptions.join("\n")
end
|
|
359
|
+
end
|
|
360
|
+
end
|
|
@@ -0,0 +1,259 @@
|
|
|
1
|
+
module MysqlGenius
|
|
2
|
+
module DatabaseAnalysis
|
|
3
|
+
extend ActiveSupport::Concern
|
|
4
|
+
|
|
5
|
+
# GET action: report redundant indexes. An index is redundant when its column
# list is a left-prefix of another index on the same table (a unique index is
# never reported as covered by a non-unique one). Renders a JSON array.
def duplicate_indexes
  connection = ActiveRecord::Base.connection
  findings = []

  queryable_tables.each do |table|
    table_indexes = connection.indexes(table)
    next if table_indexes.size < 2

    # Compare every ordered pair of distinct indexes on the table.
    table_indexes.product(table_indexes).each do |candidate, covering|
      next if candidate.name == covering.name
      # candidate is redundant only if its columns form a left-prefix of
      # covering's columns ...
      next unless candidate.columns.size <= covering.columns.size
      next unless covering.columns.first(candidate.columns.size) == candidate.columns
      # ... and we never suggest dropping a unique index in favor of a
      # non-unique one.
      next if candidate.unique && !covering.unique

      findings << {
        table: table,
        duplicate_index: candidate.name,
        duplicate_columns: candidate.columns,
        covered_by_index: covering.name,
        covered_by_columns: covering.columns,
        unique: candidate.unique
      }
    end
  end

  # Identical-column pairs show up twice (A covers B and B covers A);
  # keep only the first occurrence of each unordered pair per table.
  seen = Set.new
  findings = findings.reject do |finding|
    key = [finding[:table], [finding[:duplicate_index], finding[:covered_by_index]].sort].flatten.join(":")
    next true if seen.include?(key)

    seen.add(key)
    false
  end

  render json: findings
end
|
|
42
|
+
|
|
43
|
+
# GET action: per-table size breakdown (data/index/total/fragmented MB and
# approximate row counts) for the current schema, largest tables first.
def table_sizes
  connection = ActiveRecord::Base.connection
  db_name = connection.current_database

  results = connection.exec_query(<<~SQL)
    SELECT
      table_name,
      table_rows,
      ROUND(data_length / 1024 / 1024, 2) AS data_mb,
      ROUND(index_length / 1024 / 1024, 2) AS index_mb,
      ROUND((data_length + index_length) / 1024 / 1024, 2) AS total_mb,
      ROUND(data_free / 1024 / 1024, 2) AS fragmented_mb
    FROM information_schema.tables
    WHERE table_schema = #{connection.quote(db_name)}
      AND table_type = 'BASE TABLE'
    ORDER BY (data_length + index_length) DESC
  SQL

  # Result-key casing varies between MySQL versions/adapters, so identifier
  # columns are read case-insensitively; the computed aliases are lowercase.
  payload = results.map do |r|
    size_fields = %w[data_mb index_mb total_mb fragmented_mb].to_h { |key| [key.to_sym, r[key].to_f] }
    {
      table: r["table_name"] || r["TABLE_NAME"],
      rows: r["table_rows"] || r["TABLE_ROWS"]
    }.merge(size_fields)
  end

  render json: payload
end
|
|
74
|
+
|
|
75
|
+
# GET action: top 50 query digests from performance_schema, sortable via
# params[:sort] (whitelisted; defaults to total_time). Renders a JSON array.
def query_stats
  connection = ActiveRecord::Base.connection
  # Whitelist the sort param so it can be interpolated into ORDER BY safely.
  sort = %w[total_time avg_time calls rows_examined].include?(params[:sort]) ? params[:sort] : "total_time"

  order_clause = case sort
                 when "total_time" then "SUM_TIMER_WAIT DESC"
                 when "avg_time" then "AVG_TIMER_WAIT DESC"
                 when "calls" then "COUNT_STAR DESC"
                 when "rows_examined" then "SUM_ROWS_EXAMINED DESC"
                 end

  # Timer columns are picoseconds; /1e9 converts to milliseconds.
  results = connection.exec_query(<<~SQL)
    SELECT
      DIGEST_TEXT,
      COUNT_STAR AS calls,
      ROUND(SUM_TIMER_WAIT / 1000000000, 1) AS total_time_ms,
      ROUND(AVG_TIMER_WAIT / 1000000000, 1) AS avg_time_ms,
      ROUND(MAX_TIMER_WAIT / 1000000000, 1) AS max_time_ms,
      SUM_ROWS_EXAMINED AS rows_examined,
      SUM_ROWS_SENT AS rows_sent,
      SUM_CREATED_TMP_DISK_TABLES AS tmp_disk_tables,
      SUM_SORT_ROWS AS sort_rows,
      FIRST_SEEN,
      LAST_SEEN
    FROM performance_schema.events_statements_summary_by_digest
    WHERE SCHEMA_NAME = #{connection.quote(connection.current_database)}
      AND DIGEST_TEXT IS NOT NULL
      AND DIGEST_TEXT NOT LIKE 'EXPLAIN%'
    ORDER BY #{order_clause}
    LIMIT 50
  SQL

  # Keys are looked up in both casings: adapters differ in how they case
  # result column names.
  queries = results.map do |row|
    digest = row["DIGEST_TEXT"] || row["digest_text"] || ""
    calls = (row["calls"] || row["CALLS"] || 0).to_i
    rows_examined = (row["rows_examined"] || row["ROWS_EXAMINED"] || 0).to_i
    rows_sent = (row["rows_sent"] || row["ROWS_SENT"] || 0).to_i
    {
      sql: digest.truncate(500),
      calls: calls,
      total_time_ms: row["total_time_ms"].to_f,
      avg_time_ms: row["avg_time_ms"].to_f,
      max_time_ms: row["max_time_ms"].to_f,
      rows_examined: rows_examined,
      rows_sent: rows_sent,
      # High examined/sent ratio is the classic full-scan signal.
      rows_ratio: rows_sent > 0 ? (rows_examined.to_f / rows_sent).round(1) : 0,
      tmp_disk_tables: (row["tmp_disk_tables"] || row["TMP_DISK_TABLES"] || 0).to_i,
      sort_rows: (row["sort_rows"] || row["SORT_ROWS"] || 0).to_i,
      first_seen: row["FIRST_SEEN"] || row["first_seen"],
      last_seen: row["LAST_SEEN"] || row["last_seen"]
    }
  end

  render json: queries
rescue ActiveRecord::StatementInvalid => e
  render json: { error: "Query statistics require performance_schema to be enabled. #{e.message.split(':').last.strip}" }, status: :unprocessable_entity
end
|
|
132
|
+
|
|
133
|
+
# GET action: secondary indexes that have never been read (COUNT_READ = 0)
# since the server started, per performance_schema index-usage counters.
# Each entry includes a ready-to-run DROP INDEX statement.
def unused_indexes
  connection = ActiveRecord::Base.connection
  db_name = connection.current_database

  results = connection.exec_query(<<~SQL)
    SELECT
      s.OBJECT_SCHEMA AS table_schema,
      s.OBJECT_NAME AS table_name,
      s.INDEX_NAME AS index_name,
      s.COUNT_READ AS reads,
      s.COUNT_WRITE AS writes,
      t.TABLE_ROWS AS table_rows
    FROM performance_schema.table_io_waits_summary_by_index_usage s
    JOIN information_schema.tables t
      ON t.TABLE_SCHEMA = s.OBJECT_SCHEMA AND t.TABLE_NAME = s.OBJECT_NAME
    WHERE s.OBJECT_SCHEMA = #{connection.quote(db_name)}
      AND s.INDEX_NAME IS NOT NULL
      AND s.INDEX_NAME != 'PRIMARY'
      AND s.COUNT_READ = 0
      AND t.TABLE_ROWS > 0
    ORDER BY s.COUNT_WRITE DESC
  SQL

  # Dual-case key lookups: adapters differ in result column casing.
  indexes = results.map do |row|
    table = row["table_name"] || row["TABLE_NAME"]
    index_name = row["index_name"] || row["INDEX_NAME"]
    {
      table: table,
      index_name: index_name,
      reads: (row["reads"] || row["READS"] || 0).to_i,
      writes: (row["writes"] || row["WRITES"] || 0).to_i,
      table_rows: (row["table_rows"] || row["TABLE_ROWS"] || 0).to_i,
      drop_sql: "ALTER TABLE `#{table}` DROP INDEX `#{index_name}`;"
    }
  end

  render json: indexes
rescue ActiveRecord::StatementInvalid => e
  render json: { error: "Unused index detection requires performance_schema. #{e.message.split(':').last.strip}" }, status: :unprocessable_entity
end
|
|
173
|
+
|
|
174
|
+
# GET action: server dashboard snapshot — version/uptime, connection usage,
# InnoDB buffer pool health, and query-volume counters — as nested JSON.
def server_overview
  connection = ActiveRecord::Base.connection

  # Global status variables (case-insensitive key lookup for adapter variance).
  status_rows = connection.exec_query("SHOW GLOBAL STATUS")
  status = {}
  status_rows.each { |r| status[(r["Variable_name"] || r["variable_name"]).to_s] = (r["Value"] || r["value"]).to_s }

  # Global variables
  vars_rows = connection.exec_query("SHOW GLOBAL VARIABLES")
  vars = {}
  vars_rows.each { |r| vars[(r["Variable_name"] || r["variable_name"]).to_s] = (r["Value"] || r["value"]).to_s }

  version = connection.select_value("SELECT VERSION()")
  uptime_seconds = status["Uptime"].to_i

  # Integer division breaks uptime into days/hours/minutes for display.
  days = uptime_seconds / 86400
  hours = (uptime_seconds % 86400) / 3600
  minutes = (uptime_seconds % 3600) / 60

  max_conn = vars["max_connections"].to_i
  current_conn = status["Threads_connected"].to_i
  conn_pct = max_conn > 0 ? ((current_conn.to_f / max_conn) * 100).round(1) : 0

  buffer_pool_bytes = vars["innodb_buffer_pool_size"].to_i
  buffer_pool_mb = (buffer_pool_bytes / 1024.0 / 1024.0).round(1)

  # Buffer pool hit rate: fraction of read requests served without disk reads.
  reads = status["Innodb_buffer_pool_read_requests"].to_f
  disk_reads = status["Innodb_buffer_pool_reads"].to_f
  hit_rate = reads > 0 ? (((reads - disk_reads) / reads) * 100).round(2) : 0

  # Tmp tables: share that spilled to disk.
  tmp_tables = status["Created_tmp_tables"].to_i
  tmp_disk_tables = status["Created_tmp_disk_tables"].to_i
  tmp_disk_pct = tmp_tables > 0 ? ((tmp_disk_tables.to_f / tmp_tables) * 100).round(1) : 0

  # Slow queries from MySQL's own counter
  slow_queries = status["Slow_queries"].to_i

  # Questions (total queries) -> average queries/second over the uptime.
  questions = status["Questions"].to_i
  qps = uptime_seconds > 0 ? (questions.to_f / uptime_seconds).round(1) : 0

  render json: {
    server: {
      version: version,
      uptime: "#{days}d #{hours}h #{minutes}m",
      uptime_seconds: uptime_seconds
    },
    connections: {
      max: max_conn,
      current: current_conn,
      usage_pct: conn_pct,
      threads_running: status["Threads_running"].to_i,
      threads_cached: status["Threads_cached"].to_i,
      threads_created: status["Threads_created"].to_i,
      aborted_connects: status["Aborted_connects"].to_i,
      aborted_clients: status["Aborted_clients"].to_i,
      max_used: status["Max_used_connections"].to_i
    },
    innodb: {
      buffer_pool_mb: buffer_pool_mb,
      buffer_pool_hit_rate: hit_rate,
      buffer_pool_pages_dirty: status["Innodb_buffer_pool_pages_dirty"].to_i,
      buffer_pool_pages_free: status["Innodb_buffer_pool_pages_free"].to_i,
      buffer_pool_pages_total: status["Innodb_buffer_pool_pages_total"].to_i,
      row_lock_waits: status["Innodb_row_lock_waits"].to_i,
      row_lock_time_ms: (status["Innodb_row_lock_time"].to_f).round(0)
    },
    queries: {
      questions: questions,
      qps: qps,
      slow_queries: slow_queries,
      tmp_tables: tmp_tables,
      tmp_disk_tables: tmp_disk_tables,
      tmp_disk_pct: tmp_disk_pct,
      select_full_join: status["Select_full_join"].to_i,
      sort_merge_passes: status["Sort_merge_passes"].to_i
    }
  }
rescue => e
  render json: { error: "Failed to load server overview: #{e.message}" }, status: :unprocessable_entity
end
|
|
258
|
+
end
|
|
259
|
+
end
|