crackerjack 0.30.3-py3-none-any.whl → 0.31.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic.

Files changed (156)
  1. crackerjack/CLAUDE.md +1005 -0
  2. crackerjack/RULES.md +380 -0
  3. crackerjack/__init__.py +42 -13
  4. crackerjack/__main__.py +227 -299
  5. crackerjack/agents/__init__.py +41 -0
  6. crackerjack/agents/architect_agent.py +281 -0
  7. crackerjack/agents/base.py +170 -0
  8. crackerjack/agents/coordinator.py +512 -0
  9. crackerjack/agents/documentation_agent.py +498 -0
  10. crackerjack/agents/dry_agent.py +388 -0
  11. crackerjack/agents/formatting_agent.py +245 -0
  12. crackerjack/agents/import_optimization_agent.py +281 -0
  13. crackerjack/agents/performance_agent.py +669 -0
  14. crackerjack/agents/proactive_agent.py +104 -0
  15. crackerjack/agents/refactoring_agent.py +788 -0
  16. crackerjack/agents/security_agent.py +529 -0
  17. crackerjack/agents/test_creation_agent.py +657 -0
  18. crackerjack/agents/test_specialist_agent.py +486 -0
  19. crackerjack/agents/tracker.py +212 -0
  20. crackerjack/api.py +560 -0
  21. crackerjack/cli/__init__.py +24 -0
  22. crackerjack/cli/facade.py +104 -0
  23. crackerjack/cli/handlers.py +267 -0
  24. crackerjack/cli/interactive.py +471 -0
  25. crackerjack/cli/options.py +409 -0
  26. crackerjack/cli/utils.py +18 -0
  27. crackerjack/code_cleaner.py +618 -928
  28. crackerjack/config/__init__.py +19 -0
  29. crackerjack/config/hooks.py +218 -0
  30. crackerjack/core/__init__.py +0 -0
  31. crackerjack/core/async_workflow_orchestrator.py +406 -0
  32. crackerjack/core/autofix_coordinator.py +200 -0
  33. crackerjack/core/container.py +104 -0
  34. crackerjack/core/enhanced_container.py +542 -0
  35. crackerjack/core/performance.py +243 -0
  36. crackerjack/core/phase_coordinator.py +585 -0
  37. crackerjack/core/proactive_workflow.py +316 -0
  38. crackerjack/core/session_coordinator.py +289 -0
  39. crackerjack/core/workflow_orchestrator.py +826 -0
  40. crackerjack/dynamic_config.py +94 -103
  41. crackerjack/errors.py +263 -41
  42. crackerjack/executors/__init__.py +11 -0
  43. crackerjack/executors/async_hook_executor.py +431 -0
  44. crackerjack/executors/cached_hook_executor.py +242 -0
  45. crackerjack/executors/hook_executor.py +345 -0
  46. crackerjack/executors/individual_hook_executor.py +669 -0
  47. crackerjack/intelligence/__init__.py +44 -0
  48. crackerjack/intelligence/adaptive_learning.py +751 -0
  49. crackerjack/intelligence/agent_orchestrator.py +551 -0
  50. crackerjack/intelligence/agent_registry.py +414 -0
  51. crackerjack/intelligence/agent_selector.py +502 -0
  52. crackerjack/intelligence/integration.py +290 -0
  53. crackerjack/interactive.py +576 -315
  54. crackerjack/managers/__init__.py +11 -0
  55. crackerjack/managers/async_hook_manager.py +135 -0
  56. crackerjack/managers/hook_manager.py +137 -0
  57. crackerjack/managers/publish_manager.py +433 -0
  58. crackerjack/managers/test_command_builder.py +151 -0
  59. crackerjack/managers/test_executor.py +443 -0
  60. crackerjack/managers/test_manager.py +258 -0
  61. crackerjack/managers/test_manager_backup.py +1124 -0
  62. crackerjack/managers/test_progress.py +114 -0
  63. crackerjack/mcp/__init__.py +0 -0
  64. crackerjack/mcp/cache.py +336 -0
  65. crackerjack/mcp/client_runner.py +104 -0
  66. crackerjack/mcp/context.py +621 -0
  67. crackerjack/mcp/dashboard.py +636 -0
  68. crackerjack/mcp/enhanced_progress_monitor.py +479 -0
  69. crackerjack/mcp/file_monitor.py +336 -0
  70. crackerjack/mcp/progress_components.py +569 -0
  71. crackerjack/mcp/progress_monitor.py +949 -0
  72. crackerjack/mcp/rate_limiter.py +332 -0
  73. crackerjack/mcp/server.py +22 -0
  74. crackerjack/mcp/server_core.py +244 -0
  75. crackerjack/mcp/service_watchdog.py +501 -0
  76. crackerjack/mcp/state.py +395 -0
  77. crackerjack/mcp/task_manager.py +257 -0
  78. crackerjack/mcp/tools/__init__.py +17 -0
  79. crackerjack/mcp/tools/core_tools.py +249 -0
  80. crackerjack/mcp/tools/error_analyzer.py +308 -0
  81. crackerjack/mcp/tools/execution_tools.py +372 -0
  82. crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
  83. crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
  84. crackerjack/mcp/tools/intelligence_tools.py +314 -0
  85. crackerjack/mcp/tools/monitoring_tools.py +502 -0
  86. crackerjack/mcp/tools/proactive_tools.py +384 -0
  87. crackerjack/mcp/tools/progress_tools.py +217 -0
  88. crackerjack/mcp/tools/utility_tools.py +341 -0
  89. crackerjack/mcp/tools/workflow_executor.py +565 -0
  90. crackerjack/mcp/websocket/__init__.py +14 -0
  91. crackerjack/mcp/websocket/app.py +39 -0
  92. crackerjack/mcp/websocket/endpoints.py +559 -0
  93. crackerjack/mcp/websocket/jobs.py +253 -0
  94. crackerjack/mcp/websocket/server.py +116 -0
  95. crackerjack/mcp/websocket/websocket_handler.py +78 -0
  96. crackerjack/mcp/websocket_server.py +10 -0
  97. crackerjack/models/__init__.py +31 -0
  98. crackerjack/models/config.py +93 -0
  99. crackerjack/models/config_adapter.py +230 -0
  100. crackerjack/models/protocols.py +118 -0
  101. crackerjack/models/task.py +154 -0
  102. crackerjack/monitoring/ai_agent_watchdog.py +450 -0
  103. crackerjack/monitoring/regression_prevention.py +638 -0
  104. crackerjack/orchestration/__init__.py +0 -0
  105. crackerjack/orchestration/advanced_orchestrator.py +970 -0
  106. crackerjack/orchestration/coverage_improvement.py +223 -0
  107. crackerjack/orchestration/execution_strategies.py +341 -0
  108. crackerjack/orchestration/test_progress_streamer.py +636 -0
  109. crackerjack/plugins/__init__.py +15 -0
  110. crackerjack/plugins/base.py +200 -0
  111. crackerjack/plugins/hooks.py +246 -0
  112. crackerjack/plugins/loader.py +335 -0
  113. crackerjack/plugins/managers.py +259 -0
  114. crackerjack/py313.py +8 -3
  115. crackerjack/services/__init__.py +22 -0
  116. crackerjack/services/cache.py +314 -0
  117. crackerjack/services/config.py +358 -0
  118. crackerjack/services/config_integrity.py +99 -0
  119. crackerjack/services/contextual_ai_assistant.py +516 -0
  120. crackerjack/services/coverage_ratchet.py +356 -0
  121. crackerjack/services/debug.py +736 -0
  122. crackerjack/services/dependency_monitor.py +617 -0
  123. crackerjack/services/enhanced_filesystem.py +439 -0
  124. crackerjack/services/file_hasher.py +151 -0
  125. crackerjack/services/filesystem.py +421 -0
  126. crackerjack/services/git.py +176 -0
  127. crackerjack/services/health_metrics.py +611 -0
  128. crackerjack/services/initialization.py +873 -0
  129. crackerjack/services/log_manager.py +286 -0
  130. crackerjack/services/logging.py +174 -0
  131. crackerjack/services/metrics.py +578 -0
  132. crackerjack/services/pattern_cache.py +362 -0
  133. crackerjack/services/pattern_detector.py +515 -0
  134. crackerjack/services/performance_benchmarks.py +653 -0
  135. crackerjack/services/security.py +163 -0
  136. crackerjack/services/server_manager.py +234 -0
  137. crackerjack/services/smart_scheduling.py +144 -0
  138. crackerjack/services/tool_version_service.py +61 -0
  139. crackerjack/services/unified_config.py +437 -0
  140. crackerjack/services/version_checker.py +248 -0
  141. crackerjack/slash_commands/__init__.py +14 -0
  142. crackerjack/slash_commands/init.md +122 -0
  143. crackerjack/slash_commands/run.md +163 -0
  144. crackerjack/slash_commands/status.md +127 -0
  145. crackerjack-0.31.7.dist-info/METADATA +742 -0
  146. crackerjack-0.31.7.dist-info/RECORD +149 -0
  147. crackerjack-0.31.7.dist-info/entry_points.txt +2 -0
  148. crackerjack/.gitignore +0 -34
  149. crackerjack/.libcst.codemod.yaml +0 -18
  150. crackerjack/.pdm.toml +0 -1
  151. crackerjack/crackerjack.py +0 -3805
  152. crackerjack/pyproject.toml +0 -286
  153. crackerjack-0.30.3.dist-info/METADATA +0 -1290
  154. crackerjack-0.30.3.dist-info/RECORD +0 -16
  155. {crackerjack-0.30.3.dist-info → crackerjack-0.31.7.dist-info}/WHEEL +0 -0
  156. {crackerjack-0.30.3.dist-info → crackerjack-0.31.7.dist-info}/licenses/LICENSE +0 -0
crackerjack/services/metrics.py
@@ -0,0 +1,578 @@
+ import json
+ import sqlite3
+ import threading
+ from contextlib import contextmanager
+ from datetime import date, datetime
+ from pathlib import Path
+ from typing import Any
+
+
+ class MetricsCollector:
+     def __init__(self, db_path: Path | None = None) -> None:
+         if db_path is None:
+             db_dir = Path.home() / ".cache" / "crackerjack"
+             db_dir.mkdir(parents=True, exist_ok=True)
+             db_path = db_dir / "metrics.db"
+
+         self.db_path = db_path
+         self._lock = threading.Lock()
+         self._init_database()
+
+     def _init_database(self) -> None:
+         with self._get_connection() as conn:
+             conn.executescript("""
+                 -- Jobs table
+                 CREATE TABLE IF NOT EXISTS jobs (
+                     id INTEGER PRIMARY KEY AUTOINCREMENT,
+                     job_id TEXT UNIQUE NOT NULL,
+                     start_time TIMESTAMP NOT NULL,
+                     end_time TIMESTAMP,
+                     status TEXT NOT NULL, -- 'running', 'success', 'failed', 'cancelled'
+                     iterations INTEGER DEFAULT 0,
+                     ai_agent BOOLEAN DEFAULT 0,
+                     error_message TEXT,
+                     metadata TEXT -- JSON field for additional data
+                 );
+
+                 -- Errors table
+                 CREATE TABLE IF NOT EXISTS errors (
+                     id INTEGER PRIMARY KEY AUTOINCREMENT,
+                     job_id TEXT,
+                     timestamp TIMESTAMP NOT NULL,
+                     error_type TEXT NOT NULL, -- 'hook', 'test', 'lint', 'type_check', etc.
+                     error_category TEXT, -- 'ruff', 'pyright', 'pytest', etc.
+                     error_message TEXT,
+                     file_path TEXT,
+                     line_number INTEGER,
+                     FOREIGN KEY (job_id) REFERENCES jobs(job_id)
+                 );
+
+                 -- Hook executions table
+                 CREATE TABLE IF NOT EXISTS hook_executions (
+                     id INTEGER PRIMARY KEY AUTOINCREMENT,
+                     job_id TEXT,
+                     timestamp TIMESTAMP NOT NULL,
+                     hook_name TEXT NOT NULL,
+                     hook_type TEXT, -- 'fast', 'comprehensive'
+                     execution_time_ms INTEGER,
+                     status TEXT, -- 'success', 'failed', 'skipped'
+                     FOREIGN KEY (job_id) REFERENCES jobs(job_id)
+                 );
+
+                 -- Test executions table
+                 CREATE TABLE IF NOT EXISTS test_executions (
+                     id INTEGER PRIMARY KEY AUTOINCREMENT,
+                     job_id TEXT,
+                     timestamp TIMESTAMP NOT NULL,
+                     total_tests INTEGER,
+                     passed INTEGER,
+                     failed INTEGER,
+                     skipped INTEGER,
+                     execution_time_ms INTEGER,
+                     coverage_percent REAL,
+                     FOREIGN KEY (job_id) REFERENCES jobs(job_id)
+                 );
+
+                 -- Orchestration executions table (NEW)
+                 CREATE TABLE IF NOT EXISTS orchestration_executions (
+                     id INTEGER PRIMARY KEY AUTOINCREMENT,
+                     job_id TEXT,
+                     timestamp TIMESTAMP NOT NULL,
+                     execution_strategy TEXT NOT NULL, -- 'batch', 'individual', 'adaptive', 'selective'
+                     progress_level TEXT NOT NULL, -- 'basic', 'detailed', 'granular', 'streaming'
+                     ai_mode TEXT NOT NULL, -- 'single-agent', 'multi-agent', 'coordinator'
+                     iteration_count INTEGER DEFAULT 1,
+                     strategy_switches INTEGER DEFAULT 0, -- How many times strategy changed
+                     correlation_insights TEXT, -- JSON of correlation analysis results
+                     total_execution_time_ms INTEGER,
+                     hooks_execution_time_ms INTEGER,
+                     tests_execution_time_ms INTEGER,
+                     ai_analysis_time_ms INTEGER,
+                     FOREIGN KEY (job_id) REFERENCES jobs(job_id)
+                 );
+
+                 -- Strategy decisions table (NEW)
+                 CREATE TABLE IF NOT EXISTS strategy_decisions (
+                     id INTEGER PRIMARY KEY AUTOINCREMENT,
+                     job_id TEXT,
+                     iteration INTEGER,
+                     timestamp TIMESTAMP NOT NULL,
+                     previous_strategy TEXT,
+                     selected_strategy TEXT NOT NULL,
+                     decision_reason TEXT, -- Why this strategy was chosen
+                     context_data TEXT, -- JSON of execution context
+                     effectiveness_score REAL, -- How well the strategy worked (0-1)
+                     FOREIGN KEY (job_id) REFERENCES jobs(job_id)
+                 );
+
+                 -- Individual test executions table (NEW - more granular than test_executions)
+                 CREATE TABLE IF NOT EXISTS individual_test_executions (
+                     id INTEGER PRIMARY KEY AUTOINCREMENT,
+                     job_id TEXT,
+                     timestamp TIMESTAMP NOT NULL,
+                     test_id TEXT NOT NULL, -- Full test identifier
+                     test_file TEXT NOT NULL,
+                     test_class TEXT,
+                     test_method TEXT,
+                     status TEXT NOT NULL, -- 'passed', 'failed', 'skipped', 'error'
+                     execution_time_ms INTEGER,
+                     error_message TEXT,
+                     error_traceback TEXT,
+                     FOREIGN KEY (job_id) REFERENCES jobs(job_id)
+                 );
+
+                 -- Daily summary table (for quick stats)
+                 CREATE TABLE IF NOT EXISTS daily_summary (
+                     date DATE PRIMARY KEY,
+                     total_jobs INTEGER DEFAULT 0,
+                     successful_jobs INTEGER DEFAULT 0,
+                     failed_jobs INTEGER DEFAULT 0,
+                     total_errors INTEGER DEFAULT 0,
+                     hook_errors INTEGER DEFAULT 0,
+                     test_errors INTEGER DEFAULT 0,
+                     lint_errors INTEGER DEFAULT 0,
+                     type_errors INTEGER DEFAULT 0,
+                     avg_job_duration_ms INTEGER,
+                     total_ai_fixes INTEGER DEFAULT 0,
+                     orchestrated_jobs INTEGER DEFAULT 0, -- NEW
+                     avg_orchestration_iterations REAL DEFAULT 0, -- NEW
+                     most_effective_strategy TEXT -- NEW
+                 );
+
+                 -- Create indexes for performance
+                 CREATE INDEX IF NOT EXISTS idx_jobs_start_time ON jobs(start_time);
+                 CREATE INDEX IF NOT EXISTS idx_errors_job_id ON errors(job_id);
+                 CREATE INDEX IF NOT EXISTS idx_errors_type ON errors(error_type);
+                 CREATE INDEX IF NOT EXISTS idx_hooks_job_id ON hook_executions(job_id);
+                 CREATE INDEX IF NOT EXISTS idx_tests_job_id ON test_executions(job_id);
+                 CREATE INDEX IF NOT EXISTS idx_orchestration_job_id ON orchestration_executions(job_id);
+                 CREATE INDEX IF NOT EXISTS idx_strategy_decisions_job_id ON strategy_decisions(job_id);
+                 CREATE INDEX IF NOT EXISTS idx_individual_tests_job_id ON individual_test_executions(job_id);
+                 CREATE INDEX IF NOT EXISTS idx_strategy_decisions_strategy ON strategy_decisions(selected_strategy);
+             """)
+
+     @contextmanager
+     def _get_connection(self):
+         conn = sqlite3.connect(str(self.db_path))
+         conn.row_factory = sqlite3.Row
+         try:
+             yield conn
+             conn.commit()
+         except Exception:
+             conn.rollback()
+             raise
+         finally:
+             conn.close()
+
+     def start_job(
+         self,
+         job_id: str,
+         ai_agent: bool = False,
+         metadata: dict[str, Any] | None = None,
+     ) -> None:
+         with self._lock, self._get_connection() as conn:
+             conn.execute(
+                 """
+                 INSERT INTO jobs (job_id, start_time, status, ai_agent, metadata)
+                 VALUES (?, ?, 'running', ?, ?)
+                 """,
+                 (job_id, datetime.now(), ai_agent, json.dumps(metadata or {})),
+             )
+
+     def end_job(
+         self,
+         job_id: str,
+         status: str,
+         iterations: int = 0,
+         error_message: str | None = None,
+     ) -> None:
+         with self._lock, self._get_connection() as conn:
+             conn.execute(
+                 """
+                 UPDATE jobs
+                 SET end_time = ?, status = ?, iterations = ?, error_message = ?
+                 WHERE job_id = ?
+                 """,
+                 (datetime.now(), status, iterations, error_message, job_id),
+             )
+
+             self._update_daily_summary(conn, datetime.now().date())
+
+     def record_error(
+         self,
+         job_id: str,
+         error_type: str,
+         error_category: str,
+         error_message: str,
+         file_path: str | None = None,
+         line_number: int | None = None,
+     ) -> None:
+         with self._lock, self._get_connection() as conn:
+             conn.execute(
+                 """
+                 INSERT INTO errors (job_id, timestamp, error_type, error_category,
+                                     error_message, file_path, line_number)
+                 VALUES (?, ?, ?, ?, ?, ?, ?)
+                 """,
+                 (
+                     job_id,
+                     datetime.now(),
+                     error_type,
+                     error_category,
+                     error_message,
+                     file_path,
+                     line_number,
+                 ),
+             )
+
+     def record_hook_execution(
+         self,
+         job_id: str,
+         hook_name: str,
+         hook_type: str,
+         execution_time_ms: int,
+         status: str,
+     ) -> None:
+         with self._lock, self._get_connection() as conn:
+             conn.execute(
+                 """
+                 INSERT INTO hook_executions (job_id, timestamp, hook_name,
+                                              hook_type, execution_time_ms, status)
+                 VALUES (?, ?, ?, ?, ?, ?)
+                 """,
+                 (
+                     job_id,
+                     datetime.now(),
+                     hook_name,
+                     hook_type,
+                     execution_time_ms,
+                     status,
+                 ),
+             )
+
+     def record_test_execution(
+         self,
+         job_id: str,
+         total_tests: int,
+         passed: int,
+         failed: int,
+         skipped: int,
+         execution_time_ms: int,
+         coverage_percent: float | None = None,
+     ) -> None:
+         with self._lock, self._get_connection() as conn:
+             conn.execute(
+                 """
+                 INSERT INTO test_executions (job_id, timestamp, total_tests,
+                                              passed, failed, skipped,
+                                              execution_time_ms, coverage_percent)
+                 VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+                 """,
+                 (
+                     job_id,
+                     datetime.now(),
+                     total_tests,
+                     passed,
+                     failed,
+                     skipped,
+                     execution_time_ms,
+                     coverage_percent,
+                 ),
+             )
+
+     def record_orchestration_execution(
+         self,
+         job_id: str,
+         execution_strategy: str,
+         progress_level: str,
+         ai_mode: str,
+         iteration_count: int,
+         strategy_switches: int,
+         correlation_insights: dict[str, Any],
+         total_execution_time_ms: int,
+         hooks_execution_time_ms: int,
+         tests_execution_time_ms: int,
+         ai_analysis_time_ms: int,
+     ) -> None:
+         with self._lock, self._get_connection() as conn:
+             conn.execute(
+                 """
+                 INSERT INTO orchestration_executions
+                 (job_id, timestamp, execution_strategy, progress_level, ai_mode,
+                  iteration_count, strategy_switches, correlation_insights,
+                  total_execution_time_ms, hooks_execution_time_ms,
+                  tests_execution_time_ms, ai_analysis_time_ms)
+                 VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+                 """,
+                 (
+                     job_id,
+                     datetime.now(),
+                     execution_strategy,
+                     progress_level,
+                     ai_mode,
+                     iteration_count,
+                     strategy_switches,
+                     json.dumps(correlation_insights),
+                     total_execution_time_ms,
+                     hooks_execution_time_ms,
+                     tests_execution_time_ms,
+                     ai_analysis_time_ms,
+                 ),
+             )
+
+     def record_strategy_decision(
+         self,
+         job_id: str,
+         iteration: int,
+         previous_strategy: str | None,
+         selected_strategy: str,
+         decision_reason: str,
+         context_data: dict[str, Any],
+         effectiveness_score: float | None = None,
+     ) -> None:
+         with self._lock, self._get_connection() as conn:
+             conn.execute(
+                 """
+                 INSERT INTO strategy_decisions
+                 (job_id, iteration, timestamp, previous_strategy, selected_strategy,
+                  decision_reason, context_data, effectiveness_score)
+                 VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+                 """,
+                 (
+                     job_id,
+                     iteration,
+                     datetime.now(),
+                     previous_strategy,
+                     selected_strategy,
+                     decision_reason,
+                     json.dumps(context_data),
+                     effectiveness_score,
+                 ),
+             )
+
+     def record_individual_test(
+         self,
+         job_id: str,
+         test_id: str,
+         test_file: str,
+         test_class: str | None,
+         test_method: str | None,
+         status: str,
+         execution_time_ms: int | None,
+         error_message: str | None = None,
+         error_traceback: str | None = None,
+     ) -> None:
+         with self._lock, self._get_connection() as conn:
+             conn.execute(
+                 """
+                 INSERT INTO individual_test_executions
+                 (job_id, timestamp, test_id, test_file, test_class, test_method,
+                  status, execution_time_ms, error_message, error_traceback)
+                 VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+                 """,
+                 (
+                     job_id,
+                     datetime.now(),
+                     test_id,
+                     test_file,
+                     test_class,
+                     test_method,
+                     status,
+                     execution_time_ms,
+                     error_message,
+                     error_traceback,
+                 ),
+             )
+
+     def get_orchestration_stats(self) -> dict[str, Any]:
+         with self._get_connection() as conn:
+             strategy_stats = conn.execute("""
+                 SELECT
+                     selected_strategy,
+                     COUNT(*) as usage_count,
+                     AVG(effectiveness_score) as avg_effectiveness,
+                     AVG(
+                         SELECT iteration_count
+                         FROM orchestration_executions o
+                         WHERE o.job_id = sd.job_id
+                     ) as avg_iterations_needed
+                 FROM strategy_decisions sd
+                 WHERE effectiveness_score IS NOT NULL
+                 GROUP BY selected_strategy
+                 ORDER BY avg_effectiveness DESC, usage_count DESC
+             """).fetchall()
+
+             correlation_patterns = conn.execute("""
+                 SELECT
+                     json_extract(correlation_insights, '$.problematic_hooks') as problematic_hooks,
+                     COUNT(*) as frequency
+                 FROM orchestration_executions
+                 WHERE correlation_insights != 'null'
+                     AND correlation_insights != '{}'
+                 GROUP BY problematic_hooks
+                 ORDER BY frequency DESC
+                 LIMIT 10
+             """).fetchall()
+
+             performance_stats = conn.execute("""
+                 SELECT
+                     execution_strategy,
+                     COUNT(*) as executions,
+                     AVG(total_execution_time_ms) as avg_total_time,
+                     AVG(hooks_execution_time_ms) as avg_hooks_time,
+                     AVG(tests_execution_time_ms) as avg_tests_time,
+                     AVG(ai_analysis_time_ms) as avg_ai_time,
+                     AVG(iteration_count) as avg_iterations
+                 FROM orchestration_executions
+                 GROUP BY execution_strategy
+             """).fetchall()
+
+             test_failure_patterns = conn.execute("""
+                 SELECT
+                     test_file,
+                     test_class,
+                     test_method,
+                     COUNT(*) as failure_count,
+                     AVG(execution_time_ms) as avg_execution_time
+                 FROM individual_test_executions
+                 WHERE status = 'failed'
+                 GROUP BY test_file, test_class, test_method
+                 ORDER BY failure_count DESC
+                 LIMIT 15
+             """).fetchall()
+
+             return {
+                 "strategy_effectiveness": [dict(row) for row in strategy_stats],
+                 "correlation_patterns": [dict(row) for row in correlation_patterns],
+                 "performance_by_strategy": [dict(row) for row in performance_stats],
+                 "test_failure_patterns": [dict(row) for row in test_failure_patterns],
+             }
+
+     def _update_daily_summary(
+         self,
+         conn: sqlite3.Connection,
+         date: date,
+     ) -> None:
+         job_stats = conn.execute(
+             """
+             SELECT
+                 COUNT(*) as total_jobs,
+                 SUM(CASE WHEN status = 'success' THEN 1 ELSE 0 END) as successful_jobs,
+                 SUM(CASE WHEN status = 'failed' THEN 1 ELSE 0 END) as failed_jobs,
+                 AVG(CASE
+                     WHEN end_time IS NOT NULL
+                     THEN (julianday(end_time) - julianday(start_time)) * 86400000
+                     ELSE NULL
+                 END) as avg_duration_ms,
+                 SUM(CASE WHEN ai_agent = 1 AND status = 'success' THEN 1 ELSE 0 END) as ai_fixes
+             FROM jobs
+             WHERE DATE(start_time) = ?
+             """,
+             (date,),
+         ).fetchone()
+
+         error_stats = conn.execute(
+             """
+             SELECT
+                 COUNT(*) as total_errors,
+                 SUM(CASE WHEN error_type = 'hook' THEN 1 ELSE 0 END) as hook_errors,
+                 SUM(CASE WHEN error_type = 'test' THEN 1 ELSE 0 END) as test_errors,
+                 SUM(CASE WHEN error_type = 'lint' THEN 1 ELSE 0 END) as lint_errors,
+                 SUM(CASE WHEN error_type = 'type_check' THEN 1 ELSE 0 END) as type_errors
+             FROM errors
+             WHERE DATE(timestamp) = ?
+             """,
+             (date,),
+         ).fetchone()
+
+         orchestration_stats = conn.execute(
+             """
+             SELECT
+                 COUNT(*) as orchestrated_jobs,
+                 AVG(iteration_count) as avg_iterations,
+                 (SELECT selected_strategy
+                  FROM strategy_decisions sd2
+                  WHERE DATE(sd2.timestamp) = ?
+                  GROUP BY selected_strategy
+                  ORDER BY COUNT(*) DESC
+                  LIMIT 1) as most_effective_strategy
+             FROM orchestration_executions
+             WHERE DATE(timestamp) = ?
+             """,
+             (date, date),
+         ).fetchone()
+
+         conn.execute(
+             """
+             INSERT OR REPLACE INTO daily_summary
+             (date, total_jobs, successful_jobs, failed_jobs, total_errors,
+              hook_errors, test_errors, lint_errors, type_errors,
+              avg_job_duration_ms, total_ai_fixes, orchestrated_jobs,
+              avg_orchestration_iterations, most_effective_strategy)
+             VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+             """,
+             (
+                 date,
+                 job_stats["total_jobs"] or 0,
+                 job_stats["successful_jobs"] or 0,
+                 job_stats["failed_jobs"] or 0,
+                 error_stats["total_errors"] or 0,
+                 error_stats["hook_errors"] or 0,
+                 error_stats["test_errors"] or 0,
+                 error_stats["lint_errors"] or 0,
+                 error_stats["type_errors"] or 0,
+                 int(job_stats["avg_duration_ms"] or 0),
+                 job_stats["ai_fixes"] or 0,
+                 orchestration_stats["orchestrated_jobs"] or 0,
+                 float(orchestration_stats["avg_iterations"] or 0),
+                 orchestration_stats["most_effective_strategy"],
+             ),
+         )
+
+     def get_all_time_stats(self) -> dict[str, Any]:
+         with self._get_connection() as conn:
+             job_stats = conn.execute("""
+                 SELECT
+                     COUNT(*) as total_jobs,
+                     SUM(CASE WHEN status = 'success' THEN 1 ELSE 0 END) as successful_jobs,
+                     SUM(CASE WHEN status = 'failed' THEN 1 ELSE 0 END) as failed_jobs,
+                     SUM(CASE WHEN ai_agent = 1 THEN 1 ELSE 0 END) as ai_agent_jobs,
+                     AVG(iterations) as avg_iterations
+                 FROM jobs
+             """).fetchone()
+
+             error_stats = conn.execute("""
+                 SELECT error_type, COUNT(*) as count
+                 FROM errors
+                 GROUP BY error_type
+             """).fetchall()
+
+             common_errors = conn.execute("""
+                 SELECT error_category, error_message, COUNT(*) as count
+                 FROM errors
+                 GROUP BY error_category, error_message
+                 ORDER BY count DESC
+                 LIMIT 10
+             """).fetchall()
+
+             return {
+                 "total_jobs": job_stats["total_jobs"] or 0,
+                 "successful_jobs": job_stats["successful_jobs"] or 0,
+                 "failed_jobs": job_stats["failed_jobs"] or 0,
+                 "ai_agent_jobs": job_stats["ai_agent_jobs"] or 0,
+                 "avg_iterations": float(job_stats["avg_iterations"] or 0),
+                 "error_breakdown": {
+                     row["error_type"]: row["count"] for row in error_stats
+                 },
+                 "common_errors": [dict(row) for row in common_errors],
+             }
+
+
+ _metrics_collector: MetricsCollector | None = None
+
+
+ def get_metrics_collector() -> MetricsCollector:
+     global _metrics_collector
+     if _metrics_collector is None:
+         _metrics_collector = MetricsCollector()
+     return _metrics_collector
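
For orientation, here is a minimal usage sketch of the metrics API added by this hunk. It is not part of the published wheel; it assumes the hunk corresponds to crackerjack/services/metrics.py (the only +578 entry in the file list) and that the module is importable under that path. The job identifier, hook name, and field values are made up for illustration; the method signatures are taken directly from the code above.

```python
# Hypothetical usage sketch (not part of the package); exercises the
# MetricsCollector API as defined in the hunk above.
import uuid

from crackerjack.services.metrics import get_metrics_collector  # assumed import path

collector = get_metrics_collector()  # process-wide singleton, DB at ~/.cache/crackerjack/metrics.db
job_id = uuid.uuid4().hex

collector.start_job(job_id, ai_agent=True, metadata={"trigger": "manual"})
collector.record_hook_execution(job_id, "ruff-check", "fast",
                                execution_time_ms=420, status="failed")
collector.record_error(
    job_id,
    error_type="lint",
    error_category="ruff",
    error_message="F401 unused import",
    file_path="crackerjack/api.py",
    line_number=3,
)
collector.record_test_execution(
    job_id, total_tests=150, passed=148, failed=2, skipped=0,
    execution_time_ms=9_500, coverage_percent=87.5,
)
collector.end_job(job_id, status="failed", iterations=1)  # also refreshes daily_summary

print(collector.get_all_time_stats()["error_breakdown"])
```

Because get_metrics_collector() caches a single MetricsCollector per process, repeated calls share one SQLite database, and every write takes the collector's lock before opening a short-lived connection.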