ctrlcode-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. ctrlcode/__init__.py +8 -0
  2. ctrlcode/agents/__init__.py +29 -0
  3. ctrlcode/agents/cleanup.py +388 -0
  4. ctrlcode/agents/communication.py +439 -0
  5. ctrlcode/agents/observability.py +421 -0
  6. ctrlcode/agents/react_loop.py +297 -0
  7. ctrlcode/agents/registry.py +211 -0
  8. ctrlcode/agents/result_parser.py +242 -0
  9. ctrlcode/agents/workflow.py +723 -0
  10. ctrlcode/analysis/__init__.py +28 -0
  11. ctrlcode/analysis/ast_diff.py +163 -0
  12. ctrlcode/analysis/bug_detector.py +149 -0
  13. ctrlcode/analysis/code_graphs.py +329 -0
  14. ctrlcode/analysis/semantic.py +205 -0
  15. ctrlcode/analysis/static.py +183 -0
  16. ctrlcode/analysis/synthesizer.py +281 -0
  17. ctrlcode/analysis/tests.py +189 -0
  18. ctrlcode/cleanup/__init__.py +16 -0
  19. ctrlcode/cleanup/auto_merge.py +350 -0
  20. ctrlcode/cleanup/doc_gardening.py +388 -0
  21. ctrlcode/cleanup/pr_automation.py +330 -0
  22. ctrlcode/cleanup/scheduler.py +356 -0
  23. ctrlcode/config.py +380 -0
  24. ctrlcode/embeddings/__init__.py +6 -0
  25. ctrlcode/embeddings/embedder.py +192 -0
  26. ctrlcode/embeddings/vector_store.py +213 -0
  27. ctrlcode/fuzzing/__init__.py +24 -0
  28. ctrlcode/fuzzing/analyzer.py +280 -0
  29. ctrlcode/fuzzing/budget.py +112 -0
  30. ctrlcode/fuzzing/context.py +665 -0
  31. ctrlcode/fuzzing/context_fuzzer.py +506 -0
  32. ctrlcode/fuzzing/derived_orchestrator.py +732 -0
  33. ctrlcode/fuzzing/oracle_adapter.py +135 -0
  34. ctrlcode/linters/__init__.py +11 -0
  35. ctrlcode/linters/hand_rolled_utils.py +221 -0
  36. ctrlcode/linters/yolo_parsing.py +217 -0
  37. ctrlcode/metrics/__init__.py +6 -0
  38. ctrlcode/metrics/dashboard.py +283 -0
  39. ctrlcode/metrics/tech_debt.py +663 -0
  40. ctrlcode/paths.py +68 -0
  41. ctrlcode/permissions.py +179 -0
  42. ctrlcode/providers/__init__.py +15 -0
  43. ctrlcode/providers/anthropic.py +138 -0
  44. ctrlcode/providers/base.py +77 -0
  45. ctrlcode/providers/openai.py +197 -0
  46. ctrlcode/providers/parallel.py +104 -0
  47. ctrlcode/server.py +871 -0
  48. ctrlcode/session/__init__.py +6 -0
  49. ctrlcode/session/baseline.py +57 -0
  50. ctrlcode/session/manager.py +967 -0
  51. ctrlcode/skills/__init__.py +10 -0
  52. ctrlcode/skills/builtin/commit.toml +29 -0
  53. ctrlcode/skills/builtin/docs.toml +25 -0
  54. ctrlcode/skills/builtin/refactor.toml +33 -0
  55. ctrlcode/skills/builtin/review.toml +28 -0
  56. ctrlcode/skills/builtin/test.toml +28 -0
  57. ctrlcode/skills/loader.py +111 -0
  58. ctrlcode/skills/registry.py +139 -0
  59. ctrlcode/storage/__init__.py +19 -0
  60. ctrlcode/storage/history_db.py +708 -0
  61. ctrlcode/tools/__init__.py +220 -0
  62. ctrlcode/tools/bash.py +112 -0
  63. ctrlcode/tools/browser.py +352 -0
  64. ctrlcode/tools/executor.py +153 -0
  65. ctrlcode/tools/explore.py +486 -0
  66. ctrlcode/tools/mcp.py +108 -0
  67. ctrlcode/tools/observability.py +561 -0
  68. ctrlcode/tools/registry.py +193 -0
  69. ctrlcode/tools/todo.py +291 -0
  70. ctrlcode/tools/update.py +266 -0
  71. ctrlcode/tools/webfetch.py +147 -0
  72. ctrlcode-0.1.0.dist-info/METADATA +93 -0
  73. ctrlcode-0.1.0.dist-info/RECORD +75 -0
  74. ctrlcode-0.1.0.dist-info/WHEEL +4 -0
  75. ctrlcode-0.1.0.dist-info/entry_points.txt +3 -0
ctrlcode/tools/mcp.py ADDED
@@ -0,0 +1,108 @@
+ """MCP client for tool integration."""
+
+ from typing import Any, Optional
+ from dataclasses import dataclass
+ from contextlib import AsyncExitStack
+
+ from mcp import ClientSession, StdioServerParameters
+ from mcp.client.stdio import stdio_client
+
+
+ @dataclass
+ class MCPTool:
+     """MCP tool definition."""
+
+     name: str
+     description: str
+     input_schema: dict[str, Any]
+     server_name: str
+
+
+ class MCPClient:
+     """Client for MCP server communication."""
+
+     def __init__(self, server_config: dict[str, Any]):
+         """
+         Initialize MCP client.
+
+         Args:
+             server_config: Server configuration with command, args, env
+         """
+         self.server_name = server_config.get("name", "unknown")
+         self.server_params = StdioServerParameters(
+             command=server_config["command"][0],
+             args=server_config["command"][1:],
+             env=server_config.get("env"),
+         )
+         self.session: Optional[ClientSession] = None
+         self.read = None
+         self.write = None
+         self._exit_stack: Optional[AsyncExitStack] = None
+
+     async def start(self) -> None:
+         """Start MCP server and initialize session."""
+         self._exit_stack = AsyncExitStack()
+
+         # Enter the stdio_client context and keep it alive
+         streams = await self._exit_stack.enter_async_context(
+             stdio_client(self.server_params)  # type: ignore[arg-type]
+         )
+         self.read, self.write = streams
+
+         # Initialize session (enter its context so its receive loop is running before initialize() awaits a response)
+         self.session = await self._exit_stack.enter_async_context(ClientSession(self.read, self.write))  # type: ignore[arg-type]
+         await self.session.initialize()
+
+     async def stop(self) -> None:
+         """Stop MCP server and close session."""
+         if self._exit_stack:
+             await self._exit_stack.aclose()
+             self._exit_stack = None
+         self.session = None
+
+     async def call_tool(self, name: str, arguments: dict[str, Any]) -> Any:
+         """
+         Execute tool via MCP protocol.
+
+         Args:
+             name: Tool name
+             arguments: Tool arguments
+
+         Returns:
+             Tool result
+         """
+         if not self.session:
+             raise RuntimeError("MCP session not initialized")
+
+         result = await self.session.call_tool(name, arguments)
+         return result.content
+
+     async def list_tools(self) -> list[MCPTool]:
+         """
+         Get available tools from server.
+
+         Returns:
+             List of available tools
+         """
+         if not self.session:
+             raise RuntimeError("MCP session not initialized")
+
+         tools_result = await self.session.list_tools()
+         return [
+             MCPTool(
+                 name=tool.name,
+                 description=tool.description or "",
+                 input_schema=tool.inputSchema,
+                 server_name=self.server_name,
+             )
+             for tool in tools_result.tools
+         ]
+
+     async def __aenter__(self):
+         """Async context manager entry."""
+         await self.start()
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         """Async context manager exit."""
+         await self.stop()
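
A minimal usage sketch for MCPClient; the server command and tool arguments here are illustrative, not part of this package:

    import asyncio

    async def demo() -> None:
        # "name", "command", and "env" are the keys MCPClient.__init__ reads;
        # any stdio-based MCP server command could stand in for this one.
        config = {"name": "demo", "command": ["python", "my_mcp_server.py"], "env": None}
        async with MCPClient(config) as client:
            for tool in await client.list_tools():
                print(f"{tool.server_name}/{tool.name}: {tool.description}")
            # result = await client.call_tool("some_tool", {"arg": "value"})

    asyncio.run(demo())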
ctrlcode/tools/observability.py ADDED
@@ -0,0 +1,561 @@
+ """Observability tools for Executor agent."""
+
+ import json
+ import logging
+ import re
+ from datetime import datetime, timedelta, timezone
+ from pathlib import Path
+ from typing import Any
+
+ logger = logging.getLogger(__name__)
+
+
+ class ObservabilityTools:
+     """Observability tools for log and metrics analysis."""
+
+     def __init__(self, log_dir: str | Path | None = None):
+         """
+         Initialize observability tools.
+
+         Args:
+             log_dir: Directory containing log files (defaults to .ctrlcode/logs/)
+         """
+         if log_dir is None:
+             log_dir = Path.cwd() / ".ctrlcode" / "logs"
+         self.log_dir = Path(log_dir)
+     def query_logs(
+         self,
+         query: str | None = None,
+         time_range: str = "1h",
+         log_dir: str | None = None,
+         max_results: int = 100,
+     ) -> dict[str, Any]:
+         """
+         Query application logs with LogQL-style filtering.
+
+         Supports simple LogQL-style queries for filtering logs:
+         - `{key="value"}` - Filter by key-value pairs
+         - `|= "text"` - Contains text
+         - `!= "text"` - Doesn't contain text
+
+         Args:
+             query: LogQL-style query string (e.g., '{level="ERROR"} |= "database"')
+             time_range: Time range to search (e.g., "1h", "30m", "1d")
+             log_dir: Directory containing log files (defaults to .ctrlcode/logs/)
+             max_results: Maximum number of log entries to return
+
+         Returns:
+             Dict with:
+             - matches: List of matching log entries
+             - count: Number of matches
+             - query: Original query
+             - time_range: Time range searched
+
+         Examples:
+             # Find all errors
+             query_logs('{level="ERROR"}')
+
+             # Find failed login attempts
+             query_logs('{event="auth.login.failed"}')
+
+             # Find database errors in last 30 minutes
+             query_logs('{level="ERROR"} |= "database"', time_range="30m")
+
+             # Find logs for specific user
+             query_logs('{user_id="123"}', time_range="1d")
+         """
+         # Determine log directory
+         if log_dir is None:
+             log_path = self.log_dir
+         else:
+             log_path = Path(log_dir)
+
+         if not log_path.exists():
+             logger.warning(f"Log directory not found: {log_path}")
+             return {
+                 "matches": [],
+                 "count": 0,
+                 "query": query,
+                 "time_range": time_range,
+                 "error": f"Log directory not found: {log_path}"
+             }
+
+         # Parse time range
+         try:
+             cutoff_time = _parse_time_range(time_range)
+         except ValueError as e:
+             return {
+                 "matches": [],
+                 "count": 0,
+                 "query": query,
+                 "time_range": time_range,
+                 "error": str(e)
+             }
+
+         # Find log files, newest first
+         log_files = sorted(log_path.glob("*.log"), key=lambda p: p.stat().st_mtime, reverse=True)
+
+         if not log_files:
+             return {
+                 "matches": [],
+                 "count": 0,
+                 "query": query,
+                 "time_range": time_range,
+                 "error": "No log files found"
+             }
+
+         # Parse query
+         filters = _parse_query(query) if query else {}
+
+         # Search logs
+         matches = []
+         for log_file in log_files:
+             try:
+                 with open(log_file) as f:
+                     for line in f:
+                         line = line.strip()
+                         if not line:
+                             continue
+
+                         # Try to parse as JSON (structured log)
+                         try:
+                             entry = json.loads(line)
+                         except json.JSONDecodeError:
+                             # Fallback: treat as plain text
+                             entry = {
+                                 "timestamp": None,
+                                 "level": "UNKNOWN",
+                                 "message": line
+                             }
+
+                         # Check time range (normalize to UTC so naive and aware timestamps compare cleanly)
+                         if cutoff_time and "timestamp" in entry:
+                             try:
+                                 log_time = datetime.fromisoformat(entry["timestamp"].replace("Z", "+00:00"))
+                                 if log_time.astimezone(timezone.utc) < cutoff_time:
+                                     continue
+                             except (ValueError, AttributeError):
+                                 pass  # Include logs with unparseable timestamps
+
+                         # Apply filters
+                         if _matches_filters(entry, filters):
+                             matches.append(entry)
+
+                         if len(matches) >= max_results:
+                             break
+
+                 if len(matches) >= max_results:
+                     break
+
+             except Exception as e:
+                 logger.warning(f"Error reading log file {log_file}: {e}")
+
+         return {
+             "matches": matches,
+             "count": len(matches),
+             "query": query,
+             "time_range": time_range,
+             "searched_files": len(log_files),
+         }
+
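
For reference, query_logs expects one JSON object per line in each *.log file. A hypothetical entry and a call that matches it (field values are illustrative):

    {"timestamp": "2025-06-01T12:00:00Z", "level": "ERROR", "event": "db.query", "message": "database timeout", "user_id": "123"}

    tools = ObservabilityTools()
    result = tools.query_logs('{level="ERROR"} |= "database"', time_range="1d")
    # The entry above passes both the label filter and the contains filter,
    # so it lands in result["matches"] and is counted in result["count"].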
+     def query_metrics(
+         self,
+         query: str | None = None,
+         time_range: str = "1h",
+         metrics_file: str | None = None,
+     ) -> dict[str, Any]:
+         """
+         Query performance metrics with PromQL-style filtering.
+
+         Simplified PromQL support for basic metric queries:
+         - Metric name selection
+         - Rate calculations
+         - Aggregations (avg, sum, max, min)
+
+         Args:
+             query: PromQL-style query (e.g., 'rate(http_requests_total[5m])')
+             time_range: Time range to query (e.g., "1h", "30m")
+             metrics_file: Path to metrics file (defaults to .ctrlcode/logs/metrics.jsonl)
+
+         Returns:
+             Dict with:
+             - values: List of metric values
+             - aggregates: Summary statistics
+             - query: Original query
+             - time_range: Time range queried
+
+         Examples:
+             # Get request rate
+             query_metrics(query='rate(http_requests_total[5m])')
+
+             # Get average latency
+             query_metrics(query='avg(http_request_duration_ms)')
+
+             # Get p95 latency (if pre-calculated)
+             query_metrics(query='http_request_duration_ms_p95')
+         """
+         # Determine metrics file
+         if metrics_file is None:
+             metrics_path = self.log_dir / "metrics.jsonl"
+         else:
+             metrics_path = Path(metrics_file)
+
+         if not metrics_path.exists():
+             # Try to extract metrics from test output or benchmarks
+             return self._extract_metrics_from_output(query, time_range)
+
+         # Parse time range
+         try:
+             cutoff_time = _parse_time_range(time_range)
+         except ValueError as e:
+             return {
+                 "values": [],
+                 "aggregates": {},
+                 "query": query,
+                 "time_range": time_range,
+                 "error": str(e)
+             }
+
+         # Parse query
+         metric_name, operation = _parse_metric_query(query) if query else (None, None)
+
+         # Read metrics
+         values = []
+         try:
+             with open(metrics_path) as f:
+                 for line in f:
+                     line = line.strip()
+                     if not line:
+                         continue
+
+                     try:
+                         entry = json.loads(line)
+                     except json.JSONDecodeError:
+                         continue
+
+                     # Check time range (normalize to UTC before comparing)
+                     if "timestamp" in entry:
+                         try:
+                             metric_time = datetime.fromisoformat(entry["timestamp"].replace("Z", "+00:00"))
+                             if metric_time.astimezone(timezone.utc) < cutoff_time:
+                                 continue
+                         except (ValueError, AttributeError):
+                             pass
+
+                     # Extract metric value
+                     if metric_name and metric_name in entry:
+                         values.append(float(entry[metric_name]))
+                     elif not metric_name:
+                         # No specific metric, collect all numeric values
+                         for key, value in entry.items():
+                             if isinstance(value, (int, float)) and key != "timestamp":
+                                 values.append(float(value))
+
+         except Exception as e:
+             return {
+                 "values": [],
+                 "aggregates": {},
+                 "query": query,
+                 "time_range": time_range,
+                 "error": f"Error reading metrics: {e}"
+             }
+
+         # Calculate aggregates
+         aggregates = {}
+         if values:
+             aggregates = {
+                 "count": len(values),
+                 "avg": sum(values) / len(values),
+                 "min": min(values),
+                 "max": max(values),
+                 "sum": sum(values),
+             }
+
+             # Calculate percentiles if enough data
+             if len(values) >= 10:
+                 sorted_values = sorted(values)
+                 aggregates["p50"] = sorted_values[len(sorted_values) // 2]
+                 aggregates["p95"] = sorted_values[int(len(sorted_values) * 0.95)]
+                 aggregates["p99"] = sorted_values[int(len(sorted_values) * 0.99)]
+
+         return {
+             "values": values,
+             "aggregates": aggregates,
+             "query": query,
+             "time_range": time_range,
+             "metric_name": metric_name,
+             "operation": operation,
+         }
+
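
Metrics are read the same way, from a JSONL file with one sample per line. A hypothetical sample and query (metric names are illustrative):

    {"timestamp": "2025-06-01T12:00:00Z", "http_request_duration_ms": 48.2}

    result = tools.query_metrics(query="avg(http_request_duration_ms)", time_range="1h")
    # result["aggregates"]["avg"] is the mean of the samples in range;
    # p50/p95/p99 are added once at least 10 values are collected.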
+     def _extract_metrics_from_output(self, query: str | None, time_range: str) -> dict[str, Any]:
+         """
+         Extract metrics from test output or benchmark results as fallback.
+
+         When no metrics file exists, try to parse from:
+         - pytest output
+         - benchmark results
+         - load test output (ab, wrk)
+         """
+         # Try to find recent test output or benchmark files
+         possible_files = [
+             self.log_dir / "test_output.log",
+             self.log_dir / "benchmark.txt",
+             self.log_dir.parent / "test_output.log",
+         ]
+
+         for file_path in possible_files:
+             if file_path.exists():
+                 try:
+                     content = file_path.read_text()
+
+                     # Parse latency from wrk output
+                     latency_match = re.search(r"Latency\s+(\d+\.?\d*)ms", content)
+                     if latency_match:
+                         return {
+                             "values": [float(latency_match.group(1))],
+                             "aggregates": {"latency_ms": float(latency_match.group(1))},
+                             "query": query,
+                             "time_range": time_range,
+                             "source": "wrk_output"
+                         }
+
+                     # Parse from ab (Apache Bench) output
+                     ab_time_match = re.search(r"Time per request:\s+(\d+\.?\d*)", content)
+                     if ab_time_match:
+                         return {
+                             "values": [float(ab_time_match.group(1))],
+                             "aggregates": {"time_per_request_ms": float(ab_time_match.group(1))},
+                             "query": query,
+                             "time_range": time_range,
+                             "source": "ab_output"
+                         }
+
+                 except Exception:
+                     continue
+
+         return {
+             "values": [],
+             "aggregates": {},
+             "query": query,
+             "time_range": time_range,
+             "error": "No metrics file found and could not extract from test output"
+         }
+
+
+ def _parse_time_range(time_range: str) -> datetime:
+     """
+     Parse time range string to cutoff datetime.
+
+     Args:
+         time_range: String like "1h", "30m", "1d", "2w"
+
+     Returns:
+         Timezone-aware UTC datetime representing cutoff (now - range)
+
+     Raises:
+         ValueError: If time_range format invalid
+     """
+     match = re.match(r"(\d+)([smhdw])", time_range)
+     if not match:
+         raise ValueError(f"Invalid time range format: {time_range}. Expected format: <number><unit> (e.g., 1h, 30m)")
+
+     amount = int(match.group(1))
+     unit = match.group(2)
+
+     units = {
+         "s": "seconds",
+         "m": "minutes",
+         "h": "hours",
+         "d": "days",
+         "w": "weeks",
+     }
+
+     if unit not in units:
+         raise ValueError(f"Invalid time unit: {unit}. Supported: s, m, h, d, w")
+
+     delta = timedelta(**{units[unit]: amount})
+     return datetime.now(timezone.utc) - delta
+
+
+ def _parse_query(query: str) -> dict[str, Any]:
+     """
+     Parse LogQL-style query into filters.
+
+     Supports:
+     - {key="value"} - Equality filters
+     - |= "text" - Contains text
+     - != "text" - Doesn't contain text
+
+     Args:
+         query: LogQL-style query string
+
+     Returns:
+         Dict with filter criteria
+     """
+     filters = {
+         "equals": {},    # {key: value}
+         "contains": [],  # [text1, text2]
+         "excludes": [],  # [text1, text2]
+     }
+
+     # Extract {key="value"} filters
+     label_pattern = r'\{([^}]+)\}'
+     label_matches = re.findall(label_pattern, query)
+
+     for label_match in label_matches:
+         # Parse key="value" pairs
+         pairs = re.findall(r'(\w+)="([^"]+)"', label_match)
+         for key, value in pairs:
+             filters["equals"][key] = value
+
+     # Extract |= "text" (contains)
+     contains_pattern = r'\|=\s*"([^"]+)"'
+     contains_matches = re.findall(contains_pattern, query)
+     filters["contains"].extend(contains_matches)
+
+     # Extract != "text" (excludes)
+     excludes_pattern = r'!=\s*"([^"]+)"'
+     excludes_matches = re.findall(excludes_pattern, query)
+     filters["excludes"].extend(excludes_matches)
+
+     return filters
+
+
+ def _matches_filters(entry: dict[str, Any], filters: dict[str, Any]) -> bool:
+     """
+     Check if log entry matches filter criteria.
+
+     Args:
+         entry: Log entry (dict)
+         filters: Filter criteria from _parse_query
+
+     Returns:
+         True if entry matches all filters
+     """
+     # Check equality filters
+     for key, expected_value in filters.get("equals", {}).items():
+         if key not in entry:
+             return False
+         if str(entry[key]) != expected_value:
+             return False
+
+     # Convert entry to string for text matching
+     entry_str = json.dumps(entry).lower()
+
+     # Check contains filters
+     for text in filters.get("contains", []):
+         if text.lower() not in entry_str:
+             return False
+
+     # Check excludes filters
+     for text in filters.get("excludes", []):
+         if text.lower() in entry_str:
+             return False
+
+     return True
+
+
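A worked example of the two helpers together (query and entry are illustrative):

    filters = _parse_query('{level="ERROR"} |= "database" != "retry"')
    # -> {"equals": {"level": "ERROR"}, "contains": ["database"], "excludes": ["retry"]}

    entry = {"level": "ERROR", "message": "database timeout"}
    _matches_filters(entry, filters)
    # -> True: level matches, "database" is present, "retry" is absent
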
+ def _parse_metric_query(query: str) -> tuple[str | None, str | None]:
+     """
+     Parse simple PromQL-style query into metric name and operation.
+
+     Supports basic patterns:
+     - metric_name
+     - rate(metric_name[5m])
+     - avg(metric_name)
+
+     Args:
+         query: PromQL-style query string
+
+     Returns:
+         Tuple of (metric_name, operation)
+     """
+     # Pattern: rate(metric_name[time_window])
+     rate_match = re.match(r'rate\((\w+)\[[\w\d]+\]\)', query)
+     if rate_match:
+         return rate_match.group(1), "rate"
+
+     # Pattern: aggregation(metric_name)
+     agg_match = re.match(r'(avg|sum|max|min)\((\w+)\)', query)
+     if agg_match:
+         return agg_match.group(2), agg_match.group(1)
+
+     # Simple metric name
+     if query and re.match(r'^\w+$', query):
+         return query, None
+
+     return None, None
+
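
And _parse_metric_query on the three supported shapes (metric names are illustrative):

    _parse_metric_query("rate(http_requests_total[5m])")  # -> ("http_requests_total", "rate")
    _parse_metric_query("avg(http_request_duration_ms)")  # -> ("http_request_duration_ms", "avg")
    _parse_metric_query("http_request_duration_ms_p95")   # -> ("http_request_duration_ms_p95", None)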
+
+ OBSERVABILITY_TOOL_SCHEMAS = [
+     {
+         "name": "query_logs",
+         "description": """Query application logs with LogQL-style filtering.
+
+ Supports queries like:
+ - {level="ERROR"} - Find error logs
+ - {event="auth.login.failed"} - Find failed login attempts
+ - {user_id="123"} - Find logs for specific user
+ - {level="ERROR"} |= "database" - Find error logs containing "database"
+
+ Time ranges: 1h (1 hour), 30m (30 minutes), 1d (1 day), 1w (1 week)
+
+ Returns structured log entries with timestamp, level, event, and context fields.""",
+         "input_schema": {
+             "type": "object",
+             "properties": {
+                 "query": {
+                     "type": "string",
+                     "description": 'LogQL-style query (e.g., \'{level="ERROR"} |= "database"\')'
+                 },
+                 "time_range": {
+                     "type": "string",
+                     "description": "Time range to search (e.g., 1h, 30m, 1d)",
+                     "default": "1h"
+                 },
+                 "log_dir": {
+                     "type": "string",
+                     "description": "Directory containing log files (optional)"
+                 },
+                 "max_results": {
+                     "type": "integer",
+                     "description": "Maximum number of results to return",
+                     "default": 100
+                 }
+             },
+         }
+     },
+     {
+         "name": "query_metrics",
+         "description": """Query performance metrics with PromQL-style filtering.
+
+ Supports queries like:
+ - rate(http_requests_total[5m]) - Request rate over 5 minutes
+ - avg(http_request_duration_ms) - Average request latency
+ - http_request_duration_ms_p95 - 95th percentile latency
+
+ Time ranges: 1h (1 hour), 30m (30 minutes), 1d (1 day), 1w (1 week)
+
+ Returns metric values and aggregates (avg, min, max, p50, p95, p99).
+ Falls back to extracting metrics from test output if no metrics file exists.""",
+         "input_schema": {
+             "type": "object",
+             "properties": {
+                 "query": {
+                     "type": "string",
+                     "description": 'PromQL-style query (e.g., "rate(http_requests_total[5m])")'
+                 },
+                 "time_range": {
+                     "type": "string",
+                     "description": "Time range to query (e.g., 1h, 30m, 1d)",
+                     "default": "1h"
+                 },
+                 "metrics_file": {
+                     "type": "string",
+                     "description": "Path to metrics file (optional)"
+                 }
+             },
+         }
+     }
+ ]
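
The schema names mirror the ObservabilityTools methods, so a host can dispatch tool calls by name. A minimal sketch of such a dispatcher (the dispatch helper itself is not part of the package):

    def dispatch(tool_name: str, tool_input: dict) -> dict:
        # Route a schema-described tool call to the matching method.
        tools = ObservabilityTools()
        handlers = {"query_logs": tools.query_logs, "query_metrics": tools.query_metrics}
        return handlers[tool_name](**tool_input)

    dispatch("query_logs", {"query": '{level="ERROR"}', "time_range": "30m"})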