logler 1.0.7__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- logler/__init__.py +22 -0
- logler/bootstrap.py +57 -0
- logler/cache.py +75 -0
- logler/cli.py +589 -0
- logler/helpers.py +282 -0
- logler/investigate.py +3962 -0
- logler/llm_cli.py +1426 -0
- logler/log_reader.py +267 -0
- logler/parser.py +207 -0
- logler/safe_regex.py +124 -0
- logler/terminal.py +252 -0
- logler/tracker.py +138 -0
- logler/tree_formatter.py +807 -0
- logler/watcher.py +55 -0
- logler/web/__init__.py +3 -0
- logler/web/app.py +810 -0
- logler/web/static/css/tailwind.css +1 -0
- logler/web/static/css/tailwind.input.css +3 -0
- logler/web/static/logler-logo.png +0 -0
- logler/web/tailwind.config.cjs +9 -0
- logler/web/templates/index.html +1454 -0
- logler-1.0.7.dist-info/METADATA +584 -0
- logler-1.0.7.dist-info/RECORD +28 -0
- logler-1.0.7.dist-info/WHEEL +4 -0
- logler-1.0.7.dist-info/entry_points.txt +2 -0
- logler-1.0.7.dist-info/licenses/LICENSE +21 -0
- logler_rs/__init__.py +5 -0
- logler_rs/logler_rs.cp311-win_amd64.pyd +0 -0
logler/llm_cli.py
ADDED
|
@@ -0,0 +1,1426 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LLM-First CLI Module - Commands optimized for AI agents
|
|
3
|
+
|
|
4
|
+
Design principles:
|
|
5
|
+
- JSON output by default (no --json flag needed)
|
|
6
|
+
- No truncation - full data always
|
|
7
|
+
- Meaningful exit codes for chaining
|
|
8
|
+
- Rich metadata for LLM reasoning
|
|
9
|
+
- Deterministic output structure
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import click
|
|
13
|
+
import json
|
|
14
|
+
import sys
|
|
15
|
+
import re
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import Optional, List, Dict, Any
|
|
18
|
+
from datetime import datetime, timedelta
|
|
19
|
+
from collections import defaultdict
|
|
20
|
+
|
|
21
|
+
from .safe_regex import safe_compile, RegexTimeoutError, RegexPatternTooLongError
|
|
22
|
+
|
|
23
|
+
# Exit codes returned via sys.exit() by every `llm` subcommand; documented
# in the `llm` group help so agents can branch on them when chaining calls.
EXIT_SUCCESS = 0  # Success with results
EXIT_NO_RESULTS = 1  # Success but no results found
EXIT_USER_ERROR = 2  # Invalid arguments, file not found
EXIT_INTERNAL_ERROR = 3  # Unexpected exception
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _output_json(data: Dict[str, Any], pretty: bool = False) -> None:
    """Serialize *data* as JSON and write it to stdout.

    Values that json cannot serialize natively (e.g. datetimes) are
    stringified via ``default=str``. ``pretty`` switches on 2-space
    indentation; otherwise output is compact single-line JSON.
    """
    indent = 2 if pretty else None
    click.echo(json.dumps(data, indent=indent, default=str))
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _error_json(message: str, code: int = EXIT_USER_ERROR) -> None:
    """Emit a JSON error object on stdout and terminate the process.

    The payload carries both the human-readable message and the numeric
    exit code, then the process exits with that same code.
    """
    payload = {"error": message, "code": code}
    _output_json(payload)
    sys.exit(code)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def _parse_duration(duration: str) -> timedelta:
|
|
45
|
+
"""Parse duration string like '30m', '2h', '1d' to timedelta."""
|
|
46
|
+
match = re.match(r"^(\d+)(s|m|h|d)$", duration.lower())
|
|
47
|
+
if not match:
|
|
48
|
+
raise ValueError(f"Invalid duration format: {duration}. Use format like '30m', '2h', '1d'")
|
|
49
|
+
|
|
50
|
+
value = int(match.group(1))
|
|
51
|
+
unit = match.group(2)
|
|
52
|
+
|
|
53
|
+
if unit == "s":
|
|
54
|
+
return timedelta(seconds=value)
|
|
55
|
+
elif unit == "m":
|
|
56
|
+
return timedelta(minutes=value)
|
|
57
|
+
elif unit == "h":
|
|
58
|
+
return timedelta(hours=value)
|
|
59
|
+
elif unit == "d":
|
|
60
|
+
return timedelta(days=value)
|
|
61
|
+
else:
|
|
62
|
+
raise ValueError(f"Unknown time unit: {unit}")
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def _expand_globs(patterns: List[str]) -> List[str]:
|
|
66
|
+
"""Expand glob patterns to file paths."""
|
|
67
|
+
import glob
|
|
68
|
+
|
|
69
|
+
files = []
|
|
70
|
+
for pattern in patterns:
|
|
71
|
+
matches = glob.glob(pattern, recursive=True)
|
|
72
|
+
if matches:
|
|
73
|
+
files.extend(matches)
|
|
74
|
+
elif Path(pattern).exists():
|
|
75
|
+
files.append(pattern)
|
|
76
|
+
return sorted(set(files))
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
@click.group()
def llm():
    """
    LLM-first CLI commands - optimized for AI agents.

    All commands output structured JSON by default.
    No truncation - full data is always returned.

    Exit codes:
        0 - Success with results
        1 - Success but no results found
        2 - User error (invalid args, file not found)
        3 - Internal error
    """
    # The group body is intentionally empty; subcommands attach via
    # @llm.command() decorators below.
    pass
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
@llm.command()
@click.argument("files", nargs=-1, required=True)
@click.option("--sample-size", default=1000, help="Number of entries to analyze (default: 1000)")
@click.option("--full", is_flag=True, help="Analyze all entries (slow for large files)")
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
def schema(files: tuple, sample_size: int, full: bool, pretty: bool):
    """
    Infer the structure/schema of log files.

    Analyzes log files to determine available fields, formats,
    and data patterns. Useful for understanding log structure
    before running queries.

    Example:
        logler llm schema app.log worker.log
    """
    # Imported lazily so the group loads even if the parser module is heavy.
    from .parser import LogParser

    try:
        file_list = _expand_globs(list(files))
        if not file_list:
            # _error_json exits the process (SystemExit is not caught by
            # the `except Exception` below).
            _error_json(f"No files found matching: {files}")

        parser = LogParser()

        # Accumulators for the inferred schema.
        field_presence = defaultdict(int)  # field name -> entries containing it
        level_values = defaultdict(int)  # level string -> occurrence count
        format_counts = defaultdict(int)  # detected format name -> count
        thread_patterns = set()
        correlation_patterns = set()
        custom_fields = set()
        timestamps = []
        total_entries = 0

        # NOTE(review): the sample cap below is applied per file and counts
        # raw lines (i is the line index), so multi-file runs may analyze
        # up to len(files) * sample_size lines total — confirm intended.
        for file_path in file_list:
            try:
                with open(file_path, "r", errors="replace") as f:
                    for i, line in enumerate(f):
                        if not full and i >= sample_size:
                            break

                        line = line.rstrip()
                        if not line:
                            continue

                        entry = parser.parse_line(i + 1, line)
                        total_entries += 1

                        # Count which well-known fields are populated.
                        if entry.timestamp:
                            field_presence["timestamp"] += 1
                            timestamps.append(entry.timestamp)
                        if entry.level:
                            field_presence["level"] += 1
                            level_values[str(entry.level)] += 1
                        if entry.message:
                            field_presence["message"] += 1
                        if entry.thread_id:
                            field_presence["thread_id"] += 1
                            thread_patterns.add(entry.thread_id)
                        if entry.correlation_id:
                            field_presence["correlation_id"] += 1
                            correlation_patterns.add(entry.correlation_id)
                        if entry.trace_id:
                            field_presence["trace_id"] += 1
                        if entry.span_id:
                            field_presence["span_id"] += 1

                        # Track detected log format distribution.
                        format_name = getattr(entry, "format", None) or "Unknown"
                        format_counts[str(format_name)] += 1

                        # Collect custom field names from the entry's extras.
                        if hasattr(entry, "extra") and entry.extra:
                            for key in entry.extra.keys():
                                custom_fields.add(key)

            except FileNotFoundError:
                _error_json(f"File not found: {file_path}")
            except PermissionError:
                _error_json(f"Permission denied: {file_path}")

        if total_entries == 0:
            # Still emit a structured payload so agents get a parseable answer.
            _output_json(
                {
                    "files_analyzed": len(file_list),
                    "total_entries": 0,
                    "schema": {},
                    "error": "No log entries found",
                },
                pretty,
            )
            sys.exit(EXIT_NO_RESULTS)

        # Build schema output: per-field presence ratio plus extras.
        schema_data = {}
        for field, count in field_presence.items():
            presence = count / total_entries
            schema_data[field] = {"present": round(presence, 3)}

            if field == "level":
                schema_data[field]["values"] = list(level_values.keys())
            elif field == "thread_id" and thread_patterns:
                # Summarize at most 100 distinct IDs into representative patterns.
                patterns = _extract_patterns(list(thread_patterns)[:100])
                if patterns:
                    schema_data[field]["patterns"] = patterns
            elif field == "correlation_id" and correlation_patterns:
                patterns = _extract_patterns(list(correlation_patterns)[:100])
                if patterns:
                    schema_data[field]["patterns"] = patterns

        # Overall observed time range (earliest/latest timestamp seen).
        time_range = None
        if timestamps:
            sorted_ts = sorted([t for t in timestamps if t])
            if sorted_ts:
                time_range = {"earliest": str(sorted_ts[0]), "latest": str(sorted_ts[-1])}

        # Fraction of entries per detected format.
        format_dist = {}
        for fmt, count in format_counts.items():
            format_dist[fmt] = round(count / total_entries, 3)

        result = {
            "files_analyzed": len(file_list),
            "files": file_list,
            "total_entries": total_entries,
            "sample_size": sample_size if not full else total_entries,
            "schema": schema_data,
            "detected_formats": format_dist,
            "custom_fields": sorted(list(custom_fields)) if custom_fields else [],
        }

        if time_range:
            result["time_range"] = time_range

        _output_json(result, pretty)
        sys.exit(EXIT_SUCCESS)

    except Exception as e:
        _error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
def _extract_patterns(values: List[str]) -> List[str]:
|
|
242
|
+
"""Extract regex-like patterns from a list of values."""
|
|
243
|
+
if not values:
|
|
244
|
+
return []
|
|
245
|
+
|
|
246
|
+
patterns = set()
|
|
247
|
+
|
|
248
|
+
# Common patterns
|
|
249
|
+
for val in values[:50]:
|
|
250
|
+
# worker-N pattern
|
|
251
|
+
if re.match(r"^[a-z]+-\d+$", val, re.I):
|
|
252
|
+
patterns.add(r"[a-z]+-\d+")
|
|
253
|
+
# UUID-like
|
|
254
|
+
elif re.match(r"^[a-f0-9-]{36}$", val, re.I):
|
|
255
|
+
patterns.add(r"uuid")
|
|
256
|
+
# req-xxx pattern
|
|
257
|
+
elif re.match(r"^req-[a-z0-9]+$", val, re.I):
|
|
258
|
+
patterns.add(r"req-[a-z0-9]+")
|
|
259
|
+
# trace-xxx pattern
|
|
260
|
+
elif re.match(r"^trace-[a-z0-9]+$", val, re.I):
|
|
261
|
+
patterns.add(r"trace-[a-z0-9]+")
|
|
262
|
+
else:
|
|
263
|
+
# Just add a sample
|
|
264
|
+
if len(patterns) < 5:
|
|
265
|
+
patterns.add(val)
|
|
266
|
+
|
|
267
|
+
return sorted(list(patterns))[:10]
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
@llm.command()
@click.argument("files", nargs=-1, required=True)
@click.option("--level", help="Filter by log level (ERROR, WARN, INFO, DEBUG)")
@click.option("--query", help="Regex pattern to match in message")
@click.option("--thread", help="Filter by thread ID")
@click.option("--correlation", help="Filter by correlation ID")
@click.option("--after", help="Only entries after this timestamp (ISO8601)")
@click.option("--before", help="Only entries before this timestamp (ISO8601)")
@click.option("--last", help="Only entries in last N duration (e.g., 30m, 2h)")
@click.option("--limit", type=int, help="Limit number of results")
@click.option("--context", type=int, default=0, help="Include N context lines")
@click.option("--include-raw/--no-raw", default=True, help="Include raw log line")
@click.option("--aggregate/--no-aggregate", default=True, help="Include aggregations")
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
def search(
    files: tuple,
    level: Optional[str],
    query: Optional[str],
    thread: Optional[str],
    correlation: Optional[str],
    after: Optional[str],
    before: Optional[str],
    last: Optional[str],
    limit: Optional[int],
    context: int,
    include_raw: bool,
    aggregate: bool,
    pretty: bool,
):
    """
    Search logs with full results - no truncation.

    Returns complete search results with metadata.
    Use --limit to restrict results if needed.

    Example:
        logler llm search app.log --level ERROR --query "timeout"
    """
    from . import investigate

    try:
        file_list = _expand_globs(list(files))
        if not file_list:
            _error_json(f"No files found matching: {files}")

        # Resolve the time window. Note: when --last is given it takes
        # precedence and --after/--before are silently ignored (else branch).
        after_ts = None
        before_ts = None

        if last:
            try:
                duration = _parse_duration(last)
                before_ts = datetime.now()
                after_ts = before_ts - duration
            except ValueError as e:
                _error_json(str(e))
        else:
            if after:
                try:
                    # Accept trailing 'Z' by mapping it to an explicit UTC offset.
                    after_ts = datetime.fromisoformat(after.replace("Z", "+00:00"))
                except ValueError:
                    _error_json(f"Invalid timestamp format for --after: {after}")
            if before:
                try:
                    before_ts = datetime.fromisoformat(before.replace("Z", "+00:00"))
                except ValueError:
                    _error_json(f"Invalid timestamp format for --before: {before}")

        # Delegate the actual search to the investigate backend.
        result = investigate.search(
            files=file_list,
            query=query,
            level=level,
            thread_id=thread,
            correlation_id=correlation,
            limit=limit,
            context_lines=context,
            output_format="full",
        )

        results = result.get("results", [])

        # Apply time filters client-side on the returned entries.
        # NOTE(review): entries whose timestamp is missing or unparseable
        # fall through the try/except and are KEPT even when a time window
        # is active — confirm that best-effort inclusion is intended.
        if after_ts or before_ts:
            filtered = []
            for item in results:
                entry = item.get("entry", {})
                ts_str = entry.get("timestamp")
                if ts_str:
                    try:
                        ts = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
                        if after_ts and ts < after_ts:
                            continue
                        if before_ts and ts > before_ts:
                            continue
                    except (ValueError, TypeError):
                        pass
                filtered.append(item)
            results = filtered

        # Flatten backend entries into the stable LLM-facing shape.
        output_results = []
        for item in results:
            entry = item.get("entry", {})
            out_entry = {
                # Fall back to the single input file when the backend omits it.
                "file": entry.get("file", file_list[0] if len(file_list) == 1 else None),
                "line_number": entry.get("line_number"),
                "timestamp": entry.get("timestamp"),
                "level": entry.get("level"),
                "message": entry.get("message"),
            }

            # Optional identifier fields are only emitted when present.
            if entry.get("thread_id"):
                out_entry["thread_id"] = entry["thread_id"]
            if entry.get("correlation_id"):
                out_entry["correlation_id"] = entry["correlation_id"]
            if entry.get("trace_id"):
                out_entry["trace_id"] = entry["trace_id"]
            if entry.get("span_id"):
                out_entry["span_id"] = entry["span_id"]
            if include_raw and entry.get("raw"):
                out_entry["raw"] = entry["raw"]

            # Surrounding lines, only when the caller asked for context.
            if context > 0:
                if item.get("context_before"):
                    out_entry["context_before"] = item["context_before"]
                if item.get("context_after"):
                    out_entry["context_after"] = item["context_after"]

            output_results.append(out_entry)

        output = {
            "query": {
                "files": file_list,
                "level": level,
                "pattern": query,
                "thread": thread,
                "correlation": correlation,
            },
            "summary": {
                "total_matches": len(output_results),
                "files_searched": len(file_list),
            },
            "results": output_results,
        }

        # Cheap roll-ups so the agent need not re-aggregate client-side.
        if aggregate and output_results:
            agg_by_level = defaultdict(int)
            agg_by_thread = defaultdict(int)

            for r in output_results:
                if r.get("level"):
                    agg_by_level[r["level"]] += 1
                if r.get("thread_id"):
                    agg_by_thread[r["thread_id"]] += 1

            output["aggregations"] = {
                "by_level": dict(agg_by_level),
                "by_thread": dict(agg_by_thread) if agg_by_thread else None,
            }

        _output_json(output, pretty)

        # Exit code distinguishes "ran fine, nothing matched" from success.
        if len(output_results) == 0:
            sys.exit(EXIT_NO_RESULTS)
        else:
            sys.exit(EXIT_SUCCESS)

    except RuntimeError as e:
        if "Rust backend" in str(e):
            # _error_json exits, so the `raise` below only fires for
            # RuntimeErrors unrelated to the Rust backend.
            _error_json(
                "Rust backend not available. Build with: maturin develop --release",
                EXIT_INTERNAL_ERROR,
            )
        raise
    except Exception as e:
        _error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
451
|
+
|
|
452
|
+
|
|
453
|
+
@llm.command()
@click.argument("files", nargs=-1, required=True)
@click.option(
    "--strategy",
    type=click.Choice(["random", "diverse", "errors_focused", "head", "tail", "edges"]),
    default="diverse",
    help="Sampling strategy",
)
@click.option("--size", type=int, default=100, help="Sample size (default: 100)")
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
def sample(files: tuple, strategy: str, size: int, pretty: bool):
    """
    Get a statistically representative sample of log entries.

    Strategies:
      random - Pure random sample
      diverse - Cover all levels, threads, time ranges
      errors_focused - Prioritize errors and warnings
      head - First N entries
      tail - Last N entries
      edges - Boundaries and transitions

    Example:
        logler llm sample app.log --strategy errors_focused --size 50
    """
    from . import investigate

    try:
        file_list = _expand_globs(list(files))
        if not file_list:
            _error_json(f"No files found matching: {files}")

        # Sampling itself is delegated to the investigate backend.
        result = investigate.smart_sample(files=file_list, strategy=strategy, sample_size=size)

        # Stable output envelope: population stats, sample metadata, entries.
        output = {
            "population": {"total_entries": result.get("total_population", 0), "files": file_list},
            "sample": {
                "size": result.get("sample_size", 0),
                "strategy": strategy,
            },
            "entries": [],
        }

        # Coverage info is optional in the backend result.
        if "level_distribution" in result:
            output["sample"]["coverage"] = {"levels": result["level_distribution"]}

        # Flatten backend entries into the stable LLM-facing shape.
        for entry in result.get("entries", []):
            out_entry = {
                "line_number": entry.get("line_number"),
                "timestamp": entry.get("timestamp"),
                "level": entry.get("level"),
                "message": entry.get("message"),
            }
            if entry.get("thread_id"):
                out_entry["thread_id"] = entry["thread_id"]
            # Why each entry was picked (strategy-dependent), when provided.
            if entry.get("selection_reason"):
                out_entry["selection_reason"] = entry["selection_reason"]

            output["entries"].append(out_entry)

        _output_json(output, pretty)

        if len(output["entries"]) == 0:
            sys.exit(EXIT_NO_RESULTS)
        else:
            sys.exit(EXIT_SUCCESS)

    except Exception as e:
        _error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
525
|
+
|
|
526
|
+
|
|
527
|
+
@llm.command()
@click.argument("files", nargs=-1, required=True)
@click.option("--last", help="Analyze last N duration (e.g., 30m, 2h)")
@click.option("--after", help="Start timestamp (ISO8601)")
@click.option("--before", help="End timestamp (ISO8601)")
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
def triage(
    files: tuple, last: Optional[str], after: Optional[str], before: Optional[str], pretty: bool
):
    """
    Quick severity assessment for incident response.

    Returns severity level, top issues, and suggested actions.
    Designed for rapid initial assessment during incidents.

    Example:
        logler llm triage /var/log/app/*.log --last 1h
    """
    # NOTE(review): --last/--after/--before are accepted by click but never
    # referenced in this body — time-windowed triage appears unimplemented.
    # Confirm whether filtering should be applied before analysis.
    from . import investigate

    try:
        file_list = _expand_globs(list(files))
        if not file_list:
            _error_json(f"No files found matching: {files}")

        # Run the backend's automatic insight analysis over all entries.
        result = investigate.analyze_with_insights(files=file_list, auto_investigate=True)

        overview = result.get("overview", {})
        insights = result.get("insights", [])

        # Map the overall error rate onto a coarse severity bucket with a
        # hard-coded confidence per bucket.
        error_rate = overview.get("error_rate", 0)
        if error_rate > 0.2:
            severity = "critical"
            confidence = 0.95
        elif error_rate > 0.1:
            severity = "high"
            confidence = 0.9
        elif error_rate > 0.05:
            severity = "medium"
            confidence = 0.85
        elif error_rate > 0.01:
            severity = "low"
            confidence = 0.8
        else:
            severity = "healthy"
            confidence = 0.9

        # Top issues: at most the first 5 insights, reshaped.
        top_issues = []
        for insight in insights[:5]:
            issue = {
                "type": insight.get("type"),
                "severity": insight.get("severity"),
                "description": insight.get("description"),
            }
            if insight.get("count"):
                issue["count"] = insight["count"]
            top_issues.append(issue)

        # Suggested actions: derived from the first 3 insights that carry
        # a suggestion string.
        suggested_actions = []
        for insight in insights[:3]:
            if insight.get("suggestion"):
                suggested_actions.append({"action": "investigate", "reason": insight["suggestion"]})

        output = {
            "assessment": {
                "severity": severity,
                "confidence": confidence,
                "summary": f"Error rate: {error_rate:.1%}, {len(insights)} issues detected",
            },
            "metrics": {
                "error_rate": round(error_rate, 4),
                "error_count": overview.get("error_count", 0),
                "total_entries": overview.get("total_logs", 0),
                "log_levels": overview.get("log_levels", {}),
            },
            "top_issues": top_issues,
            "suggested_actions": suggested_actions,
            "next_steps": result.get("next_steps", []),
        }

        _output_json(output, pretty)
        sys.exit(EXIT_SUCCESS)

    except Exception as e:
        _error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
616
|
+
|
|
617
|
+
|
|
618
|
+
@llm.command()
@click.argument("identifier")
@click.option("--files", "-f", multiple=True, help="Files to search (supports globs)")
@click.option(
    "--type",
    "id_type",
    type=click.Choice(["auto", "correlation_id", "trace_id", "thread_id"]),
    default="auto",
    help="Identifier type",
)
@click.option("--window", default="1h", help="Time window to search (e.g., 30m, 2h)")
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
def correlate(identifier: str, files: tuple, id_type: str, window: str, pretty: bool):
    """
    Trace a request/correlation ID across files and services.

    Builds a complete timeline of all log entries matching
    the identifier across multiple files.

    Example:
        logler llm correlate req-abc123 --files "*.log"
    """
    # NOTE(review): --window is accepted by click but never used in this
    # body — no time-window restriction is applied. Confirm intent.
    from . import investigate

    try:
        # Default to "*.log" in the current directory when no --files given.
        file_list = _expand_globs(list(files)) if files else _expand_globs(["*.log"])
        if not file_list:
            _error_json(f"No files found matching: {files or ['*.log']}")

        # Resolve which identifier field to match on; exactly one of the
        # three is set before calling the backend.
        correlation_id = None
        trace_id = None
        thread_id = None

        if id_type == "auto":
            # Heuristic: "trace-" prefix or a bare 32-char string is treated
            # as a trace id; "req-"/"corr-" prefixes (and anything else)
            # fall back to correlation id.
            if identifier.startswith("trace-") or len(identifier) == 32:
                trace_id = identifier
                detected_type = "trace_id"
            elif identifier.startswith("req-") or identifier.startswith("corr-"):
                correlation_id = identifier
                detected_type = "correlation_id"
            else:
                correlation_id = identifier
                detected_type = "correlation_id"
        elif id_type == "correlation_id":
            correlation_id = identifier
            detected_type = "correlation_id"
        elif id_type == "trace_id":
            trace_id = identifier
            detected_type = "trace_id"
        elif id_type == "thread_id":
            thread_id = identifier
            detected_type = "thread_id"

        result = investigate.follow_thread(
            files=file_list, thread_id=thread_id, correlation_id=correlation_id, trace_id=trace_id
        )

        entries = result.get("entries", [])

        # Build an ordered timeline, collecting the set of services seen
        # and the first timestamp as a starting anchor.
        timeline = []
        services = set()
        start_time = None

        for i, entry in enumerate(entries):
            ts = entry.get("timestamp")
            if ts and not start_time:
                start_time = ts

            # Backends may use either key for the service name.
            service = entry.get("service") or entry.get("service_name")
            if service:
                services.add(service)

            timeline_entry = {
                "sequence": i + 1,
                "timestamp": ts,
                "file": entry.get("file"),
                "line_number": entry.get("line_number"),
                "level": entry.get("level"),
                "message": entry.get("message"),
            }

            if entry.get("thread_id"):
                timeline_entry["thread_id"] = entry["thread_id"]
            if service:
                timeline_entry["service"] = service

            timeline.append(timeline_entry)

        # First ERROR/FATAL/CRITICAL entry (if any) marks the failure point.
        error_point = None
        for entry in timeline:
            if entry.get("level") in ["ERROR", "FATAL", "CRITICAL"]:
                error_point = entry
                break

        output = {
            "identifier": identifier,
            "identifier_type": detected_type,
            "trace": {
                "total_entries": len(timeline),
                "services": list(services),
                "duration_ms": result.get("duration_ms"),
                "outcome": "error" if error_point else "success",
            },
            "timeline": timeline,
        }

        if error_point:
            output["error_point"] = error_point

        _output_json(output, pretty)

        if len(timeline) == 0:
            sys.exit(EXIT_NO_RESULTS)
        else:
            sys.exit(EXIT_SUCCESS)

    except Exception as e:
        _error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
739
|
+
|
|
740
|
+
|
|
741
|
+
@llm.command()
@click.argument("identifier")
@click.option("--files", "-f", multiple=True, help="Files to search (supports globs)")
@click.option("--max-depth", type=int, help="Maximum hierarchy depth")
@click.option("--min-confidence", type=float, default=0.0, help="Minimum confidence (0.0-1.0)")
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
def hierarchy(
    identifier: str, files: tuple, max_depth: Optional[int], min_confidence: float, pretty: bool
):
    """
    Build full parent-child hierarchy tree as structured data.

    Detects thread/span relationships using:
    - Explicit parent_span_id (OpenTelemetry)
    - Naming patterns (worker-1.task-a)
    - Temporal inference

    Example:
        logler llm hierarchy trace-xyz789 --files "*.log"
    """
    from . import investigate

    try:
        # Default to "*.log" in the current directory when no --files given.
        file_list = _expand_globs(list(files)) if files else _expand_globs(["*.log"])
        if not file_list:
            _error_json(f"No files found matching: {files or ['*.log']}")

        # All detection logic lives in the backend; this command is a thin
        # JSON-emitting wrapper around it.
        result = investigate.follow_thread_hierarchy(
            files=file_list,
            root_identifier=identifier,
            max_depth=max_depth,
            min_confidence=min_confidence,
        )

        # Output directly - hierarchy result is already structured
        _output_json(result, pretty)

        # An empty/missing "roots" list means nothing matched the identifier.
        if not result.get("roots"):
            sys.exit(EXIT_NO_RESULTS)
        else:
            sys.exit(EXIT_SUCCESS)

    except Exception as e:
        _error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
785
|
+
|
|
786
|
+
|
|
787
|
+
@llm.command("verify-pattern")
|
|
788
|
+
@click.argument("files", nargs=-1, required=True)
|
|
789
|
+
@click.option("--pattern", required=True, help="Regex pattern to verify")
|
|
790
|
+
@click.option("--extract-groups", is_flag=True, help="Extract and analyze capture groups")
|
|
791
|
+
@click.option("--hypothesis", help="Natural language hypothesis (for documentation)")
|
|
792
|
+
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
|
|
793
|
+
def verify_pattern(
|
|
794
|
+
files: tuple, pattern: str, extract_groups: bool, hypothesis: Optional[str], pretty: bool
|
|
795
|
+
):
|
|
796
|
+
"""
|
|
797
|
+
Test a hypothesis about log patterns programmatically.
|
|
798
|
+
|
|
799
|
+
Verifies if a pattern exists in logs and optionally
|
|
800
|
+
extracts/analyzes capture groups.
|
|
801
|
+
|
|
802
|
+
Example:
|
|
803
|
+
logler llm verify-pattern app.log --pattern "timeout after (\\d+)ms" --extract-groups
|
|
804
|
+
"""
|
|
805
|
+
from .parser import LogParser
|
|
806
|
+
|
|
807
|
+
try:
|
|
808
|
+
file_list = _expand_globs(list(files))
|
|
809
|
+
if not file_list:
|
|
810
|
+
_error_json(f"No files found matching: {files}")
|
|
811
|
+
|
|
812
|
+
try:
|
|
813
|
+
regex = safe_compile(pattern)
|
|
814
|
+
except (re.error, RegexTimeoutError, RegexPatternTooLongError) as e:
|
|
815
|
+
_error_json(f"Invalid regex pattern: {e}")
|
|
816
|
+
|
|
817
|
+
parser = LogParser()
|
|
818
|
+
|
|
819
|
+
matches = []
|
|
820
|
+
total_entries = 0
|
|
821
|
+
group_values = defaultdict(lambda: defaultdict(int))
|
|
822
|
+
by_thread = defaultdict(int)
|
|
823
|
+
first_match = None
|
|
824
|
+
last_match = None
|
|
825
|
+
|
|
826
|
+
for file_path in file_list:
|
|
827
|
+
try:
|
|
828
|
+
with open(file_path, "r", errors="replace") as f:
|
|
829
|
+
for i, line in enumerate(f):
|
|
830
|
+
line = line.rstrip()
|
|
831
|
+
if not line:
|
|
832
|
+
continue
|
|
833
|
+
|
|
834
|
+
total_entries += 1
|
|
835
|
+
entry = parser.parse_line(i + 1, line)
|
|
836
|
+
|
|
837
|
+
# Try matching against message and raw
|
|
838
|
+
match = regex.search(entry.message or "") or regex.search(line)
|
|
839
|
+
|
|
840
|
+
if match:
|
|
841
|
+
match_info = {
|
|
842
|
+
"file": file_path,
|
|
843
|
+
"line_number": i + 1,
|
|
844
|
+
"raw": line[:200],
|
|
845
|
+
}
|
|
846
|
+
|
|
847
|
+
if extract_groups and match.groups():
|
|
848
|
+
match_info["groups"] = list(match.groups())
|
|
849
|
+
for j, grp in enumerate(match.groups()):
|
|
850
|
+
if grp:
|
|
851
|
+
group_values[f"group_{j+1}"][grp] += 1
|
|
852
|
+
|
|
853
|
+
matches.append(match_info)
|
|
854
|
+
|
|
855
|
+
if not first_match:
|
|
856
|
+
first_match = entry.timestamp
|
|
857
|
+
last_match = entry.timestamp
|
|
858
|
+
|
|
859
|
+
# Track by thread
|
|
860
|
+
if entry.thread_id:
|
|
861
|
+
by_thread[entry.thread_id] += 1
|
|
862
|
+
|
|
863
|
+
except FileNotFoundError:
|
|
864
|
+
_error_json(f"File not found: {file_path}")
|
|
865
|
+
except PermissionError:
|
|
866
|
+
_error_json(f"Permission denied: {file_path}")
|
|
867
|
+
|
|
868
|
+
# Build output
|
|
869
|
+
output = {
|
|
870
|
+
"pattern": pattern,
|
|
871
|
+
"hypothesis": hypothesis,
|
|
872
|
+
"verified": len(matches) > 0,
|
|
873
|
+
"statistics": {
|
|
874
|
+
"total_matches": len(matches),
|
|
875
|
+
"total_entries": total_entries,
|
|
876
|
+
"match_rate": round(len(matches) / total_entries, 6) if total_entries > 0 else 0,
|
|
877
|
+
"first_match": str(first_match) if first_match else None,
|
|
878
|
+
"last_match": str(last_match) if last_match else None,
|
|
879
|
+
},
|
|
880
|
+
"sample_matches": matches[:20], # First 20 matches as samples
|
|
881
|
+
}
|
|
882
|
+
|
|
883
|
+
if extract_groups and group_values:
|
|
884
|
+
extracted = {}
|
|
885
|
+
for group_name, values in group_values.items():
|
|
886
|
+
# Get numeric stats if all values are numeric
|
|
887
|
+
numeric_vals = []
|
|
888
|
+
for v in values.keys():
|
|
889
|
+
try:
|
|
890
|
+
numeric_vals.append(float(v))
|
|
891
|
+
except (ValueError, TypeError):
|
|
892
|
+
pass
|
|
893
|
+
|
|
894
|
+
group_data = {"values": dict(values), "unique_count": len(values)}
|
|
895
|
+
|
|
896
|
+
if numeric_vals:
|
|
897
|
+
group_data["min"] = min(numeric_vals)
|
|
898
|
+
group_data["max"] = max(numeric_vals)
|
|
899
|
+
group_data["mean"] = round(sum(numeric_vals) / len(numeric_vals), 2)
|
|
900
|
+
|
|
901
|
+
extracted[group_name] = group_data
|
|
902
|
+
|
|
903
|
+
output["extracted_groups"] = extracted
|
|
904
|
+
|
|
905
|
+
if by_thread:
|
|
906
|
+
output["distribution"] = {"by_thread": dict(by_thread)}
|
|
907
|
+
|
|
908
|
+
_output_json(output, pretty)
|
|
909
|
+
|
|
910
|
+
if len(matches) == 0:
|
|
911
|
+
sys.exit(EXIT_NO_RESULTS)
|
|
912
|
+
else:
|
|
913
|
+
sys.exit(EXIT_SUCCESS)
|
|
914
|
+
|
|
915
|
+
except Exception as e:
|
|
916
|
+
_error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
917
|
+
|
|
918
|
+
|
|
919
|
+
@llm.command()
@click.argument("files", nargs=-1, required=True)
@click.option("--level", help="Filter by level")
@click.option("--query", help="Filter by pattern")
@click.option("--fields", help="Comma-separated fields to include")
@click.option("--compact", is_flag=True, help="Minimal JSON (short keys)")
def emit(
    files: tuple, level: Optional[str], query: Optional[str], fields: Optional[str], compact: bool
):
    """
    Stream parsed entries as JSONL for processing.

    Outputs one JSON object per line, suitable for piping
    to other tools or processing large files.

    Example:
        logler llm emit app.log --level ERROR | head -100
    """
    from .parser import LogParser

    try:
        file_list = _expand_globs(list(files))
        if not file_list:
            _error_json(f"No files found matching: {files}")

        parser = LogParser()

        # Parse field list into a set for O(1) membership checks.
        include_fields = None
        if fields:
            include_fields = {f.strip() for f in fields.split(",")}

        # Compile query regex if provided (case-insensitive).
        query_regex = None
        if query:
            try:
                query_regex = safe_compile(query, re.IGNORECASE)
            except (re.error, RegexTimeoutError, RegexPatternTooLongError) as e:
                _error_json(f"Invalid regex pattern: {e}")

        for file_path in file_list:
            try:
                with open(file_path, "r", errors="replace") as f:
                    for i, line in enumerate(f):
                        line = line.rstrip()
                        if not line:
                            continue

                        entry = parser.parse_line(i + 1, line)

                        # Apply level filter
                        if level and str(entry.level).upper() != level.upper():
                            continue

                        # Apply query filter: keep the entry if the pattern hits
                        # either the parsed message or the raw line.
                        if query_regex and not (
                            query_regex.search(entry.message or "") or query_regex.search(line)
                        ):
                            continue

                        # Build output (short keys in compact mode)
                        if compact:
                            out = {
                                "ln": i + 1,
                                "ts": str(entry.timestamp) if entry.timestamp else None,
                                "lv": str(entry.level) if entry.level else None,
                                "msg": entry.message,
                            }
                            if entry.thread_id:
                                out["th"] = entry.thread_id
                        else:
                            out = {
                                "file": file_path,
                                "line_number": i + 1,
                                "timestamp": str(entry.timestamp) if entry.timestamp else None,
                                "level": str(entry.level) if entry.level else None,
                                "message": entry.message,
                            }
                            if entry.thread_id:
                                out["thread_id"] = entry.thread_id
                            if entry.correlation_id:
                                out["correlation_id"] = entry.correlation_id

                        # Filter fields if specified
                        if include_fields:
                            out = {k: v for k, v in out.items() if k in include_fields}

                        click.echo(json.dumps(out, default=str))

            except FileNotFoundError:
                pass  # Skip missing files in emit mode
            except PermissionError:
                pass

        sys.exit(EXIT_SUCCESS)

    except Exception as e:
        # In emit mode, errors go to stderr so stdout stays valid JSONL.
        # (sys is already imported at module level; no shadow import needed.)
        sys.stderr.write(json.dumps({"error": str(e)}) + "\n")
        sys.exit(EXIT_INTERNAL_ERROR)
@llm.command()
@click.argument("files", nargs=-1, required=True)
@click.option("--before-start", help="Before period start (ISO8601)")
@click.option("--before-end", help="Before period end (ISO8601)")
@click.option("--after-start", help="After period start (ISO8601)")
@click.option("--after-end", help="After period end (ISO8601)")
@click.option("--baseline", help="Use last N as baseline (e.g., 1h)")
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
def diff(
    files: tuple,
    before_start: Optional[str],
    before_end: Optional[str],
    after_start: Optional[str],
    after_end: Optional[str],
    baseline: Optional[str],
    pretty: bool,
):
    """
    Compare log characteristics between time periods.

    Useful for understanding what changed before/after an incident.

    Example:
        logler llm diff app.log --baseline 1h
    """
    from .parser import LogParser
    from datetime import timezone

    try:
        file_list = _expand_globs(list(files))
        if not file_list:
            _error_json(f"No files found matching: {files}")

        parser = LogParser()

        # Parse time periods
        # Windows computed relative to wall-clock "now" (UTC) in baseline
        # mode; explicit ISO8601 bounds otherwise.
        now = datetime.now(timezone.utc)

        if baseline:
            # --baseline N splits the last 2*N into two adjacent, equal
            # windows: [now-2N, now-N) = "before", [now-N, now] = "after".
            try:
                duration = _parse_duration(baseline)
                after_end_ts = now
                after_start_ts = now - duration
                before_end_ts = after_start_ts
                before_start_ts = before_end_ts - duration
            except ValueError as e:
                _error_json(str(e))
        else:

            def parse_ts(s):
                # Accept ISO8601; a trailing "Z" is normalized to +00:00 for
                # datetime.fromisoformat. _error_json exits on bad input.
                if not s:
                    return None
                try:
                    return datetime.fromisoformat(s.replace("Z", "+00:00"))
                except ValueError:
                    _error_json(f"Invalid timestamp: {s}")

            before_start_ts = parse_ts(before_start)
            before_end_ts = parse_ts(before_end)
            after_start_ts = parse_ts(after_start)
            after_end_ts = parse_ts(after_end)

        # Collect entries for each period
        before_entries = []
        after_entries = []

        for file_path in file_list:
            try:
                with open(file_path, "r", errors="replace") as f:
                    for i, line in enumerate(f):
                        line = line.rstrip()
                        if not line:
                            continue

                        entry = parser.parse_line(i + 1, line)

                        # Entries without a parseable timestamp cannot be
                        # assigned to a window; they are dropped.
                        if not entry.timestamp:
                            continue

                        try:
                            ts = entry.timestamp
                            if isinstance(ts, str):
                                ts = datetime.fromisoformat(ts.replace("Z", "+00:00"))

                            # Make timezone-aware if needed
                            # NOTE(review): naive timestamps are assumed to
                            # be UTC here — confirm against parser output.
                            if ts.tzinfo is None:
                                ts = ts.replace(tzinfo=timezone.utc)

                            # An entry may fall into both windows if the
                            # ranges overlap (bounds are inclusive).
                            if before_start_ts and before_end_ts:
                                if before_start_ts <= ts <= before_end_ts:
                                    before_entries.append(entry)

                            if after_start_ts and after_end_ts:
                                if after_start_ts <= ts <= after_end_ts:
                                    after_entries.append(entry)
                        except (ValueError, TypeError):
                            pass

            except (FileNotFoundError, PermissionError):
                pass

        # Calculate metrics
        def calc_metrics(entries):
            # Per-window summary: total, error count/rate, level histogram.
            if not entries:
                return {"total": 0, "error_rate": 0, "by_level": {}}

            by_level = defaultdict(int)
            errors = 0
            for e in entries:
                lvl = str(e.level) if e.level else "UNKNOWN"
                by_level[lvl] += 1
                if lvl in ["ERROR", "FATAL", "CRITICAL"]:
                    errors += 1

            return {
                "total": len(entries),
                "error_count": errors,
                "error_rate": round(errors / len(entries), 4) if entries else 0,
                "by_level": dict(by_level),
            }

        before_metrics = calc_metrics(before_entries)
        after_metrics = calc_metrics(after_entries)

        # Calculate changes
        # NOTE(review): volume_change stays 0 when the "before" window is
        # empty — verify this sentinel is what downstream consumers expect.
        volume_change = 0
        if before_metrics["total"] > 0:
            volume_change = round(
                (after_metrics["total"] - before_metrics["total"]) / before_metrics["total"] * 100,
                1,
            )

        # error_rate_change is a percent-change string; it is None when the
        # "before" error rate is 0 (no baseline to compare against).
        error_rate_change = None
        if before_metrics["error_rate"] > 0:
            change_pct = (
                (after_metrics["error_rate"] - before_metrics["error_rate"])
                / before_metrics["error_rate"]
                * 100
            )
            error_rate_change = f"{change_pct:+.0f}%"

        output = {
            "comparison": {
                "before": {
                    "start": str(before_start_ts) if before_start_ts else None,
                    "end": str(before_end_ts) if before_end_ts else None,
                    **before_metrics,
                },
                "after": {
                    "start": str(after_start_ts) if after_start_ts else None,
                    "end": str(after_end_ts) if after_end_ts else None,
                    **after_metrics,
                },
            },
            "changes": {
                "volume_change_percent": volume_change,
                "error_rate_before": before_metrics["error_rate"],
                "error_rate_after": after_metrics["error_rate"],
                "error_rate_change": error_rate_change,
            },
        }

        _output_json(output, pretty)
        sys.exit(EXIT_SUCCESS)

    except Exception as e:
        _error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
# Session management subgroup
@llm.group()
def session():
    """
    Stateful investigation sessions for complex analyses.

    Sessions track investigation steps and can be saved/resumed.
    """
    # Click group entry point; the subcommands (create/list/query/note/
    # conclude) carry all behavior, so the group body itself is a no-op.
    pass
@session.command("create")
|
|
1204
|
+
@click.option("--files", "-f", multiple=True, required=True, help="Files to include")
|
|
1205
|
+
@click.option("--name", help="Session name")
|
|
1206
|
+
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
|
|
1207
|
+
def session_create(files: tuple, name: Optional[str], pretty: bool):
|
|
1208
|
+
"""Create a new investigation session."""
|
|
1209
|
+
import uuid
|
|
1210
|
+
from pathlib import Path
|
|
1211
|
+
|
|
1212
|
+
try:
|
|
1213
|
+
file_list = _expand_globs(list(files))
|
|
1214
|
+
if not file_list:
|
|
1215
|
+
_error_json(f"No files found matching: {files}")
|
|
1216
|
+
|
|
1217
|
+
session_id = f"sess_{uuid.uuid4().hex[:12]}"
|
|
1218
|
+
session_name = name or f"investigation-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
|
|
1219
|
+
|
|
1220
|
+
session_data = {
|
|
1221
|
+
"session_id": session_id,
|
|
1222
|
+
"name": session_name,
|
|
1223
|
+
"created_at": datetime.now().isoformat(),
|
|
1224
|
+
"files": file_list,
|
|
1225
|
+
"status": "active",
|
|
1226
|
+
"log": [],
|
|
1227
|
+
}
|
|
1228
|
+
|
|
1229
|
+
# Save session
|
|
1230
|
+
sessions_dir = Path.home() / ".logler" / "sessions"
|
|
1231
|
+
sessions_dir.mkdir(parents=True, exist_ok=True)
|
|
1232
|
+
|
|
1233
|
+
session_file = sessions_dir / f"{session_id}.json"
|
|
1234
|
+
with open(session_file, "w") as f:
|
|
1235
|
+
json.dump(session_data, f, indent=2, default=str)
|
|
1236
|
+
|
|
1237
|
+
output = {
|
|
1238
|
+
"session_id": session_id,
|
|
1239
|
+
"name": session_name,
|
|
1240
|
+
"created_at": session_data["created_at"],
|
|
1241
|
+
"files": file_list,
|
|
1242
|
+
"status": "active",
|
|
1243
|
+
"session_file": str(session_file),
|
|
1244
|
+
}
|
|
1245
|
+
|
|
1246
|
+
_output_json(output, pretty)
|
|
1247
|
+
sys.exit(EXIT_SUCCESS)
|
|
1248
|
+
|
|
1249
|
+
except Exception as e:
|
|
1250
|
+
_error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
1251
|
+
|
|
1252
|
+
|
|
1253
|
+
@session.command("list")
|
|
1254
|
+
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
|
|
1255
|
+
def session_list(pretty: bool):
|
|
1256
|
+
"""List all investigation sessions."""
|
|
1257
|
+
from pathlib import Path
|
|
1258
|
+
|
|
1259
|
+
try:
|
|
1260
|
+
sessions_dir = Path.home() / ".logler" / "sessions"
|
|
1261
|
+
|
|
1262
|
+
if not sessions_dir.exists():
|
|
1263
|
+
_output_json({"sessions": []}, pretty)
|
|
1264
|
+
sys.exit(EXIT_SUCCESS)
|
|
1265
|
+
|
|
1266
|
+
sessions = []
|
|
1267
|
+
for session_file in sessions_dir.glob("sess_*.json"):
|
|
1268
|
+
try:
|
|
1269
|
+
with open(session_file) as f:
|
|
1270
|
+
data = json.load(f)
|
|
1271
|
+
sessions.append(
|
|
1272
|
+
{
|
|
1273
|
+
"session_id": data.get("session_id"),
|
|
1274
|
+
"name": data.get("name"),
|
|
1275
|
+
"created_at": data.get("created_at"),
|
|
1276
|
+
"status": data.get("status"),
|
|
1277
|
+
"files_count": len(data.get("files", [])),
|
|
1278
|
+
}
|
|
1279
|
+
)
|
|
1280
|
+
except (json.JSONDecodeError, KeyError):
|
|
1281
|
+
pass
|
|
1282
|
+
|
|
1283
|
+
# Sort by created_at descending
|
|
1284
|
+
sessions.sort(key=lambda x: x.get("created_at", ""), reverse=True)
|
|
1285
|
+
|
|
1286
|
+
_output_json({"sessions": sessions}, pretty)
|
|
1287
|
+
sys.exit(EXIT_SUCCESS)
|
|
1288
|
+
|
|
1289
|
+
except Exception as e:
|
|
1290
|
+
_error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
1291
|
+
|
|
1292
|
+
|
|
1293
|
+
@session.command("query")
|
|
1294
|
+
@click.argument("session_id")
|
|
1295
|
+
@click.option("--level", help="Filter by level")
|
|
1296
|
+
@click.option("--query", help="Search pattern")
|
|
1297
|
+
@click.option("--limit", type=int, help="Limit results")
|
|
1298
|
+
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
|
|
1299
|
+
def session_query(
|
|
1300
|
+
session_id: str, level: Optional[str], query: Optional[str], limit: Optional[int], pretty: bool
|
|
1301
|
+
):
|
|
1302
|
+
"""Query logs within a session context."""
|
|
1303
|
+
from pathlib import Path
|
|
1304
|
+
from . import investigate
|
|
1305
|
+
|
|
1306
|
+
try:
|
|
1307
|
+
sessions_dir = Path.home() / ".logler" / "sessions"
|
|
1308
|
+
session_file = sessions_dir / f"{session_id}.json"
|
|
1309
|
+
|
|
1310
|
+
if not session_file.exists():
|
|
1311
|
+
_error_json(f"Session not found: {session_id}")
|
|
1312
|
+
|
|
1313
|
+
with open(session_file) as f:
|
|
1314
|
+
session_data = json.load(f)
|
|
1315
|
+
|
|
1316
|
+
files = session_data.get("files", [])
|
|
1317
|
+
|
|
1318
|
+
result = investigate.search(
|
|
1319
|
+
files=files, query=query, level=level, limit=limit, output_format="full"
|
|
1320
|
+
)
|
|
1321
|
+
|
|
1322
|
+
# Log the query
|
|
1323
|
+
session_data["log"].append(
|
|
1324
|
+
{
|
|
1325
|
+
"timestamp": datetime.now().isoformat(),
|
|
1326
|
+
"action": "query",
|
|
1327
|
+
"params": {"level": level, "query": query, "limit": limit},
|
|
1328
|
+
"results_count": len(result.get("results", [])),
|
|
1329
|
+
}
|
|
1330
|
+
)
|
|
1331
|
+
|
|
1332
|
+
with open(session_file, "w") as f:
|
|
1333
|
+
json.dump(session_data, f, indent=2, default=str)
|
|
1334
|
+
|
|
1335
|
+
_output_json(result, pretty)
|
|
1336
|
+
sys.exit(EXIT_SUCCESS if result.get("results") else EXIT_NO_RESULTS)
|
|
1337
|
+
|
|
1338
|
+
except Exception as e:
|
|
1339
|
+
_error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
1340
|
+
|
|
1341
|
+
|
|
1342
|
+
@session.command("note")
|
|
1343
|
+
@click.argument("session_id")
|
|
1344
|
+
@click.option("--text", required=True, help="Note text")
|
|
1345
|
+
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
|
|
1346
|
+
def session_note(session_id: str, text: str, pretty: bool):
|
|
1347
|
+
"""Add a note to a session."""
|
|
1348
|
+
from pathlib import Path
|
|
1349
|
+
|
|
1350
|
+
try:
|
|
1351
|
+
sessions_dir = Path.home() / ".logler" / "sessions"
|
|
1352
|
+
session_file = sessions_dir / f"{session_id}.json"
|
|
1353
|
+
|
|
1354
|
+
if not session_file.exists():
|
|
1355
|
+
_error_json(f"Session not found: {session_id}")
|
|
1356
|
+
|
|
1357
|
+
with open(session_file) as f:
|
|
1358
|
+
session_data = json.load(f)
|
|
1359
|
+
|
|
1360
|
+
note_entry = {"timestamp": datetime.now().isoformat(), "action": "note", "text": text}
|
|
1361
|
+
|
|
1362
|
+
session_data["log"].append(note_entry)
|
|
1363
|
+
|
|
1364
|
+
with open(session_file, "w") as f:
|
|
1365
|
+
json.dump(session_data, f, indent=2, default=str)
|
|
1366
|
+
|
|
1367
|
+
_output_json({"status": "ok", "note": note_entry}, pretty)
|
|
1368
|
+
sys.exit(EXIT_SUCCESS)
|
|
1369
|
+
|
|
1370
|
+
except Exception as e:
|
|
1371
|
+
_error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|
|
1372
|
+
|
|
1373
|
+
|
|
1374
|
+
@session.command("conclude")
|
|
1375
|
+
@click.argument("session_id")
|
|
1376
|
+
@click.option("--summary", required=True, help="Investigation summary")
|
|
1377
|
+
@click.option("--root-cause", help="Root cause description")
|
|
1378
|
+
@click.option("--confidence", type=float, default=0.8, help="Confidence level (0.0-1.0)")
|
|
1379
|
+
@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
|
|
1380
|
+
def session_conclude(
|
|
1381
|
+
session_id: str, summary: str, root_cause: Optional[str], confidence: float, pretty: bool
|
|
1382
|
+
):
|
|
1383
|
+
"""Conclude a session with findings."""
|
|
1384
|
+
from pathlib import Path
|
|
1385
|
+
|
|
1386
|
+
try:
|
|
1387
|
+
sessions_dir = Path.home() / ".logler" / "sessions"
|
|
1388
|
+
session_file = sessions_dir / f"{session_id}.json"
|
|
1389
|
+
|
|
1390
|
+
if not session_file.exists():
|
|
1391
|
+
_error_json(f"Session not found: {session_id}")
|
|
1392
|
+
|
|
1393
|
+
with open(session_file) as f:
|
|
1394
|
+
session_data = json.load(f)
|
|
1395
|
+
|
|
1396
|
+
conclusion = {
|
|
1397
|
+
"summary": summary,
|
|
1398
|
+
"root_cause": root_cause,
|
|
1399
|
+
"confidence": confidence,
|
|
1400
|
+
"concluded_at": datetime.now().isoformat(),
|
|
1401
|
+
}
|
|
1402
|
+
|
|
1403
|
+
session_data["status"] = "concluded"
|
|
1404
|
+
session_data["conclusion"] = conclusion
|
|
1405
|
+
session_data["log"].append(
|
|
1406
|
+
{
|
|
1407
|
+
"timestamp": datetime.now().isoformat(),
|
|
1408
|
+
"action": "conclude",
|
|
1409
|
+
"conclusion": conclusion,
|
|
1410
|
+
}
|
|
1411
|
+
)
|
|
1412
|
+
|
|
1413
|
+
with open(session_file, "w") as f:
|
|
1414
|
+
json.dump(session_data, f, indent=2, default=str)
|
|
1415
|
+
|
|
1416
|
+
output = {
|
|
1417
|
+
"session_id": session_id,
|
|
1418
|
+
"conclusion": conclusion,
|
|
1419
|
+
"investigation_log": session_data["log"],
|
|
1420
|
+
}
|
|
1421
|
+
|
|
1422
|
+
_output_json(output, pretty)
|
|
1423
|
+
sys.exit(EXIT_SUCCESS)
|
|
1424
|
+
|
|
1425
|
+
except Exception as e:
|
|
1426
|
+
_error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
|