logler-1.0.7-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- logler/__init__.py +22 -0
- logler/bootstrap.py +57 -0
- logler/cache.py +75 -0
- logler/cli.py +589 -0
- logler/helpers.py +282 -0
- logler/investigate.py +3962 -0
- logler/llm_cli.py +1426 -0
- logler/log_reader.py +267 -0
- logler/parser.py +207 -0
- logler/safe_regex.py +124 -0
- logler/terminal.py +252 -0
- logler/tracker.py +138 -0
- logler/tree_formatter.py +807 -0
- logler/watcher.py +55 -0
- logler/web/__init__.py +3 -0
- logler/web/app.py +810 -0
- logler/web/static/css/tailwind.css +1 -0
- logler/web/static/css/tailwind.input.css +3 -0
- logler/web/static/logler-logo.png +0 -0
- logler/web/tailwind.config.cjs +9 -0
- logler/web/templates/index.html +1454 -0
- logler-1.0.7.dist-info/METADATA +584 -0
- logler-1.0.7.dist-info/RECORD +28 -0
- logler-1.0.7.dist-info/WHEEL +4 -0
- logler-1.0.7.dist-info/entry_points.txt +2 -0
- logler-1.0.7.dist-info/licenses/LICENSE +21 -0
- logler_rs/__init__.py +5 -0
- logler_rs/logler_rs.cp311-win_amd64.pyd +0 -0
logler/helpers.py
ADDED
@@ -0,0 +1,282 @@
"""
Helper Utilities for Common Investigation Patterns

This module provides convenience functions that wrap common investigation
patterns, making it easier for LLM agents to perform typical tasks.
"""

from typing import List, Dict, Any, Optional
import logler.investigate as investigate
from logler.investigate import Investigator


def quick_summary(files: List[str]) -> Dict[str, Any]:
    """
    Get a quick summary of log files.

    Returns a dictionary with:
    - total_lines: Total number of log entries
    - time_range: Start and end timestamps
    - log_levels: Count of each log level
    - error_rate: Percentage of ERROR/FATAL/CRITICAL logs
    - unique_threads: Number of distinct threads
    - unique_correlations: Number of distinct correlation IDs

    Example:
        summary = quick_summary(["app.log"])
        print(f"Error rate: {summary['error_rate']:.1f}%")
    """
    metadata = investigate.get_metadata(files)
    if not metadata:
        return {}

    meta = metadata[0]
    total = meta["lines"]
    levels = meta.get("log_levels", {})

    errors = levels.get("ERROR", 0) + levels.get("FATAL", 0) + levels.get("CRITICAL", 0)
    error_rate = (errors / total * 100) if total > 0 else 0

    return {
        "total_lines": total,
        "time_range": meta.get("time_range"),
        "log_levels": levels,
        "error_rate": error_rate,
        "unique_threads": meta.get("unique_threads", 0),
        "unique_correlations": meta.get("unique_correlation_ids", 0),
    }


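# Triage sketch built on quick_summary(); "app.log" and the 5% threshold are
# illustrative assumptions, not values this module defines:
#
#   s = quick_summary(["app.log"])
#   if s and s["error_rate"] > 5.0:
#       print(f"Elevated errors over {s['time_range']}: {s['log_levels']}")

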
def find_top_errors(files: List[str], limit: int = 10) -> List[Dict[str, Any]]:
    """
    Find the most common error patterns.

    Returns a list of error patterns sorted by frequency.

    Example:
        errors = find_top_errors(["app.log"], limit=5)
        for err in errors:
            print(f"{err['occurrences']}x: {err['pattern']}")
    """
    patterns = investigate.find_patterns(files, min_occurrences=2)
    return sorted(patterns["patterns"], key=lambda x: x["occurrences"], reverse=True)[:limit]


def search_errors(
    files: List[str], query: Optional[str] = None, limit: int = 100
) -> List[Dict[str, Any]]:
    """
    Search for ERROR-level logs, optionally filtered by a query string.

    Example:
        errors = search_errors(["app.log"], query="database")
        for err in errors:
            print(f"Line {err['line_number']}: {err['message']}")
    """
    results = investigate.search(files=files, query=query, level="ERROR", limit=limit)
    return [r["entry"] for r in results["results"]]


def trace_request(files: List[str], correlation_id: str) -> Dict[str, Any]:
    """
    Trace a complete request by correlation ID.

    Returns a dictionary with:
    - entries: List of all log entries for this request
    - duration_ms: Total request duration
    - error_count: Number of errors in this request
    - services: List of services involved (if available)

    Example:
        trace = trace_request(["app.log"], "req-abc123")
        print(f"Request took {trace['duration_ms']}ms with {trace['error_count']} errors")
    """
    timeline = investigate.follow_thread(files=files, correlation_id=correlation_id)

    error_count = sum(
        1 for e in timeline["entries"] if e.get("level") in ["ERROR", "FATAL", "CRITICAL"]
    )

    # Extract unique services if available
    services = set()
    for entry in timeline["entries"]:
        if "service" in entry.get("fields", {}):
            services.add(entry["fields"]["service"])

    return {
        "entries": timeline["entries"],
        "duration_ms": timeline.get("duration_ms"),
        "error_count": error_count,
        "services": list(services),
        "total_entries": timeline["total_entries"],
    }


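# Usage sketch for multi-service traces; the file names and "req-abc123" id
# are placeholders:
#
#   t = trace_request(["gateway.log", "backend.log"], "req-abc123")
#   if t["services"]:
#       print("hops: " + " -> ".join(t["services"]))

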
def detect_spikes(files: List[str], window_minutes: int = 5) -> List[Dict[str, Any]]:
    """
    Detect error rate spikes.

    Note: A full time-windowed implementation requires the SQL feature; this
    version approximates spikes with pattern detection instead, so
    window_minutes is currently unused.

    Returns a list of recurring error patterns frequent enough to suggest a spike.

    Example:
        spikes = detect_spikes(["app.log"], window_minutes=5)
        for spike in spikes:
            print(f"{spike['occurrences']}x {spike['pattern']} (first seen {spike['first_seen']})")
    """
    # A proper implementation would bucket errors into time windows via SQL;
    # pattern detection is a simpler stand-in.
    patterns = investigate.find_patterns(files, min_occurrences=3)

    spikes = []
    for pattern in patterns["patterns"]:
        if pattern["occurrences"] >= 5:  # Threshold for "spike"
            spikes.append(
                {
                    "pattern": pattern["pattern"],
                    "occurrences": pattern["occurrences"],
                    "first_seen": pattern["first_seen"],
                    "last_seen": pattern["last_seen"],
                }
            )

    return spikes


def get_error_context(file: str, line_number: int, lines: int = 10) -> Dict[str, Any]:
    """
    Get context around an error line.

    Example:
        context = get_error_context("app.log", 42, lines=5)
        print("Before:")
        for entry in context['before']:
            print(f"  {entry['message']}")
        print(f"ERROR: {context['error']['message']}")
    """
    inv = Investigator()
    inv.load_files([file])
    context = inv.get_context(file, line_number, lines, lines)

    return {
        "error": context["target"],
        "before": context["context_before"],
        "after": context["context_after"],
    }


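# Sketch of a common two-step pattern: find errors, then pull surrounding
# lines. Relies only on the "line_number" key shown in the examples above:
#
#   for err in search_errors(["app.log"], query="timeout", limit=5):
#       ctx = get_error_context("app.log", err["line_number"], lines=5)
#       print(ctx["error"]["message"])

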
def analyze_thread_health(files: List[str]) -> Dict[str, Any]:
    """
    Analyze health of each thread.

    Note: Per-thread log level counts require SQL queries; this simplified
    version returns only the total thread count.

    Example:
        health = analyze_thread_health(["app.log"])
        print(f"{health['total_threads']} threads seen")
    """
    metadata = investigate.get_metadata(files)
    if not metadata:
        return {}

    # A full implementation would track level counts per thread; for now,
    # return basic info from the file metadata.
    return {
        "note": "Thread health analysis requires SQL queries for full implementation",
        "total_threads": metadata[0].get("unique_threads", 0),
    }


def find_cascading_failures(files: List[str]) -> List[Dict[str, Any]]:
    """
    Find patterns that suggest cascading failures.

    Heuristic: a recurring pattern (3+ occurrences) that touches three or more
    threads is treated as a likely cascade, since cascading failures typically
    spread across threads and services.

    Example:
        cascades = find_cascading_failures(["app.log"])
        for cascade in cascades:
            print(f"Cascade: {cascade['pattern']} across {len(cascade['threads'])} threads")
    """
    patterns = investigate.find_patterns(files, min_occurrences=3)

    cascades = []
    for pattern in patterns["patterns"]:
        # Cascading failures typically affect multiple threads
        if len(pattern["affected_threads"]) >= 3:
            cascades.append(
                {
                    "pattern": pattern["pattern"],
                    "occurrences": pattern["occurrences"],
                    "threads": pattern["affected_threads"],
                    "first_seen": pattern["first_seen"],
                    "last_seen": pattern["last_seen"],
                }
            )

    return cascades


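# Ranking sketch over the cascade dicts returned above; plain Python, no
# extra logler API assumed:
#
#   cascades = find_cascading_failures(["app.log"])
#   cascades.sort(key=lambda c: (len(c["threads"]), c["occurrences"]), reverse=True)
#   worst = cascades[0] if cascades else None

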
def get_timeline_summary(files: List[str], correlation_id: str) -> str:
    """
    Get a human-readable timeline summary for a request.

    Example:
        summary = get_timeline_summary(["app.log"], "req-001")
        print(summary)
    """
    timeline = investigate.follow_thread(files=files, correlation_id=correlation_id)

    if not timeline["entries"]:
        return f"No entries found for correlation_id={correlation_id}"

    lines = []
    lines.append(f"Request {correlation_id} Timeline:")
    lines.append(f"Duration: {timeline.get('duration_ms', 'unknown')}ms")
    lines.append(f"Total entries: {timeline['total_entries']}")
    lines.append("")

    for i, entry in enumerate(timeline["entries"][:10], 1):  # Limit to first 10
        level_emoji = {
            "INFO": "ℹ️",
            "WARN": "⚠️",
            "ERROR": "❌",
            "FATAL": "💀",
            "CRITICAL": "🔴",
        }.get(entry.get("level"), "📝")

        thread = entry.get("thread_id", "unknown")
        message = entry.get("message", "")[:60]
        lines.append(f"{i:2d}. {level_emoji} [{thread}] {message}")

    if timeline["total_entries"] > 10:
        lines.append(f"... and {timeline['total_entries'] - 10} more entries")

    return "\n".join(lines)


# Convenience aliases for common operations
def errors(files: List[str], **kwargs) -> List[Dict[str, Any]]:
    """Shorthand for search_errors()"""
    return search_errors(files, **kwargs)


def trace(files: List[str], correlation_id: str) -> Dict[str, Any]:
    """Shorthand for trace_request()"""
    return trace_request(files, correlation_id)


def summary(files: List[str]) -> Dict[str, Any]:
    """Shorthand for quick_summary()"""
    return quick_summary(files)


def patterns(files: List[str], limit: int = 10) -> List[Dict[str, Any]]:
    """Shorthand for find_top_errors()"""
    return find_top_errors(files, limit)
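

# End-to-end sketch tying the shorthands together; "app.log" is an
# illustrative path:
#
#   s = summary(["app.log"])
#   print(f"{s.get('total_lines', 0)} lines, {s.get('error_rate', 0):.1f}% errors")
#   for p in patterns(["app.log"], limit=5):
#       print(f"{p['occurrences']}x {p['pattern']}")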