logler-1.1.0-cp311-cp311-manylinux_2_34_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- logler/__init__.py +22 -0
- logler/bootstrap.py +55 -0
- logler/cache.py +75 -0
- logler/cli.py +535 -0
- logler/helpers.py +282 -0
- logler/investigate.py +4003 -0
- logler/llm_cli.py +1426 -0
- logler/log_reader.py +267 -0
- logler/parser.py +207 -0
- logler/safe_regex.py +124 -0
- logler/sql.py +147 -0
- logler/terminal.py +252 -0
- logler/tracker.py +138 -0
- logler/tree_formatter.py +807 -0
- logler/watcher.py +55 -0
- logler-1.1.0.dist-info/METADATA +587 -0
- logler-1.1.0.dist-info/RECORD +22 -0
- logler-1.1.0.dist-info/WHEEL +4 -0
- logler-1.1.0.dist-info/entry_points.txt +2 -0
- logler-1.1.0.dist-info/licenses/LICENSE +21 -0
- logler_rs/__init__.py +5 -0
- logler_rs/logler_rs.cpython-311-x86_64-linux-gnu.so +0 -0
logler/__init__.py
ADDED
@@ -0,0 +1,22 @@
+"""
+Logler - Beautiful local log viewer with thread tracking and real-time updates.
+"""
+
+__version__ = "1.1.0"
+__author__ = "Logler Contributors"
+
+from .parser import LogParser, LogEntry
+from .tracker import ThreadTracker
+from .log_reader import LogReader
+from .tree_formatter import format_tree, format_waterfall, print_tree, print_waterfall
+
+__all__ = [
+    "LogParser",
+    "LogEntry",
+    "ThreadTracker",
+    "LogReader",
+    "format_tree",
+    "format_waterfall",
+    "print_tree",
+    "print_waterfall",
+]
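For orientation, a minimal usage sketch of the re-exported API. The parse_line(line_number, text) signature and the level attribute are inferred from how logler/cli.py (below) uses LogParser, so treat them as assumptions rather than documented API:

    from logler import LogParser

    parser = LogParser()
    with open("app.log") as f:
        # parse_line(line_number, raw_text) -> LogEntry, mirroring the stats command below
        entries = [parser.parse_line(i + 1, line.rstrip()) for i, line in enumerate(f)]

    errors = [e for e in entries if str(e.level) in ("ERROR", "FATAL", "CRITICAL")]
    print(f"{len(errors)} errors out of {len(entries)} entries")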
logler/bootstrap.py
ADDED
@@ -0,0 +1,55 @@
+"""
+Helper to ensure the Rust backend is installed.
+
+Attempts to import `logler_rs`; if missing, runs `maturin develop` against
+`crates/logler-py/Cargo.toml`.
+"""
+
+from __future__ import annotations
+
+import subprocess
+from pathlib import Path
+from typing import Optional
+
+
+def ensure_rust_backend(auto_install: bool = True) -> bool:
+    """Ensure logler_rs is importable. Optionally auto-installs via maturin."""
+    try:
+        import logler_rs  # noqa: F401
+
+        return True
+    except Exception:
+        if not auto_install:
+            return False
+
+    maturin = _which("maturin")
+    if not maturin:
+        return False
+
+    repo_root = Path(__file__).resolve().parents[2]
+    cmd = [
+        maturin,
+        "develop",
+        "--release",
+        "-m",
+        str(repo_root / "crates" / "logler-py" / "Cargo.toml"),
+    ]
+    try:
+        subprocess.run(
+            cmd, cwd=repo_root, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+        )
+    except Exception:
+        return False
+
+    try:
+        import logler_rs  # noqa: F401
+
+        return True
+    except Exception:
+        return False
+
+
+def _which(cmd: str) -> Optional[str]:
+    from shutil import which
+
+    return which(cmd)
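A sketch of a typical call site; nothing in the code shown here actually invokes ensure_rust_backend, so the placement is an assumption:

    import sys

    from logler.bootstrap import ensure_rust_backend

    # Hypothetical startup guard: bail out when logler_rs is missing and
    # `maturin develop` is unavailable or fails.
    if not ensure_rust_backend(auto_install=True):
        sys.exit("logler_rs backend unavailable; install maturin or build it manually")

Note that repo_root = Path(__file__).resolve().parents[2] only points at a real checkout when the package runs from a source tree; in a wheel install such as this one, that directory contains no crates/logler-py/Cargo.toml, so the maturin step fails and the function returns False.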
logler/cache.py
ADDED
@@ -0,0 +1,75 @@
+"""
+Performance optimization: Add file-level index caching to standalone functions.
+
+Currently, standalone functions like search(), follow_thread() create a new
+Investigator and re-read files every time. This is inefficient for repeated
+queries on the same files.
+
+Solution: Add a module-level cache (simple FIFO eviction) that reuses Investigator instances.
+"""
+
+import threading
+from typing import Dict
+import logler_rs
+
+# Thread-safe cache for Investigator instances
+_investigator_lock = threading.Lock()
+_investigator_cache: Dict[tuple, logler_rs.PyInvestigator] = {}
+_cache_max_size = 10  # Keep up to 10 file sets in cache
+
+
+def _get_cached_investigator(files: tuple) -> logler_rs.PyInvestigator:
+    """
+    Get or create a cached Investigator for the given files.
+
+    This allows standalone functions to reuse parsed indices when
+    called multiple times with the same files.
+    """
+    with _investigator_lock:
+        # Check cache
+        if files in _investigator_cache:
+            return _investigator_cache[files]
+
+        # Create new investigator
+        inv = logler_rs.PyInvestigator()
+        inv.load_files(list(files))
+
+        # Add to cache (with simple size limit)
+        if len(_investigator_cache) >= _cache_max_size:
+            # Remove oldest entry (simple FIFO)
+            oldest = next(iter(_investigator_cache))
+            del _investigator_cache[oldest]
+
+        _investigator_cache[files] = inv
+        return inv
+
+
+def get_cached_investigator(files) -> logler_rs.PyInvestigator:
+    """
+    Public accessor that normalizes the incoming file list into a stable cache key.
+    """
+    key = tuple(sorted(str(f) for f in files))
+    return _get_cached_investigator(key)
+
+
+def clear_cache():
+    """Clear the investigator cache (useful for testing or freeing memory)"""
+    with _investigator_lock:
+        _investigator_cache.clear()
+
+
+# Example usage showing the difference:
+#
+# SLOW (current):
+#     for i in range(100):
+#         search(["app.log"], level="ERROR")  # Re-reads file 100 times!
+#
+# FAST (with cache):
+#     for i in range(100):
+#         search(["app.log"], level="ERROR")  # Reads once, caches index!
+#
+# FASTEST (explicit Investigator):
+#     inv = Investigator()
+#     inv.load_files(["app.log"])
+#     for i in range(100):
+#         inv.search(level="ERROR")  # Explicit control, no cache overhead
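The module docstring promises an LRU cache, but the eviction above is plain FIFO: next(iter(...)) drops the oldest inserted key even if it was used a moment ago. A true LRU variant is a small change; this sketch uses collections.OrderedDict, with factory standing in for the PyInvestigator construction shown above:

    import threading
    from collections import OrderedDict

    _lru_lock = threading.Lock()
    _lru_cache: "OrderedDict[tuple, object]" = OrderedDict()
    _LRU_MAX = 10

    def get_lru_investigator(files: tuple, factory):
        """Return a cached instance, evicting the least recently used entry."""
        with _lru_lock:
            if files in _lru_cache:
                _lru_cache.move_to_end(files)  # mark as most recently used
                return _lru_cache[files]
            inv = factory(files)  # e.g. PyInvestigator() plus load_files(list(files))
            if len(_lru_cache) >= _LRU_MAX:
                _lru_cache.popitem(last=False)  # evict the least recently used key
            _lru_cache[files] = inv
            return inv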
logler/cli.py
ADDED
@@ -0,0 +1,535 @@
+"""
+Command-line interface for Logler.
+"""
+
+import asyncio
+import click
+import sys
+from typing import Optional
+
+from .terminal import TerminalViewer
+from .llm_cli import llm as llm_group
+
+
+@click.group(invoke_without_command=True)
+@click.pass_context
+@click.version_option()
+def main(ctx):
+    """
+    📋 Logler - Beautiful local log viewer
+
+    A modern log viewer with thread tracking, real-time updates, and beautiful output.
+    """
+    if ctx.invoked_subcommand is None:
+        click.echo(ctx.get_help())
+
+
+@main.command()
+@click.argument("files", nargs=-1, required=True, type=click.Path(exists=True))
+@click.option("-n", "--lines", type=int, help="Number of lines to show")
+@click.option("-f", "--follow", is_flag=True, help="Follow log file in real-time")
+@click.option("--level", type=str, help="Filter by log level (DEBUG, INFO, WARN, ERROR)")
+@click.option("--grep", type=str, help="Search for pattern")
+@click.option("--thread", type=str, help="Filter by thread ID")
+@click.option("--no-color", is_flag=True, help="Disable colored output")
+def view(
+    files: tuple,
+    lines: Optional[int],
+    follow: bool,
+    level: Optional[str],
+    grep: Optional[str],
+    thread: Optional[str],
+    no_color: bool,
+):
+    """
+    View log files in the terminal with beautiful output.
+
+    Examples:
+        logler view app.log                    # View entire file
+        logler view app.log -n 100             # Last 100 lines
+        logler view app.log -f                 # Follow in real-time
+        logler view app.log --level ERROR      # Show only errors
+        logler view app.log --grep "timeout"   # Search for pattern
+        logler view app.log --thread worker-1  # Filter by thread
+    """
+    viewer = TerminalViewer(use_colors=not no_color)
+
+    for file_path in files:
+        try:
+            asyncio.run(
+                viewer.view_file(
+                    file_path=file_path,
+                    lines=lines,
+                    follow=follow,
+                    level_filter=level,
+                    pattern=grep,
+                    thread_filter=thread,
+                )
+            )
+        except KeyboardInterrupt:
+            click.echo("\n👋 Goodbye!")
+            sys.exit(0)
+        except Exception as e:
+            click.echo(f"❌ Error: {e}", err=True)
+            sys.exit(1)
+
+
+@main.command()
+@click.argument("files", nargs=-1, required=True, type=click.Path(exists=True))
+@click.option("--json", "output_json", is_flag=True, help="Output as JSON")
+def stats(files: tuple, output_json: bool):
+    """
+    Show statistics for log files.
+
+    Examples:
+        logler stats app.log         # Show statistics
+        logler stats app.log --json  # Output as JSON
+    """
+    from .parser import LogParser
+    from rich.console import Console
+    from rich.table import Table
+
+    console = Console()
+    parser = LogParser()
+
+    for file_path in files:
+        with open(file_path, "r") as f:
+            entries = [parser.parse_line(i + 1, line.rstrip()) for i, line in enumerate(f)]
+
+        stats_data = {
+            "total": len(entries),
+            "by_level": {},
+            "by_thread": {},
+            "errors": 0,
+        }
+
+        for entry in entries:
+            level = str(entry.level)
+            stats_data["by_level"][level] = stats_data["by_level"].get(level, 0) + 1
+
+            if entry.level in ["ERROR", "FATAL", "CRITICAL"]:
+                stats_data["errors"] += 1
+
+            if entry.thread_id:
+                stats_data["by_thread"][entry.thread_id] = (
+                    stats_data["by_thread"].get(entry.thread_id, 0) + 1
+                )
+
+        if output_json:
+            console.print_json(data=stats_data)
+        else:
+            console.print(f"\n[bold]📊 Statistics for {file_path}[/bold]\n")
+
+            table = Table(title="Log Levels")
+            table.add_column("Level", style="cyan")
+            table.add_column("Count", justify="right", style="green")
+
+            for level, count in sorted(stats_data["by_level"].items()):
+                table.add_row(level, str(count))
+
+            console.print(table)
+
+            console.print(f"\n[bold red]Errors:[/bold red] {stats_data['errors']}")
+            console.print(f"[bold]Total:[/bold] {stats_data['total']} entries\n")
+
+
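Note: with --json, stats prints stats_data directly. For a hypothetical four-line log (three INFO lines and one ERROR line, all on thread worker-1) the payload would look roughly like this; the exact by_level keys depend on how entry.level stringifies:

    {
        "total": 4,
        "by_level": {"INFO": 3, "ERROR": 1},
        "by_thread": {"worker-1": 4},
        "errors": 1,
    }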
+@main.command()
+@click.argument("files", nargs=-1, required=True, type=click.Path(exists=True))
+@click.option("--auto-insights", is_flag=True, help="Run automatic insights analysis")
+@click.option("--errors", is_flag=True, help="Show only errors with analysis")
+@click.option("--patterns", is_flag=True, help="Find repeated patterns")
+@click.option("--thread", type=str, help="Follow specific thread ID")
+@click.option("--correlation", type=str, help="Follow specific correlation ID")
+@click.option(
+    "--hierarchy", is_flag=True, help="Show thread hierarchy tree (with --thread or --correlation)"
+)
+@click.option("--waterfall", is_flag=True, help="Show waterfall timeline (with --hierarchy)")
+@click.option("--flamegraph", is_flag=True, help="Show flamegraph visualization (with --hierarchy)")
+@click.option(
+    "--show-error-flow",
+    is_flag=True,
+    help="Analyze error propagation through hierarchy (with --hierarchy)",
+)
+@click.option("--max-depth", type=int, help="Maximum hierarchy depth to display")
+@click.option(
+    "--min-confidence",
+    type=float,
+    default=0.0,
+    help="Minimum confidence for hierarchy detection (0.0-1.0)",
+)
+@click.option("--context", type=int, default=3, help="Number of context lines (default: 3)")
+@click.option(
+    "--output",
+    type=click.Choice(["full", "summary", "count", "compact"]),
+    default="summary",
+    help="Output format (default: summary)",
+)
+@click.option("--json", "output_json", is_flag=True, help="Output as JSON")
+@click.option(
+    "--min-occurrences", type=int, default=3, help="Minimum pattern occurrences (default: 3)"
+)
+def investigate(
+    files: tuple,
+    auto_insights: bool,
+    errors: bool,
+    patterns: bool,
+    thread: Optional[str],
+    correlation: Optional[str],
+    hierarchy: bool,
+    waterfall: bool,
+    flamegraph: bool,
+    show_error_flow: bool,
+    max_depth: Optional[int],
+    min_confidence: float,
+    context: int,
+    output: str,
+    output_json: bool,
+    min_occurrences: int,
+):
+    """
+    Investigate log files with smart analysis and insights.
+
+    Examples:
+        logler investigate app.log --auto-insights        # Auto-detect issues
+        logler investigate app.log --errors               # Analyze errors
+        logler investigate app.log --patterns             # Find repeated patterns
+        logler investigate app.log --thread worker-1      # Follow specific thread
+        logler investigate app.log --correlation req-123  # Follow request
+        logler investigate app.log --thread req-123 --hierarchy               # Show hierarchy tree
+        logler investigate app.log --thread req-123 --hierarchy --waterfall   # Show waterfall timeline
+        logler investigate app.log --thread req-123 --hierarchy --flamegraph  # Show flamegraph
+        logler investigate app.log --hierarchy --show-error-flow              # Analyze error propagation
+        logler investigate app.log --output summary                           # Token-efficient output
+    """
+    from .investigate import (
+        analyze_with_insights,
+        search,
+        find_patterns,
+        follow_thread,
+        follow_thread_hierarchy,
+        get_hierarchy_summary,
+        analyze_error_flow,
+        format_error_flow,
+    )
+    from rich.console import Console
+    from rich.table import Table
+    from rich.panel import Panel
+
+    console = Console()
+    file_list = list(files)
+
+    try:
+        # Auto-insights mode (most powerful)
+        if auto_insights:
+            console.print("[bold cyan]🎯 Running automatic insights analysis...[/bold cyan]\n")
+            result = analyze_with_insights(files=file_list, auto_investigate=True)
+
+            if output_json:
+                console.print_json(data=result)
+                return
+
+            # Display overview
+            overview = result["overview"]
+            console.print(
+                Panel(
+                    f"[bold]Total Logs:[/bold] {overview['total_logs']}\n"
+                    f"[bold]Error Count:[/bold] {overview['error_count']}\n"
+                    f"[bold]Error Rate:[/bold] {overview['error_rate']:.1%}\n"
+                    f"[bold]Log Levels:[/bold] {overview['log_levels']}",
+                    title="📊 Overview",
+                    border_style="cyan",
+                )
+            )
+
+            # Display insights
+            if result["insights"]:
+                console.print("\n[bold cyan]💡 Automatic Insights[/bold cyan]\n")
+                for i, insight in enumerate(result["insights"], 1):
+                    severity_color = {"high": "red", "medium": "yellow", "low": "green"}.get(
+                        insight["severity"], "white"
+                    )
+
+                    severity_icon = {"high": "🔴", "medium": "🟡", "low": "🟢"}.get(
+                        insight["severity"], "⚪"
+                    )
+
+                    console.print(
+                        f"{severity_icon} [bold {severity_color}]Insight #{i}:[/bold {severity_color}] {insight['type']}"
+                    )
+                    console.print(
+                        f"   [dim]Severity:[/dim] [{severity_color}]{insight['severity'].upper()}[/{severity_color}]"
+                    )
+                    console.print(f"   [dim]Description:[/dim] {insight['description']}")
+                    console.print(f"   [dim]Suggestion:[/dim] {insight['suggestion']}\n")
+
+            # Display suggestions
+            if result["suggestions"]:
+                console.print("[bold cyan]📋 Suggestions[/bold cyan]\n")
+                for i, suggestion in enumerate(result["suggestions"], 1):
+                    console.print(f"  {i}. {suggestion}")
+
+            # Display next steps
+            if result["next_steps"]:
+                console.print("\n[bold cyan]👉 Next Steps[/bold cyan]\n")
+                for i, step in enumerate(result["next_steps"], 1):
+                    console.print(f"  {i}. {step}")
+
+        # Pattern detection mode
+        elif patterns:
+            console.print(
+                f"[bold cyan]🔍 Finding repeated patterns (min {min_occurrences} occurrences)...[/bold cyan]\n"
+            )
+            result = find_patterns(files=file_list, min_occurrences=min_occurrences)
+
+            if output_json:
+                console.print_json(data=result)
+                return
+
+            pattern_list = result.get("patterns", [])
+            if pattern_list:
+                table = Table(title=f"Found {len(pattern_list)} Patterns")
+                table.add_column("Pattern", style="cyan", no_wrap=False)
+                table.add_column("Count", justify="right", style="green")
+                table.add_column("First Seen", style="yellow")
+                table.add_column("Last Seen", style="yellow")
+
+                for pattern in pattern_list[:20]:  # Show top 20
+                    pattern_text = pattern.get("pattern", "")[:80]
+                    count = pattern.get("occurrences", 0)
+                    first = pattern.get("first_seen", "N/A")
+                    last = pattern.get("last_seen", "N/A")
+                    table.add_row(pattern_text, str(count), first, last)
+
+                console.print(table)
+            else:
+                console.print("[yellow]No repeated patterns found.[/yellow]")
+
+        # Thread/correlation following mode
+        elif thread or correlation:
+            identifier = thread or correlation
+            id_type = "thread" if thread else "correlation"
+
+            # Hierarchy mode
+            if hierarchy:
+                console.print(
+                    f"[bold cyan]🌳 Building hierarchy for {id_type}: {identifier}...[/bold cyan]\n"
+                )
+
+                try:
+                    hier_result = follow_thread_hierarchy(
+                        files=file_list,
+                        root_identifier=identifier,
+                        max_depth=max_depth,
+                        min_confidence=min_confidence,
+                    )
+
+                    if output_json:
+                        console.print_json(data=hier_result)
+                        return
+
+                    # Import tree formatter
+                    from .tree_formatter import format_tree, format_waterfall, format_flamegraph
+
+                    # Show summary first
+                    summary = get_hierarchy_summary(hier_result)
+                    console.print(summary)
+                    console.print()
+
+                    # Show tree visualization
+                    if waterfall:
+                        console.print("[bold cyan]📊 Waterfall Timeline[/bold cyan]\n")
+                        waterfall_str = format_waterfall(hier_result, width=100)
+                        console.print(waterfall_str)
+                    elif flamegraph:
+                        console.print("[bold cyan]🔥 Flamegraph Visualization[/bold cyan]\n")
+                        flamegraph_str = format_flamegraph(hier_result, width=100)
+                        console.print(flamegraph_str)
+                    else:
+                        console.print("[bold cyan]🌲 Hierarchy Tree[/bold cyan]\n")
+                        tree_str = format_tree(
+                            hier_result,
+                            mode="detailed",
+                            show_duration=True,
+                            show_errors=True,
+                            max_depth=max_depth,
+                            use_colors=True,
+                        )
+                        console.print(tree_str)
+
+                    # Show error flow analysis if requested
+                    if show_error_flow:
+                        console.print()
+                        console.print("[bold cyan]🔍 Error Flow Analysis[/bold cyan]\n")
+                        error_analysis = analyze_error_flow(hier_result)
+                        if output_json:
+                            console.print_json(data=error_analysis)
+                        else:
+                            error_flow_str = format_error_flow(error_analysis)
+                            console.print(error_flow_str)
+
+                except Exception as e:
+                    console.print(f"[red]❌ Error building hierarchy: {e}[/red]")
+                    console.print("[yellow]Falling back to regular thread following...[/yellow]\n")
+                    hierarchy = False  # Fall through to regular mode
+
+            # Regular thread following mode
+            if not hierarchy:
+                console.print(f"[bold cyan]🧵 Following {id_type}: {identifier}...[/bold cyan]\n")
+
+                result = follow_thread(
+                    files=file_list, thread_id=thread, correlation_id=correlation
+                )
+
+                if output_json:
+                    console.print_json(data=result)
+                    return
+
+                entries = result.get("entries", [])
+                total = result.get("total_entries", len(entries))
+                duration = result.get("duration_ms", 0)
+
+                console.print(f"[bold]Found {total} entries[/bold]")
+                if duration:
+                    console.print(f"[bold]Duration:[/bold] {duration}ms\n")
+
+                # Display entries
+                for entry in entries[:50]:  # Limit display to 50
+                    timestamp = entry.get("timestamp", "N/A")
+                    level = entry.get("level", "INFO")
+                    message = entry.get("message", "")[:100]
+
+                    level_color = {
+                        "ERROR": "red",
+                        "FATAL": "red",
+                        "WARN": "yellow",
+                        "WARNING": "yellow",
+                        "INFO": "cyan",
+                        "DEBUG": "dim",
+                        "TRACE": "dim",
+                    }.get(level, "white")
+
+                    console.print(
+                        f"[dim]{timestamp}[/dim] [{level_color}]{level:8s}[/{level_color}] {message}"
+                    )
+
+                if len(entries) > 50:
+                    console.print(f"\n[dim]... and {len(entries) - 50} more entries[/dim]")
+
+        # Error analysis mode
+        elif errors:
+            console.print("[bold cyan]❌ Analyzing errors...[/bold cyan]\n")
+            result = search(
+                files=file_list, level="ERROR", context_lines=context, output_format=output
+            )
+
+            if output_json:
+                console.print_json(data=result)
+                return
+
+            if output == "summary":
+                total = result.get("total_matches", 0)
+                unique = result.get("unique_messages", 0)
+                console.print(f"[bold]Total Errors:[/bold] {total}")
+                console.print(f"[bold]Unique Messages:[/bold] {unique}\n")
+
+                top_messages = result.get("top_messages", [])
+                if top_messages:
+                    table = Table(title="Top Error Messages")
+                    table.add_column("Message", style="red", no_wrap=False)
+                    table.add_column("Count", justify="right", style="green")
+                    table.add_column("First Seen", style="yellow")
+
+                    for msg in top_messages[:10]:
+                        message = msg.get("message", "")[:80]
+                        count = msg.get("count", 0)
+                        first = msg.get("first_seen", "N/A")
+                        table.add_row(message, str(count), first)
+
+                    console.print(table)
+
+            elif output == "count":
+                console.print_json(data=result)
+
+            elif output == "compact":
+                matches = result.get("matches", [])
+                for match in matches[:50]:
+                    time = match.get("time", "N/A")
+                    msg = match.get("msg", "")
+                    console.print(f"[dim]{time}[/dim] [red]ERROR[/red] {msg}")
+
+            else:  # full
+                results = result.get("results", [])
+                for item in results[:50]:
+                    entry = item.get("entry", {})
+                    timestamp = entry.get("timestamp", "N/A")
+                    message = entry.get("message", "")
+                    console.print(f"[dim]{timestamp}[/dim] [red]ERROR[/red] {message}")
+
+        # Default search mode
+        else:
+            console.print("[bold cyan]🔍 Searching logs...[/bold cyan]\n")
+            result = search(files=file_list, context_lines=context, output_format=output)
+
+            if output_json:
+                console.print_json(data=result)
+                return
+
+            total = result.get("total_matches", 0)
+            console.print(f"[bold]Total matches:[/bold] {total}\n")
+
+            if output == "summary":
+                console.print_json(data=result)
+            elif output == "count":
+                console.print_json(data=result)
+            else:
+                results = result.get("results", [])
+                for item in results[:50]:
+                    entry = item.get("entry", {})
+                    timestamp = entry.get("timestamp", "N/A")
+                    level = entry.get("level", "INFO")
+                    message = entry.get("message", "")[:100]
+                    console.print(f"[dim]{timestamp}[/dim] {level:8s} {message}")
+
+    except Exception as e:
+        Console(stderr=True).print(f"[red]❌ Error:[/red] {e}")
+        if "--debug" in sys.argv:
+            import traceback
+
+            traceback.print_exc()
+        sys.exit(1)
+
+
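The investigate command is a thin wrapper over logler.investigate, so the same analysis is available programmatically. A sketch, with keyword arguments and result keys copied from the CLI's own calls above rather than from any separate documentation:

    from logler.investigate import analyze_with_insights, follow_thread, search

    errors = search(files=["app.log"], level="ERROR", context_lines=3, output_format="summary")
    print(errors.get("total_matches", 0), "error matches")

    trace = follow_thread(files=["app.log"], thread_id="worker-1", correlation_id=None)
    for entry in trace.get("entries", [])[:10]:
        print(entry.get("timestamp"), entry.get("level"), entry.get("message"))

    insights = analyze_with_insights(files=["app.log"], auto_investigate=True)
    for item in insights.get("insights", []):
        print(item["severity"], item["type"], "-", item["suggestion"])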
+@main.command()
+@click.argument("pattern", required=True)
+@click.option("--directory", "-d", default=".", help="Directory to watch")
+@click.option("--recursive", "-r", is_flag=True, help="Watch recursively")
+def watch(pattern: str, directory: str, recursive: bool):
+    """
+    Watch for new log files matching a pattern.
+
+    Examples:
+        logler watch "*.log"                  # Watch current directory
+        logler watch "app-*.log" -d /var/log  # Watch specific directory
+        logler watch "*.log" -r               # Watch recursively
+    """
+    from .watcher import FileWatcher
+    from rich.console import Console
+
+    console = Console()
+    console.print(f"👀 Watching for files matching: [cyan]{pattern}[/cyan]")
+    console.print(f"📁 Directory: [yellow]{directory}[/yellow]")
+
+    watcher = FileWatcher(pattern, directory, recursive)
+
+    try:
+        asyncio.run(watcher.watch())
+    except KeyboardInterrupt:
+        console.print("\n🛑 Stopped watching")
+
+
+# Register the LLM command group
+main.add_command(llm_group)
+
+
+if __name__ == "__main__":
+    main()
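Because main is an ordinary click group, the CLI can be exercised in-process with click's test runner; a sketch, where the sample log line is only a guess at a format LogParser accepts:

    from click.testing import CliRunner

    from logler.cli import main

    runner = CliRunner()
    with runner.isolated_filesystem():
        with open("app.log", "w") as f:
            f.write("2024-01-01T00:00:00 INFO [worker-1] service started\n")
        result = runner.invoke(main, ["stats", "app.log", "--json"])
        assert result.exit_code == 0, result.output
        print(result.output)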