logler 1.1.2-cp311-cp311-win_amd64.whl → 1.1.3-cp311-cp311-win_amd64.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
logler/cli.py CHANGED
@@ -140,8 +140,11 @@ def stats(files: tuple, output_json: bool):
 @click.option("--patterns", is_flag=True, help="Find repeated patterns")
 @click.option("--thread", type=str, help="Follow specific thread ID")
 @click.option("--correlation", type=str, help="Follow specific correlation ID")
+@click.option("--trace", type=str, help="Follow specific trace ID")
 @click.option(
-    "--hierarchy", is_flag=True, help="Show thread hierarchy tree (with --thread or --correlation)"
+    "--hierarchy",
+    is_flag=True,
+    help="Show thread hierarchy tree (with --thread, --correlation, or --trace)",
 )
 @click.option("--waterfall", is_flag=True, help="Show waterfall timeline (with --hierarchy)")
 @click.option("--flamegraph", is_flag=True, help="Show flamegraph visualization (with --hierarchy)")
@@ -175,6 +178,7 @@ def investigate(
     patterns: bool,
     thread: Optional[str],
     correlation: Optional[str],
+    trace: Optional[str],
     hierarchy: bool,
     waterfall: bool,
     flamegraph: bool,
@@ -195,9 +199,10 @@ def investigate(
         logler investigate app.log --patterns                 # Find repeated patterns
         logler investigate app.log --thread worker-1          # Follow specific thread
         logler investigate app.log --correlation req-123      # Follow request
-        logler investigate app.log --thread req-123 --hierarchy               # Show hierarchy tree
-        logler investigate app.log --thread req-123 --hierarchy --waterfall   # Show waterfall timeline
-        logler investigate app.log --thread req-123 --hierarchy --flamegraph  # Show flamegraph
+        logler investigate app.log --trace trace-abc123       # Follow distributed trace
+        logler investigate app.log --correlation req-123 --hierarchy               # Show hierarchy tree
+        logler investigate app.log --correlation req-123 --hierarchy --waterfall   # Show waterfall timeline
+        logler investigate app.log --correlation req-123 --hierarchy --flamegraph  # Show flamegraph
         logler investigate app.log --hierarchy --show-error-flow  # Analyze error propagation
         logler investigate app.log --output summary               # Token-efficient output
     """
@@ -217,8 +222,14 @@ def investigate(

     console = Console()
     file_list = list(files)
+    id_args = {"thread": thread, "correlation": correlation, "trace": trace}
+    provided_ids = [name for name, value in id_args.items() if value]

     try:
+        if len(provided_ids) > 1:
+            console.print("[red]❌ Provide only one of --thread, --correlation, or --trace.[/red]")
+            sys.exit(2)
+
         # Auto-insights mode (most powerful)
         if auto_insights:
             console.print("[bold cyan]🎯 Running automatic insights analysis...[/bold cyan]\n")
@@ -305,9 +316,9 @@
             console.print("[yellow]No repeated patterns found.[/yellow]")

         # Thread/correlation following mode
-        elif thread or correlation:
-            identifier = thread or correlation
-            id_type = "thread" if thread else "correlation"
+        elif thread or correlation or trace:
+            identifier = thread or correlation or trace
+            id_type = "thread" if thread else "correlation" if correlation else "trace"

             # Hierarchy mode
             if hierarchy:
@@ -377,7 +388,7 @@
             console.print(f"[bold cyan]🧵 Following {id_type}: {identifier}...[/bold cyan]\n")

             result = follow_thread(
-                files=file_list, thread_id=thread, correlation_id=correlation
+                files=file_list, thread_id=thread, correlation_id=correlation, trace_id=trace
             )

             if output_json:
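The new guard runs before any log file is read, so passing two ID flags fails fast with exit code 2. A minimal sketch of the expected behavior, assuming the `investigate` command object is importable from `logler.cli` (the diff only shows its body):

```python
# Sketch: the one-of --thread/--correlation/--trace guard added above.
# Assumes `investigate` is the click command defined in logler/cli.py.
from click.testing import CliRunner
from logler.cli import investigate

runner = CliRunner()
result = runner.invoke(
    investigate, ["app.log", "--thread", "worker-1", "--correlation", "req-123"]
)
assert result.exit_code == 2  # rejected before any log file is opened
```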
logler/investigate.py CHANGED
@@ -464,7 +464,8 @@ def follow_thread_hierarchy(
             "depth": 1
         },
         "error_nodes": ["worker-2.api-call"],
-        "detection_method": "ExplicitParentId" | "NamingPattern" | "TemporalInference" | "Mixed"
+        "detection_method": "ExplicitParentId" | "NamingPattern" | "TemporalInference" | "Mixed",
+        "detection_methods": ["ExplicitParentId", "NamingPattern"]
     }

     Example:
@@ -510,6 +511,16 @@ follow_thread_hierarchy(
     return json.loads(result_json)


+def _format_detection_method(hierarchy: Dict[str, Any]) -> str:
+    method = hierarchy.get("detection_method", "Unknown")
+    methods = hierarchy.get("detection_methods") or []
+    method_str = str(method)
+    method_list = [str(m) for m in methods if m]
+    if method_list and (method_str == "Mixed" or len(method_list) > 1):
+        return f"{method_str} ({', '.join(method_list)})"
+    return method_str
+
+
 def get_hierarchy_summary(hierarchy: Dict[str, Any]) -> str:
     """
     Generate a human-readable summary of a thread hierarchy.
@@ -531,12 +542,12 @@ def get_hierarchy_summary(hierarchy: Dict[str, Any]) -> str:
     lines.append("=== Thread Hierarchy Summary ===")
     lines.append(f"Total nodes: {hierarchy.get('total_nodes', 0)}")
     lines.append(f"Max depth: {hierarchy.get('max_depth', 0)}")
-    lines.append(f"Detection method: {hierarchy.get('detection_method', 'Unknown')}")
+    lines.append(f"Detection method: {_format_detection_method(hierarchy)}")

     # Duration
     total_duration = hierarchy.get("total_duration_ms")
     if total_duration:
-        lines.append(f"Total duration: {total_duration}ms ({total_duration/1000:.2f}s)")
+        lines.append(f"Total duration: {total_duration}ms ({total_duration / 1000:.2f}s)")

     # Concurrent operations
     concurrent = hierarchy.get("concurrent_count", 0)
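The new helper only expands the method list when it adds information beyond `detection_method` itself. A quick sketch of the expected outputs (`_format_detection_method` is a private helper; the dicts mirror the documented hierarchy payload):

```python
# Sketch: expected outputs of the _format_detection_method helper added above.
from logler.investigate import _format_detection_method  # private helper

mixed = {
    "detection_method": "Mixed",
    "detection_methods": ["ExplicitParentId", "NamingPattern"],
}
single = {"detection_method": "ExplicitParentId"}

assert _format_detection_method(mixed) == "Mixed (ExplicitParentId, NamingPattern)"
assert _format_detection_method(single) == "ExplicitParentId"  # no redundant suffix
```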
@@ -3167,7 +3178,7 @@ class InvestigationSession:

         for i, entry in enumerate(self.history):
             marker = "→" if i == self.current_index else " "
-            lines.append(f"  {marker} {i+1}. {entry['description']}")
+            lines.append(f"  {marker} {i + 1}. {entry['description']}")
             if entry.get("result_summary"):
                 for key, value in entry["result_summary"].items():
                     lines.append(f"      {key}: {value}")
@@ -3244,7 +3255,7 @@ class InvestigationSession:
             desc = entry["description"]
             operation = entry["operation"]

-            lines.append(f"### Step {i+1}: {desc}")
+            lines.append(f"### Step {i + 1}: {desc}")
             lines.append("")
             lines.append(f"- **Time:** {timestamp}")
             lines.append(f"- **Operation:** `{operation}`")
@@ -3294,7 +3305,7 @@ class InvestigationSession:

         for i, entry in enumerate(self.history):
             timestamp = entry.get("timestamp", "Unknown")
-            lines.append(f"{i+1}. [{timestamp}] {entry['description']}")
+            lines.append(f"{i + 1}. [{timestamp}] {entry['description']}")

             if entry.get("result_summary"):
                 for key, value in entry["result_summary"].items():
@@ -3685,7 +3696,9 @@ def analyze_with_insights(
                 "suggestion": "Investigate most common errors first",
             }
         )
-        next_steps.append("Run: find_patterns(files, min_occurrences=3)")
+        next_steps.append(
+            'Run: logler llm sql "SELECT message, COUNT(*) FROM logs GROUP BY message ORDER BY COUNT(*) DESC" to find patterns'
+        )

     # Insight 2: Pattern detection
     if auto_investigate and error_count > 0:
@@ -3743,13 +3756,17 @@
                 "suggestion": "Compare successful vs failed requests",
             }
         )
-        next_steps.append("Use: compare_threads() to find differences")
+        next_steps.append(
+            "Run: logler llm compare <failed_id> <success_id> to find differences"
+        )

     # Generate suggestions based on insights
     if not suggestions:
         if error_count > 0:
             suggestions.append("Start by examining the first error - it may be the root cause")
-            suggestions.append("Use follow_thread() to see full request flow")
+            suggestions.append(
+                "Run: logler llm correlate <correlation_id> to see full request flow"
+            )
         else:
             suggestions.append("No errors found - logs look healthy")
logler/llm_cli.py CHANGED
@@ -498,8 +498,8 @@ def sample(files: tuple, strategy: str, size: int, pretty: bool):
         if "level_distribution" in result:
             output["sample"]["coverage"] = {"levels": result["level_distribution"]}

-        # Transform entries
-        for entry in result.get("entries", []):
+        # Transform entries (key is 'samples' from Rust, not 'entries')
+        for entry in result.get("samples", []) or result.get("entries", []):
             out_entry = {
                 "line_number": entry.get("line_number"),
                 "timestamp": entry.get("timestamp"),
@@ -848,7 +848,7 @@ def verify_pattern(
            match_info["groups"] = list(match.groups())
            for j, grp in enumerate(match.groups()):
                if grp:
-                   group_values[f"group_{j+1}"][grp] += 1
+                   group_values[f"group_{j + 1}"][grp] += 1

            matches.append(match_info)

@@ -1915,3 +1915,301 @@ def export_trace(identifier: str, files: tuple, export_format: str, pretty: bool

     except Exception as e:
         _error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
+
+
+# =============================================================================
+# Compare Command - Compare two requests side by side
+# =============================================================================
+
+
+@llm.command()
+@click.argument("id1")
+@click.argument("id2")
+@click.option("--files", "-f", multiple=True, help="Files to search (supports globs)")
+@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
+def compare(id1: str, id2: str, files: tuple, pretty: bool):
+    """
+    Compare two requests/traces side by side.
+
+    Shows differences between a failed and successful request,
+    helping identify what went wrong.
+
+    Example:
+        logler llm compare req-001 req-003 --files "*.log"
+    """
+    from . import investigate
+
+    try:
+        file_list = _expand_globs(list(files)) if files else _expand_globs(["*.log"])
+        if not file_list:
+            _error_json(f"No files found matching: {files or ['*.log']}")
+
+        # Get timelines for both requests
+        result1 = investigate.follow_thread(file_list, correlation_id=id1)
+        result2 = investigate.follow_thread(file_list, correlation_id=id2)
+
+        entries1 = result1.get("entries", [])
+        entries2 = result2.get("entries", [])
+
+        # Analyze each request
+        def analyze_request(entries: List[Dict[str, Any]], req_id: str) -> Dict[str, Any]:
+            if not entries:
+                return {"id": req_id, "found": False}
+
+            levels = defaultdict(int)
+            messages = []
+            timestamps = []
+            errors = []
+
+            for e in entries:
+                level = e.get("level", "UNKNOWN")
+                levels[level] += 1
+                messages.append(e.get("message", ""))
+                if e.get("timestamp"):
+                    timestamps.append(e["timestamp"])
+                if level in ["ERROR", "FATAL", "CRITICAL"]:
+                    errors.append(
+                        {
+                            "message": e.get("message"),
+                            "timestamp": e.get("timestamp"),
+                            "line_number": e.get("line_number"),
+                        }
+                    )
+
+            # Calculate duration
+            duration_ms = None
+            if len(timestamps) >= 2:
+                try:
+                    start = datetime.fromisoformat(timestamps[0].replace("Z", "+00:00"))
+                    end = datetime.fromisoformat(timestamps[-1].replace("Z", "+00:00"))
+                    duration_ms = int((end - start).total_seconds() * 1000)
+                except (ValueError, TypeError):
+                    pass
+
+            return {
+                "id": req_id,
+                "found": True,
+                "entry_count": len(entries),
+                "duration_ms": duration_ms,
+                "outcome": "error" if errors else "success",
+                "levels": dict(levels),
+                "errors": errors,
+                "steps": [e.get("message", "")[:80] for e in entries],
+            }
+
+        analysis1 = analyze_request(entries1, id1)
+        analysis2 = analyze_request(entries2, id2)
+
+        # Find differences
+        differences = []
+
+        if analysis1["found"] and analysis2["found"]:
+            # Duration difference
+            if analysis1.get("duration_ms") and analysis2.get("duration_ms"):
+                diff_ms = analysis1["duration_ms"] - analysis2["duration_ms"]
+                if abs(diff_ms) > 100:  # Significant difference
+                    differences.append(
+                        {
+                            "type": "duration",
+                            "description": f"{id1} took {diff_ms:+d}ms compared to {id2}",
+                            "value1": analysis1["duration_ms"],
+                            "value2": analysis2["duration_ms"],
+                        }
+                    )
+
+            # Entry count difference
+            if analysis1["entry_count"] != analysis2["entry_count"]:
+                differences.append(
+                    {
+                        "type": "entry_count",
+                        "description": f"{id1} has {analysis1['entry_count']} entries, {id2} has {analysis2['entry_count']}",
+                        "value1": analysis1["entry_count"],
+                        "value2": analysis2["entry_count"],
+                    }
+                )
+
+            # Outcome difference
+            if analysis1["outcome"] != analysis2["outcome"]:
+                differences.append(
+                    {
+                        "type": "outcome",
+                        "description": f"{id1} {analysis1['outcome']}, {id2} {analysis2['outcome']}",
+                        "value1": analysis1["outcome"],
+                        "value2": analysis2["outcome"],
+                    }
+                )
+
+            # Find where they diverge
+            steps1 = analysis1.get("steps", [])
+            steps2 = analysis2.get("steps", [])
+            divergence_point = None
+            for i, (s1, s2) in enumerate(zip(steps1, steps2)):
+                if s1 != s2:
+                    divergence_point = {
+                        "step": i + 1,
+                        "request1": s1,
+                        "request2": s2,
+                    }
+                    break
+
+            if divergence_point:
+                differences.append(
+                    {
+                        "type": "divergence",
+                        "description": f"Requests diverge at step {divergence_point['step']}",
+                        "detail": divergence_point,
+                    }
+                )
+
+        output = {
+            "comparison": {
+                "request1": analysis1,
+                "request2": analysis2,
+            },
+            "differences": differences,
+            "summary": f"{id1}: {analysis1.get('outcome', 'not found')}, {id2}: {analysis2.get('outcome', 'not found')}",
+        }
+
+        # Add recommendation if one failed and one succeeded
+        if analysis1.get("outcome") == "error" and analysis2.get("outcome") == "success":
+            if analysis1.get("errors"):
+                output["recommendation"] = (
+                    f"Investigate error in {id1}: {analysis1['errors'][0].get('message', 'Unknown error')}"
+                )
+        elif analysis2.get("outcome") == "error" and analysis1.get("outcome") == "success":
+            if analysis2.get("errors"):
+                output["recommendation"] = (
+                    f"Investigate error in {id2}: {analysis2['errors'][0].get('message', 'Unknown error')}"
+                )
+
+        _output_json(output, pretty)
+        sys.exit(EXIT_SUCCESS)
+
+    except Exception as e:
+        _error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
+
+
+# =============================================================================
+# Summarize Command - Quick text summary for LLMs
+# =============================================================================
+
+
+@llm.command()
+@click.argument("files", nargs=-1, required=True)
+@click.option(
+    "--focus",
+    type=click.Choice(["errors", "all", "warnings"]),
+    default="errors",
+    help="What to focus on",
+)
+@click.option("--pretty", is_flag=True, help="Pretty-print JSON output")
+def summarize(files: tuple, focus: str, pretty: bool):
+    """
+    Generate a concise summary of log contents.
+
+    Returns structured data with a human-readable summary,
+    perfect for LLM context.
+
+    Example:
+        logler llm summarize app.log --focus errors
+    """
+    from .parser import LogParser
+
+    try:
+        file_list = _expand_globs(list(files))
+        if not file_list:
+            _error_json(f"No files found matching: {files}")
+
+        parser = LogParser()
+
+        # Collect stats
+        total = 0
+        by_level = defaultdict(int)
+        errors = []
+        warnings = []
+        unique_errors = defaultdict(int)
+        time_range = {"start": None, "end": None}
+        correlation_ids = set()
+
+        for file_path in file_list:
+            try:
+                with open(file_path, "r", errors="replace") as f:
+                    for i, line in enumerate(f):
+                        line = line.rstrip()
+                        if not line:
+                            continue
+
+                        entry = parser.parse_line(i + 1, line)
+                        total += 1
+
+                        level = str(entry.level).upper() if entry.level else "UNKNOWN"
+                        by_level[level] += 1
+
+                        if entry.timestamp:
+                            ts_str = str(entry.timestamp)
+                            if not time_range["start"] or ts_str < time_range["start"]:
+                                time_range["start"] = ts_str
+                            if not time_range["end"] or ts_str > time_range["end"]:
+                                time_range["end"] = ts_str
+
+                        if entry.correlation_id:
+                            correlation_ids.add(entry.correlation_id)
+
+                        if level == "ERROR":
+                            msg = entry.message or line[:100]
+                            unique_errors[msg] += 1
+                            if len(errors) < 10:
+                                errors.append(
+                                    {
+                                        "line": i + 1,
+                                        "message": msg,
+                                        "correlation_id": entry.correlation_id,
+                                    }
+                                )
+                        elif level in ["WARN", "WARNING"]:
+                            if len(warnings) < 5:
+                                warnings.append(
+                                    {
+                                        "line": i + 1,
+                                        "message": entry.message or line[:100],
+                                    }
+                                )
+
+            except (FileNotFoundError, PermissionError):
+                pass
+
+        # Build human-readable summary
+        error_count = by_level.get("ERROR", 0)
+        warn_count = by_level.get("WARN", 0) + by_level.get("WARNING", 0)
+
+        if error_count == 0 and warn_count == 0:
+            summary_text = f"Clean: {total} log entries, no errors or warnings"
+        elif error_count == 0:
+            summary_text = f"{total} entries with {warn_count} warnings, no errors"
+        else:
+            error_types = len(unique_errors)
+            summary_text = f"{total} entries, {error_count} errors ({error_types} unique), {warn_count} warnings"
+
+        # Add top error
+        if unique_errors:
+            top_error = max(unique_errors.items(), key=lambda x: x[1])
+            summary_text += f'. Top error: "{top_error[0][:50]}" ({top_error[1]}x)'
+
+        output = {
+            "summary": summary_text,
+            "stats": {
+                "total_entries": total,
+                "by_level": dict(by_level),
+                "unique_correlation_ids": len(correlation_ids),
+                "time_range": time_range if time_range["start"] else None,
+            },
+            "errors": errors if focus in ["errors", "all"] else [],
+            "warnings": warnings if focus in ["warnings", "all"] else [],
+            "unique_error_messages": dict(unique_errors) if unique_errors else {},
+        }
+
+        _output_json(output, pretty)
+        sys.exit(EXIT_SUCCESS if total > 0 else EXIT_NO_RESULTS)
+
+    except Exception as e:
+        _error_json(f"Internal error: {str(e)}", EXIT_INTERNAL_ERROR)
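Both new commands print a single JSON document on stdout, so their output can be piped straight into a parser. A minimal consumer sketch for `summarize` (assumes the `logler` entry point is on PATH; the keys match the `output` dict built above):

```python
# Sketch: consuming the JSON emitted by the new `logler llm summarize` command.
import json
import subprocess

proc = subprocess.run(
    ["logler", "llm", "summarize", "app.log", "--focus", "errors"],
    capture_output=True,
    text=True,
)
report = json.loads(proc.stdout)
print(report["summary"])             # one-line human-readable summary
print(report["stats"]["by_level"])   # e.g. {"INFO": 120, "ERROR": 3}
```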
logler/models.py CHANGED
@@ -66,6 +66,7 @@ class DetectionMethod(str, Enum):
     EXPLICIT_PARENT_ID = "ExplicitParentId"
     NAMING_PATTERN = "NamingPattern"
     TEMPORAL_INFERENCE = "TemporalInference"
+    CORRELATION_CHAIN = "CorrelationChain"
     MIXED = "Mixed"
     UNKNOWN = "Unknown"

@@ -235,6 +236,9 @@ class ThreadHierarchy(BaseModel):
     bottleneck: Optional[BottleneckInfo] = Field(None, description="Performance bottleneck")
     error_nodes: List[str] = Field(default_factory=list, description="Node IDs with errors")
     detection_method: str = Field("Unknown", description="How relationships were detected")
+    detection_methods: List[str] = Field(
+        default_factory=list, description="Detection methods used (detailed)"
+    )


# =============================================================================
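Because `DetectionMethod` subclasses `str`, the new member serializes directly into the string-typed `detection_methods` list. A small sketch of that round trip (names taken from the model above):

```python
# Sketch: the new enum member interoperating with the new list field.
from logler.models import DetectionMethod

method = DetectionMethod.CORRELATION_CHAIN
assert method == "CorrelationChain"  # str-backed enum compares to its value

# ThreadHierarchy.detection_methods stores plain strings, so members join cleanly:
methods = [DetectionMethod.EXPLICIT_PARENT_ID, method]
assert ", ".join(methods) == "ExplicitParentId, CorrelationChain"
```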
logler/parser.py CHANGED
@@ -38,6 +38,7 @@ class LogEntry:
     trace_id: Optional[str] = None
     span_id: Optional[str] = None
     service_name: Optional[str] = None
+    format: Optional[str] = None
     fields: Dict[str, Any] = field(default_factory=dict)

     def __post_init__(self):
@@ -82,6 +83,7 @@ class LogParser:
     def _parse_json(self, line_number: int, raw: str, data: dict) -> LogEntry:
         """Parse JSON log entry."""
         entry = LogEntry(line_number=line_number, raw=raw)
+        entry.format = "Json"

         # Extract timestamp
         for ts_field in ["timestamp", "time", "ts", "@timestamp", "datetime"]:
@@ -167,6 +169,7 @@
     def _parse_plain(self, line_number: int, raw: str) -> LogEntry:
         """Parse plain text log entry."""
         entry = LogEntry(line_number=line_number, raw=raw, message=raw)
+        entry.format = "PlainText"

         # Extract timestamp
         ts_match = self.PATTERNS["timestamp"].search(raw)
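Parsed entries now carry a `format` tag, `"Json"` or `"PlainText"`, depending on which private parser handled the line. A sketch assuming `LogParser.parse_line` dispatches to those parsers (it is called that way in `llm_cli.py`):

```python
# Sketch: the new LogEntry.format tag set during parsing.
from logler.parser import LogParser

parser = LogParser()
json_entry = parser.parse_line(1, '{"level": "ERROR", "message": "boom"}')
plain_entry = parser.parse_line(2, "2024-01-01 00:00:00 ERROR boom")

assert json_entry.format == "Json"
assert plain_entry.format == "PlainText"
```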
logler/tree_formatter.py CHANGED
@@ -63,6 +63,27 @@ def format_tree(
     )


+def _format_detection_method(hierarchy: Dict[str, Any]) -> str:
+    method = hierarchy.get("detection_method", "Unknown")
+    methods = hierarchy.get("detection_methods") or []
+    method_str = str(method)
+    method_list = [str(m) for m in methods if m]
+    if method_list and (method_str == "Mixed" or len(method_list) > 1):
+        return f"{method_str} ({', '.join(method_list)})"
+    return method_str
+
+
+def _timeline_label(hierarchy: Dict[str, Any]) -> str:
+    roots = hierarchy.get("roots", [])
+    if not roots:
+        return "Hierarchy"
+    root = roots[0]
+    label = root.get("name") or root.get("operation_name") or root.get("id", "root")
+    if len(roots) > 1:
+        label = f"{label} (+{len(roots) - 1} more)"
+    return label
+
+
 def _format_ascii_tree(
     hierarchy: Dict[str, Any],
     mode: str,
@@ -80,7 +101,7 @@
     lines.append("=" * 70)
     lines.append(f"Total nodes: {hierarchy.get('total_nodes', 0)}")
     lines.append(f"Max depth: {hierarchy.get('max_depth', 0)}")
-    lines.append(f"Detection: {hierarchy.get('detection_method', 'Unknown')}")
+    lines.append(f"Detection: {_format_detection_method(hierarchy)}")

     total_duration = hierarchy.get("total_duration_ms")
     if total_duration and show_duration:
@@ -412,7 +433,7 @@ def _format_duration(ms: Optional[int]) -> str:
     if ms < 1000:
         return f"{ms}ms"
     elif ms < 60000:
-        return f"{ms/1000:.2f}s"
+        return f"{ms / 1000:.2f}s"
     else:
         minutes = ms // 60000
         seconds = (ms % 60000) / 1000
@@ -495,10 +516,10 @@ format_waterfall(

     # Header
     lines.append("┌" + "─" * (effective_width - 2) + "┐")
-    header = f"Timeline: {hierarchy.get('detection_method', 'Hierarchy')} ({_format_duration(total_duration)})"
+    header = f"Timeline: {_timeline_label(hierarchy)} ({_format_duration(total_duration)})"
     if len(header) > effective_width - 4:
         header = header[: effective_width - 7] + "..."
-    lines.append(f"│ {header:<{effective_width-4}} │")
+    lines.append(f"│ {header:<{effective_width - 4}} │")
     lines.append("├" + "─" * (effective_width - 2) + "┤")

     # Collect all nodes in order
@@ -702,7 +723,7 @@ format_flamegraph(
         return "<1ms"  # Show marker for 0ms durations
         if ms < 1000:
             return f"{ms:.0f}ms"
-        return f"{ms/1000:.2f}s"
+        return f"{ms / 1000:.2f}s"

     # Build layers by depth
     max_depth = hierarchy.get("max_depth", 0)
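With this change the waterfall header names the root operation instead of echoing the detection method. A sketch of `_timeline_label` on the hierarchy shape it reads (private helper, illustrative inputs):

```python
# Sketch: expected labels from the _timeline_label helper added above.
from logler.tree_formatter import _timeline_label  # private helper

single_root = {"roots": [{"name": "req-123"}]}
multi_root = {"roots": [{"name": "req-123"}, {"name": "req-124"}, {"name": "req-125"}]}

assert _timeline_label(single_root) == "req-123"
assert _timeline_label(multi_root) == "req-123 (+2 more)"
assert _timeline_label({}) == "Hierarchy"  # fallback when no roots are present
```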
logler-1.1.2.dist-info/METADATA → logler-1.1.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: logler
-Version: 1.1.2
+Version: 1.1.3
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: MIT License
@@ -92,75 +92,116 @@ A modern, feature-rich log viewer that makes debugging a pleasure. View logs in
 - 🤔 **Explain Feature** - Plain English explanations of cryptic errors with next steps
 - 💬 **Contextual Suggestions** - AI suggests what to investigate next based on findings

+### Public API Contract
+
+Each code block carries a **Contract ID** (e.g., `[C01]`). The test suite in `tests/test_readme.py` executes these snippets against the documented public APIs. When this section changes, the tests must change with it — CI proves the README.
+
+#### [C01] Auto-insights analysis
 ```python
 import logler.investigate as investigate

-# 🎯 One-line auto investigation with insights
 result = investigate.analyze_with_insights(files=["app.log"])
 print(result['insights'])  # Automatic pattern detection, error analysis, suggestions
+```
+
+#### [C02] Token-efficient search
+```python
+import logler.investigate as investigate

-# 📉 Token-efficient search (44x smaller output)
 errors = investigate.search(files=["app.log"], level="ERROR", output_format="summary")
 # Returns aggregated stats instead of all entries - perfect for limited context windows
+```
+
+#### [C03] Compare threads
+```python
+import logler.investigate as investigate

-# 🔀 Compare successful vs failed requests
 diff = investigate.compare_threads(
     files=["app.log"],
     correlation_a="req-success-123",
     correlation_b="req-failed-456"
 )
-print(diff['summary'])  # "Thread B took 2341ms longer and had 5 errors (cache miss, timeout)"
+print(diff['summary'])  # Comparison of two request flows
+```
+
+#### [C04] Cross-service timeline
+```python
+import logler.investigate as investigate

-# 🌐 Cross-service distributed tracing
 timeline = investigate.cross_service_timeline(
     files={"api": ["api.log"], "db": ["db.log"], "cache": ["cache.log"]},
     correlation_id="req-12345"
 )
 # See request flow: API → DB → Cache with latency breakdown
+```
+
+#### [C05] Investigation sessions
+```python
+import logler.investigate as investigate

-# 📝 Track investigation with sessions
 session = investigate.InvestigationSession(files=["app.log"], name="incident_2024")
 session.search(level="ERROR")
 session.find_patterns()
 session.add_note("Database connection pool exhausted")
 report = session.generate_report(format="markdown")  # Auto-generate report
+```
+
+#### [C06] Smart sampling
+```python
+import logler.investigate as investigate

-# 🎯 Smart sampling (representative sample of huge logs)
 sample = investigate.smart_sample(
     files=["huge.log"],
     strategy="errors_focused",  # or "diverse", "representative", "chronological"
     sample_size=50
 )
+```
+
+#### [C07] Error explanation
+```python
+import logler.investigate as investigate

-# 🤔 Explain cryptic errors in plain English
 explanation = investigate.explain(error_message="Connection pool exhausted", context="production")
 print(explanation)  # Common causes, next steps, production-specific advice
+```
+
+#### [C08] Thread hierarchy
+```python
+import logler.investigate as investigate

-# 🌳 Hierarchical thread visualization (NEW!)
 hierarchy = investigate.follow_thread_hierarchy(
     files=["app.log"],
     root_identifier="req-123",
     min_confidence=0.8  # Only show high-confidence relationships
 )
-
 # Automatic bottleneck detection
-if hierarchy['bottleneck']:
+if hierarchy.get('bottleneck'):
     print(f"Bottleneck: {hierarchy['bottleneck']['node_id']} took {hierarchy['bottleneck']['duration_ms']}ms")
+```
+
+#### [C09] Hierarchy summary
+```python
+import logler.investigate as investigate

-# Get summary
+# Using hierarchy from [C08]
 summary = investigate.get_hierarchy_summary(hierarchy)
 print(summary)  # Shows tree structure, errors, bottlenecks
+```

-# Visualize in CLI
+#### [C10] Tree visualization
+```python
 from logler.tree_formatter import print_tree, print_waterfall
+
+# Using hierarchy from [C08]
 print_tree(hierarchy, mode="detailed", show_duration=True)
-print_waterfall(hierarchy, width=100)  # Waterfall timeline showing parallel operations
+print_waterfall(hierarchy, width=100)  # Waterfall timeline
 ```

 **📚 Complete LLM documentation:**
-- [English Guide](docs/LLM_README.md) - Complete API and examples
+- [LLM CLI Reference](docs/LLM_CLI_REFERENCE.md) - All 16 CLI commands for AI agents
+- [Python API Guide](docs/LLM_README.md) - Library API and examples
+- [API Reference](docs/LLM_INVESTIGATION_API.md) - All investigation functions
 - [日本語ガイド](README.ja.md) - 完全なドキュメント
-- [API Reference](docs/LLM_INVESTIGATION_API.md) - All investigation tools
 - [Examples](examples/) - Production incident investigations

 ## 🚀 Quick Start
@@ -200,16 +241,48 @@ logler investigate app.log --errors # Analyze errors
 logler investigate app.log --patterns                # Find repeated patterns
 logler investigate app.log --thread worker-1         # Follow specific thread
 logler investigate app.log --correlation req-123     # Follow correlation ID
+logler investigate app.log --trace trace-abc123      # Follow distributed trace
 logler investigate app.log --output summary          # Token-efficient output

 # 🌳 NEW: Hierarchical Thread Visualization
 logler investigate app.log --correlation req-123 --hierarchy                 # Show thread hierarchy tree
-logler investigate app.log --correlation trace-abc123 --hierarchy --waterfall # Show waterfall timeline
+logler investigate app.log --trace trace-abc123 --hierarchy --waterfall      # Show waterfall timeline
 logler investigate app.log --correlation req-123 --hierarchy --flamegraph    # Show flamegraph view
 logler investigate app.log --hierarchy --show-error-flow                     # Analyze error propagation
 logler investigate app.log --thread worker-1 --hierarchy --max-depth 3       # Limit hierarchy depth
 ```

+**LLM-first CLI (JSON output by default):**
+
+Designed for AI agents - 16 commands with structured JSON output, no truncation.
+
+```bash
+# Assessment & Overview
+logler llm triage app.log --last 1h              # Quick severity assessment
+logler llm summarize app.log                     # Concise summary with stats
+logler llm schema app.log                        # Infer log structure
+
+# Search & Analysis
+logler llm search app.log --level ERROR          # Find entries (full results)
+logler llm sql "SELECT level, COUNT(*) FROM logs GROUP BY level" -f app.log
+
+# Request Tracing
+logler llm correlate req-123 --files "*.log"     # Follow correlation ID
+logler llm hierarchy trace-xyz --files "*.log"   # Build hierarchy tree
+logler llm bottleneck trace-xyz --files "*.log"  # Find slow operations
+
+# Comparison
+logler llm compare req-fail req-success --files "*.log"  # Compare requests
+logler llm diff app.log --baseline 1h            # Before/after analysis
+
+# Utilities
+logler llm sample app.log --strategy errors_focused --size 50
+logler llm context app.log 1523 --before 10 --after 10
+logler llm export trace-xyz --format jaeger
+```
+
+See **[LLM CLI Reference](docs/LLM_CLI_REFERENCE.md)** for complete documentation of all 16 commands.
+
 ### Visualization Modes

 **Tree View** - Shows parent-child relationships:
@@ -327,6 +400,7 @@ logler investigate app.log --patterns --min-occurrences 5
 # Follow a specific thread or request
 logler investigate app.log --thread worker-1
 logler investigate app.log --correlation req-abc123
+logler investigate app.log --trace trace-xyz789

 # Token-efficient output for LLMs
 logler investigate app.log --auto-insights --output summary
@@ -437,6 +511,9 @@ Track requests across services:
 # Follow a specific correlation ID
 logler investigate app.log --correlation req-12345

+# Follow a distributed trace ID
+logler investigate app.log --trace trace-xyz789
+
 # View across multiple service logs
 logler view app.log service.log --grep "req-12345"
 ```
logler-1.1.2.dist-info/RECORD → logler-1.1.3.dist-info/RECORD CHANGED
@@ -1,23 +1,23 @@
 logler\__init__.py,sha256=2BjL1FRLAqnHMa_3UMhmf6KQLnxkTbeUgTQQrBeZRuU,2921
 logler\bootstrap.py,sha256=4jWhVxy9Rq_PB1-3daVvo9_pS6QkimUmjLRYDfeMuA8,1276
 logler\cache.py,sha256=U5lstFY3FEA11pRHMk5UDlG5hIdLJwKszwVfWTprH4c,2399
-logler\cli.py,sha256=GiucDDgKBzA6qxdo67Eud2-9JyafEdJ7e1hBGtKb3So,21349
+logler\cli.py,sha256=_Vp-2YN1IDocCi76RP2vDVijaKAhM8B8prSwsq-_moc,21941
 logler\helpers.py,sha256=YYcXgPuW3aZs7Ty3y-U6xBQjo_zSivnOht3lwElHkt0,9464
-logler\investigate.py,sha256=zUs7lYeMmLT1ZfQwiQzLVhTIXogarrcQkIvjvzWCQyU,141853
-logler\llm_cli.py,sha256=L-RIp3vgAweloJ2VkhI609DdRh9d9dk6YoFRqSj-9Cs,68654
+logler\investigate.py,sha256=MlmI6wq87VsiBe27ri7xb2LBbrxbNmGEM8Xvlr4BL7E,142558
+logler\llm_cli.py,sha256=xk1bd4YZqCbC0J312PZBgpN8S-r3fQgRAs4FSwhAlgQ,80635
 logler\log_reader.py,sha256=d6vNx-2ZLc6XhbLDgH5nh84j6kZzTMElzZg80fL63Vo,8812
-logler\models.py,sha256=ET_wzQulZwRl5HxWNyqNrTFUBVwJEzfypv1YrXI4r8o,22258
-logler\parser.py,sha256=DmR4phqGVQXlb2vjZK6712jRN5gFVH1W3ABYVaufznQ,6691
+logler\models.py,sha256=UyF5apCtl1oUOFyEEN2_ORRgvd_1zpTbl7jQjEhh2Hw,22431
+logler\parser.py,sha256=sRpnTRClNi2ddPi0W20v4-KK5Alu4ZSkZyzp7dtGkIU,6792
 logler\safe_regex.py,sha256=7XREjaW74oCA9ocBFg-lAxu90aOM0CtArDnR9e1vOkY,3555
 logler\sql.py,sha256=nRwqqknSFpYhFGsjo_uVBG2Mg_Ez2jNj0UKxuJPF85Y,4996
 logler\terminal.py,sha256=kLZpa_VXKknWg2b-yK8-un2MZgeYj6DwNpdVrX2s5O0,8355
 logler\tracker.py,sha256=hI24eEBOyoGzQdDy9nyZQ04u7LX541UdTjcrzRoHXfw,4794
-logler\tree_formatter.py,sha256=57JdyD5BwKbiebz6xa53to6QMYxIJ11jE_SlJa2HPqY,29140
+logler\tree_formatter.py,sha256=t9SiRhLb7_9XZRwr3aU0SnYVuoNTu9YgUzvlRDB0Rl8,29885
 logler\watcher.py,sha256=yjacGh6ffqf7uoXuiR6HXR6uAP5RRof6gc7TLV-j5J0,1581
-logler-1.1.2.dist-info\METADATA,sha256=gwXRnvrzNLgynJUgI4ByFH1MkB93nCGBtWef62yFZDE,20910
-logler-1.1.2.dist-info\WHEEL,sha256=X79LywvMB9iCuFHu88xBAFTJDhRqJi6Yh9hhoCI9jao,97
-logler-1.1.2.dist-info\entry_points.txt,sha256=KAtycgnrhm85NgD_TCx2QslE9oClYHWKPM9NG598RNs,41
-logler-1.1.2.dist-info\licenses\LICENSE,sha256=rvAbCW736CB0UFBLmJ3Jz1vxrtFwCXv4F1OKNcDdY1s,1082
+logler-1.1.3.dist-info\METADATA,sha256=LGvA84NvVM48cBvfK0UAMCXU56pA5At8a0srggcn7hA,23117
+logler-1.1.3.dist-info\WHEEL,sha256=X79LywvMB9iCuFHu88xBAFTJDhRqJi6Yh9hhoCI9jao,97
+logler-1.1.3.dist-info\entry_points.txt,sha256=KAtycgnrhm85NgD_TCx2QslE9oClYHWKPM9NG598RNs,41
+logler-1.1.3.dist-info\licenses\LICENSE,sha256=rvAbCW736CB0UFBLmJ3Jz1vxrtFwCXv4F1OKNcDdY1s,1082
 logler_rs\__init__.py,sha256=y2ULUqMIhS92vwz6utTdkCn5l_T1Kso3YLGqCIZUxKY,119
-logler_rs\logler_rs.cp311-win_amd64.pyd,sha256=euHBNEEhDf-eu0XCmSOk7CYxoukZS4DHodZ6CUVJu94,2801664
-logler-1.1.2.dist-info\RECORD,,
+logler_rs\logler_rs.cp311-win_amd64.pyd,sha256=9sMC6nZc52IJzZlCiI4F0YNXB6G0zVQg-t6k2S82y4M,2770944
+logler-1.1.3.dist-info\RECORD,,