logler 1.1.2-cp311-cp311-win_amd64.whl → 1.2.0-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
logler/cli.py CHANGED
@@ -135,13 +135,16 @@ def stats(files: tuple, output_json: bool):
 
 @main.command()
 @click.argument("files", nargs=-1, required=True, type=click.Path(exists=True))
-@click.option("--auto-insights", is_flag=True, help="Run automatic insights analysis")
+@click.option("--auto-insights", is_flag=True, hidden=True, help="[DEPRECATED] Removed")
 @click.option("--errors", is_flag=True, help="Show only errors with analysis")
-@click.option("--patterns", is_flag=True, help="Find repeated patterns")
+@click.option("--patterns", is_flag=True, hidden=True, help="[DEPRECATED] Removed")
 @click.option("--thread", type=str, help="Follow specific thread ID")
 @click.option("--correlation", type=str, help="Follow specific correlation ID")
+@click.option("--trace", type=str, help="Follow specific trace ID")
 @click.option(
-    "--hierarchy", is_flag=True, help="Show thread hierarchy tree (with --thread or --correlation)"
+    "--hierarchy",
+    is_flag=True,
+    help="Show thread hierarchy tree (with --thread, --correlation, or --trace)",
 )
 @click.option("--waterfall", is_flag=True, help="Show waterfall timeline (with --hierarchy)")
 @click.option("--flamegraph", is_flag=True, help="Show flamegraph visualization (with --hierarchy)")
@@ -175,6 +178,7 @@ def investigate(
     patterns: bool,
     thread: Optional[str],
     correlation: Optional[str],
+    trace: Optional[str],
     hierarchy: bool,
     waterfall: bool,
     flamegraph: bool,
@@ -187,24 +191,21 @@ def investigate(
     min_occurrences: int,
 ):
     """
-    Investigate log files with smart analysis and insights.
+    Investigate log files with analysis tools.
 
     Examples:
-        logler investigate app.log --auto-insights       # Auto-detect issues
         logler investigate app.log --errors              # Analyze errors
-        logler investigate app.log --patterns            # Find repeated patterns
         logler investigate app.log --thread worker-1     # Follow specific thread
         logler investigate app.log --correlation req-123 # Follow request
-        logler investigate app.log --thread req-123 --hierarchy # Show hierarchy tree
-        logler investigate app.log --thread req-123 --hierarchy --waterfall # Show waterfall timeline
-        logler investigate app.log --thread req-123 --hierarchy --flamegraph # Show flamegraph
+        logler investigate app.log --trace trace-abc123  # Follow distributed trace
+        logler investigate app.log --correlation req-123 --hierarchy # Show hierarchy tree
+        logler investigate app.log --correlation req-123 --hierarchy --waterfall # Show waterfall timeline
+        logler investigate app.log --correlation req-123 --hierarchy --flamegraph # Show flamegraph
         logler investigate app.log --hierarchy --show-error-flow # Analyze error propagation
         logler investigate app.log --output summary      # Token-efficient output
     """
     from .investigate import (
-        analyze_with_insights,
         search,
-        find_patterns,
         follow_thread,
         follow_thread_hierarchy,
         get_hierarchy_summary,
@@ -213,101 +214,44 @@ def investigate(
     )
     from rich.console import Console
     from rich.table import Table
-    from rich.panel import Panel
 
     console = Console()
     file_list = list(files)
+    id_args = {"thread": thread, "correlation": correlation, "trace": trace}
+    provided_ids = [name for name, value in id_args.items() if value]
 
     try:
-        # Auto-insights mode (most powerful)
-        if auto_insights:
-            console.print("[bold cyan]🎯 Running automatic insights analysis...[/bold cyan]\n")
-            result = analyze_with_insights(files=file_list, auto_investigate=True)
-
-            if output_json:
-                console.print_json(data=result)
-                return
+        if len(provided_ids) > 1:
+            console.print("[red]❌ Provide only one of --thread, --correlation, or --trace.[/red]")
+            sys.exit(2)
 
-            # Display overview
-            overview = result["overview"]
+        # Deprecated: Auto-insights mode
+        if auto_insights:
             console.print(
-                Panel(
-                    f"[bold]Total Logs:[/bold] {overview['total_logs']}\n"
-                    f"[bold]Error Count:[/bold] {overview['error_count']}\n"
-                    f"[bold]Error Rate:[/bold] {overview['error_rate']:.1%}\n"
-                    f"[bold]Log Levels:[/bold] {overview['log_levels']}",
-                    title="📊 Overview",
-                    border_style="cyan",
-                )
+                "[yellow]WARNING: --auto-insights is deprecated and has been removed.[/yellow]"
             )
+            console.print(
+                "[dim]Use 'logler llm search --level ERROR' for error analysis instead.[/dim]"
+            )
+            sys.exit(0)
 
-            # Display insights
-            if result["insights"]:
-                console.print("\n[bold cyan]💡 Automatic Insights[/bold cyan]\n")
-                for i, insight in enumerate(result["insights"], 1):
-                    severity_color = {"high": "red", "medium": "yellow", "low": "green"}.get(
-                        insight["severity"], "white"
-                    )
-
-                    severity_icon = {"high": "🔴", "medium": "🟡", "low": "🟢"}.get(
-                        insight["severity"], "⚪"
-                    )
-
-                    console.print(
-                        f"{severity_icon} [bold {severity_color}]Insight #{i}:[/bold {severity_color}] {insight['type']}"
-                    )
-                    console.print(
-                        f" [dim]Severity:[/dim] [{severity_color}]{insight['severity'].upper()}[/{severity_color}]"
-                    )
-                    console.print(f" [dim]Description:[/dim] {insight['description']}")
-                    console.print(f" [dim]Suggestion:[/dim] {insight['suggestion']}\n")
-
-            # Display suggestions
-            if result["suggestions"]:
-                console.print("[bold cyan]📝 Suggestions[/bold cyan]\n")
-                for i, suggestion in enumerate(result["suggestions"], 1):
-                    console.print(f" {i}. {suggestion}")
-
-            # Display next steps
-            if result["next_steps"]:
-                console.print("\n[bold cyan]🚀 Next Steps[/bold cyan]\n")
-                for i, step in enumerate(result["next_steps"], 1):
-                    console.print(f" {i}. {step}")
-
-        # Pattern detection mode
+        # Deprecated: Pattern detection mode
         elif patterns:
             console.print(
-                f"[bold cyan]🔍 Finding repeated patterns (min {min_occurrences} occurrences)...[/bold cyan]\n"
+                "[yellow]WARNING: --patterns is deprecated and has been removed.[/yellow]"
             )
-            result = find_patterns(files=file_list, min_occurrences=min_occurrences)
-
-            if output_json:
-                console.print_json(data=result)
-                return
-
-            pattern_list = result.get("patterns", [])
-            if pattern_list:
-                table = Table(title=f"Found {len(pattern_list)} Patterns")
-                table.add_column("Pattern", style="cyan", no_wrap=False)
-                table.add_column("Count", justify="right", style="green")
-                table.add_column("First Seen", style="yellow")
-                table.add_column("Last Seen", style="yellow")
-
-                for pattern in pattern_list[:20]:  # Show top 20
-                    pattern_text = pattern.get("pattern", "")[:80]
-                    count = pattern.get("occurrences", 0)
-                    first = pattern.get("first_seen", "N/A")
-                    last = pattern.get("last_seen", "N/A")
-                    table.add_row(pattern_text, str(count), first, last)
-
-                console.print(table)
-            else:
-                console.print("[yellow]No repeated patterns found.[/yellow]")
+            console.print(
+                "[dim]Pattern detection requires specialized tools like Drain3 or LogMine.[/dim]"
+            )
+            console.print(
+                "[dim]Use 'logler llm search' with SQL grouping for similar results.[/dim]"
+            )
+            sys.exit(0)
 
         # Thread/correlation following mode
-        elif thread or correlation:
-            identifier = thread or correlation
-            id_type = "thread" if thread else "correlation"
+        elif thread or correlation or trace:
+            identifier = thread or correlation or trace
+            id_type = "thread" if thread else "correlation" if correlation else "trace"
 
             # Hierarchy mode
             if hierarchy:
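The new guard makes the three ID flags mutually exclusive: it collects whichever of `--thread`, `--correlation`, and `--trace` were supplied and exits with status 2 (the conventional usage-error code) when more than one is set. The same check in isolation, as a hedged sketch (the function name and return shape are illustrative, not logler API):

```python
import sys
from typing import Optional, Tuple


def pick_identifier(
    thread: Optional[str], correlation: Optional[str], trace: Optional[str]
) -> Tuple[str, str]:
    """Return (id_type, identifier); exit if more than one ID was supplied."""
    id_args = {"thread": thread, "correlation": correlation, "trace": trace}
    provided = [name for name, value in id_args.items() if value]
    if len(provided) > 1:
        print("Provide only one of --thread, --correlation, or --trace.", file=sys.stderr)
        sys.exit(2)  # usage error, matching the CLI's exit code above
    if not provided:
        raise ValueError("no identifier supplied")
    return provided[0], id_args[provided[0]]
```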
@@ -377,7 +321,7 @@ def investigate(
             console.print(f"[bold cyan]🧵 Following {id_type}: {identifier}...[/bold cyan]\n")
 
             result = follow_thread(
-                files=file_list, thread_id=thread, correlation_id=correlation
+                files=file_list, thread_id=thread, correlation_id=correlation, trace_id=trace
             )
 
             if output_json:
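With `trace_id` threaded through, the follow path accepts distributed trace IDs end to end. A hedged usage sketch, assuming `follow_thread` keeps the keyword signature shown in the hunk above and returns a JSON-serializable dict (the `"entries"` key is an assumption, not confirmed by the diff):

```python
from logler.investigate import follow_thread

# CLI equivalent: logler investigate app.log --trace trace-abc123
result = follow_thread(
    files=["app.log"],
    thread_id=None,
    correlation_id=None,
    trace_id="trace-abc123",  # new keyword in 1.2.0
)
for entry in result.get("entries", []):  # key name assumed for illustration
    print(entry)
```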
logler/helpers.py CHANGED
@@ -46,21 +46,6 @@ def quick_summary(files: List[str]) -> Dict[str, Any]:
     }
 
 
-def find_top_errors(files: List[str], limit: int = 10) -> List[Dict[str, Any]]:
-    """
-    Find the most common error patterns.
-
-    Returns a list of error patterns sorted by frequency.
-
-    Example:
-        errors = find_top_errors(["app.log"], limit=5)
-        for err in errors:
-            print(f"{err['occurrences']}x: {err['pattern']}")
-    """
-    patterns = investigate.find_patterns(files, min_occurrences=2)
-    return sorted(patterns["patterns"], key=lambda x: x["occurrences"], reverse=True)[:limit]
-
-
 def search_errors(
     files: List[str], query: Optional[str] = None, limit: int = 100
 ) -> List[Dict[str, Any]]:
@@ -111,38 +96,6 @@ def trace_request(files: List[str], correlation_id: str) -> Dict[str, Any]:
     }
 
 
-def detect_spikes(files: List[str], window_minutes: int = 5) -> List[Dict[str, Any]]:
-    """
-    Detect error rate spikes.
-
-    Note: This requires the SQL feature to be enabled.
-
-    Returns list of time windows with abnormally high error rates.
-
-    Example:
-        spikes = detect_spikes(["app.log"], window_minutes=5)
-        for spike in spikes:
-            print(f"Spike at {spike['time']}: {spike['errors']} errors")
-    """
-    # This would require SQL queries to implement properly
-    # For now, use pattern detection as a simpler alternative
-    patterns = investigate.find_patterns(files, min_occurrences=3)
-
-    spikes = []
-    for pattern in patterns["patterns"]:
-        if pattern["occurrences"] >= 5:  # Threshold for "spike"
-            spikes.append(
-                {
-                    "pattern": pattern["pattern"],
-                    "occurrences": pattern["occurrences"],
-                    "first_seen": pattern["first_seen"],
-                    "last_seen": pattern["last_seen"],
-                }
-            )
-
-    return spikes
-
-
 def get_error_context(file: str, line_number: int, lines: int = 10) -> Dict[str, Any]:
     """
     Get context around an error line.
@@ -190,39 +143,6 @@ def analyze_thread_health(files: List[str]) -> Dict[str, Dict[str, int]]:
     }
 
 
-def find_cascading_failures(files: List[str]) -> List[Dict[str, Any]]:
-    """
-    Find patterns that suggest cascading failures.
-
-    Looks for:
-    - Multiple errors in quick succession
-    - Errors across multiple threads/services
-    - Increasing error rates over time
-
-    Example:
-        cascades = find_cascading_failures(["app.log"])
-        for cascade in cascades:
-            print(f"Cascade: {cascade['pattern']} across {len(cascade['threads'])} threads")
-    """
-    patterns = investigate.find_patterns(files, min_occurrences=3)
-
-    cascades = []
-    for pattern in patterns["patterns"]:
-        # Cascading failures typically affect multiple threads
-        if len(pattern["affected_threads"]) >= 3:
-            cascades.append(
-                {
-                    "pattern": pattern["pattern"],
-                    "occurrences": pattern["occurrences"],
-                    "threads": pattern["affected_threads"],
-                    "first_seen": pattern["first_seen"],
-                    "last_seen": pattern["last_seen"],
-                }
-            )
-
-    return cascades
-
-
 def get_timeline_summary(files: List[str], correlation_id: str) -> str:
     """
     Get a human-readable timeline summary for a request.
@@ -275,8 +195,3 @@ def trace(files: List[str], correlation_id: str) -> Dict[str, Any]:
 def summary(files: List[str]) -> Dict[str, Any]:
     """Shorthand for quick_summary()"""
     return quick_summary(files)
-
-
-def patterns(files: List[str], limit: int = 10) -> List[Dict[str, Any]]:
-    """Shorthand for find_top_errors()"""
-    return find_top_errors(files, limit)
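All of the removed helpers were thin wrappers over `investigate.find_patterns`, which 1.2.0 drops; the CLI's deprecation messages now point users at dedicated log-template miners (Drain3, LogMine) or SQL grouping. For a rough stdlib stand-in for the removed `find_top_errors`, masking digits before counting collapses near-identical error lines into one pattern (a sketch under those assumptions, not logler API):

```python
import re
from collections import Counter
from typing import Any, Dict, List


def top_error_patterns(files: List[str], limit: int = 10) -> List[Dict[str, Any]]:
    """Crude replacement for the removed find_top_errors(): mask numbers so
    'timeout after 30s' and 'timeout after 45s' count as the same pattern."""
    counts: Counter = Counter()
    for path in files:
        with open(path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                if "ERROR" in line:
                    counts[re.sub(r"\d+", "<N>", line.strip())] += 1
    return [{"pattern": p, "occurrences": n} for p, n in counts.most_common(limit)]
```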