logler 1.0.7__cp311-cp311-macosx_10_12_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
logler/__init__.py ADDED
@@ -0,0 +1,22 @@
1
+ """
2
+ Logler - Beautiful local log viewer with thread tracking and real-time updates.
3
+ """
4
+
5
+ __version__ = "1.0.7"
6
+ __author__ = "Logler Contributors"
7
+
8
+ from .parser import LogParser, LogEntry
9
+ from .tracker import ThreadTracker
10
+ from .log_reader import LogReader
11
+ from .tree_formatter import format_tree, format_waterfall, print_tree, print_waterfall
12
+
13
+ __all__ = [
14
+ "LogParser",
15
+ "LogEntry",
16
+ "ThreadTracker",
17
+ "LogReader",
18
+ "format_tree",
19
+ "format_waterfall",
20
+ "print_tree",
21
+ "print_waterfall",
22
+ ]
logler/bootstrap.py ADDED
@@ -0,0 +1,57 @@
1
+ """
2
+ Helper to ensure the Rust backend is installed.
3
+
4
+ Attempts to import `logler_rs`; if missing, runs `maturin develop` against
5
+ `crates/logler-py/Cargo.toml` with the `sql` feature enabled.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import subprocess
11
+ from pathlib import Path
12
+ from typing import Optional
13
+
14
+
15
def ensure_rust_backend(auto_install: bool = True) -> bool:
    """Ensure the ``logler_rs`` extension module is importable.

    If the import fails and *auto_install* is true, builds and installs the
    backend by running ``maturin develop --release`` against
    ``crates/logler-py/Cargo.toml`` with the ``sql`` feature enabled, then
    retries the import.

    Args:
        auto_install: When False, only report whether the module is already
            importable; never attempt an install.

    Returns:
        True if ``logler_rs`` is importable (possibly after installing it),
        False otherwise. This function never raises.
    """
    try:
        import logler_rs  # noqa: F401

        return True
    except Exception:
        # Extension import can fail in many ways (missing module, ABI
        # mismatch, loader errors) — treat all of them as "not installed".
        if not auto_install:
            return False

    maturin = _which("maturin")
    if not maturin:
        return False

    # NOTE(review): assumes this file lives two directories below the repo
    # root (a source checkout, not an installed wheel) — confirm layout.
    repo_root = Path(__file__).resolve().parents[2]
    cmd = [
        maturin,
        "develop",
        "--release",
        "-m",
        str(repo_root / "crates" / "logler-py" / "Cargo.toml"),
        "--features",
        "sql",
    ]
    try:
        subprocess.run(
            cmd, cwd=repo_root, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
    except (OSError, subprocess.SubprocessError):
        # Build failed or maturin could not be executed; report unavailable.
        return False

    # A module installed mid-session may be invisible to the import system
    # until the finder caches are invalidated.
    import importlib

    importlib.invalidate_caches()
    try:
        import logler_rs  # noqa: F401

        return True
    except Exception:
        return False
52
+
53
+
54
+ def _which(cmd: str) -> Optional[str]:
55
+ from shutil import which
56
+
57
+ return which(cmd)
logler/cache.py ADDED
@@ -0,0 +1,75 @@
1
+ """
2
+ Performance optimization: Add file-level index caching to standalone functions.
3
+
4
+ Currently, standalone functions like search(), follow_thread() create a new
5
+ Investigator and re-read files every time. This is inefficient for repeated
6
+ queries on the same files.
7
+
8
+ Solution: Add a module-level LRU cache that reuses Investigator instances.
9
+ """
10
+
11
+ import threading
12
+ from typing import Dict
13
+ import logler_rs
14
+
15
# Thread-safe cache for Investigator instances
_investigator_lock = threading.Lock()  # guards all reads/writes of the cache dict
_investigator_cache: Dict[tuple, logler_rs.PyInvestigator] = {}  # key: tuple of file paths
_cache_max_size = 10  # Keep up to 10 file sets in cache
19
+
20
+
21
def _get_cached_investigator(files: tuple) -> logler_rs.PyInvestigator:
    """
    Get or create a cached Investigator for the given files.

    The module docstring promises an LRU cache, so this is a true LRU:
    a cache hit re-inserts the entry (dict insertion order tracks recency),
    and when the cache is full the least-recently-used entry is evicted.
    The previous implementation was plain FIFO — hits never refreshed
    recency, so frequently used entries could be evicted first.

    Args:
        files: Normalized, hashable cache key — a tuple of file path strings.

    Returns:
        A ``logler_rs.PyInvestigator`` with the given files loaded.
    """
    with _investigator_lock:
        # Cache hit: move the entry to the most-recently-used position.
        inv = _investigator_cache.pop(files, None)
        if inv is not None:
            _investigator_cache[files] = inv
            return inv

        # Cache miss: build and load a fresh investigator.
        inv = logler_rs.PyInvestigator()
        inv.load_files(list(files))

        # Evict the least-recently-used entry once the cache is full.
        if len(_investigator_cache) >= _cache_max_size:
            lru_key = next(iter(_investigator_cache))
            del _investigator_cache[lru_key]

        _investigator_cache[files] = inv
        return inv
45
+
46
+
47
def get_cached_investigator(files) -> logler_rs.PyInvestigator:
    """Return a (possibly cached) Investigator for *files*.

    Paths are stringified and sorted so that any iterable of paths, in any
    order, maps onto the same stable cache key.
    """
    paths = [str(path) for path in files]
    paths.sort()
    return _get_cached_investigator(tuple(paths))
53
+
54
+
55
def clear_cache():
    """Drop every cached Investigator (useful for testing or freeing memory)."""
    with _investigator_lock:
        for key in list(_investigator_cache):
            del _investigator_cache[key]
59
+
60
+
61
+ # Example usage showing the difference:
62
+ #
63
+ # SLOW (current):
64
+ # for i in range(100):
65
+ # search(["app.log"], level="ERROR") # Re-reads file 100 times!
66
+ #
67
+ # FAST (with cache):
68
+ # for i in range(100):
69
+ # search(["app.log"], level="ERROR") # Reads once, caches index!
70
+ #
71
+ # FASTEST (explicit Investigator):
72
+ # inv = Investigator()
73
+ # inv.load_files(["app.log"])
74
+ # for i in range(100):
75
+ # inv.search(level="ERROR") # Explicit control, no cache overhead