llmess 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llmess/__init__.py +3 -0
- llmess/__main__.py +6 -0
- llmess/cli.py +311 -0
- llmess/pager.py +790 -0
- llmess-0.1.0.dist-info/METADATA +205 -0
- llmess-0.1.0.dist-info/RECORD +9 -0
- llmess-0.1.0.dist-info/WHEEL +4 -0
- llmess-0.1.0.dist-info/entry_points.txt +2 -0
- llmess-0.1.0.dist-info/licenses/LICENSE +21 -0
llmess/__init__.py
ADDED
llmess/__main__.py
ADDED
llmess/cli.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
"""Command-line interface for llmess."""
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import os
|
|
5
|
+
import shlex
|
|
6
|
+
import sys
|
|
7
|
+
|
|
8
|
+
from . import __version__
|
|
9
|
+
|
|
10
|
+
# System prompt applied by default; an explicit -s flag replaces it,
# and -B (base-model mode) drops it entirely.
DEFAULT_INSTRUCT_PROMPT = (
    "Continue the following text exactly as if you were autocomplete. "
    "Do not add commentary, greetings, explanations, or markdown formatting. "
    "Just continue the text naturally from where it left off."
)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def parse_args(argv=None):
    """Parse command-line arguments.

    If argv is None, uses sys.argv[1:] with LLMESS env var prepended.
    LLMESS env var is parsed as shell-style arguments and prepended,
    so CLI args naturally override them.
    """
    if argv is None:
        argv = sys.argv[1:]
    # Anything in $LLMESS acts as a set of default flags; the real CLI
    # arguments come after them, so argparse lets the CLI win.
    env_value = os.environ.get("LLMESS", "")
    if env_value:
        argv = shlex.split(env_value) + list(argv)

    p = argparse.ArgumentParser(
        prog="llmess",
        description="A less pager that uses LLMs to hallucinate infinite file continuations.",
        epilog="Scroll past EOF to generate more content using your configured LLM.",
    )
    p.add_argument(
        "file",
        nargs="?",
        default=None,
        help="File to view (use '-' for stdin, or pipe content)",
    )
    p.add_argument(
        "-m", "--model",
        dest="model",
        default=None,
        help="LLM model to use (default: your llm default model)",
    )
    p.add_argument(
        "-s", "--system",
        dest="system",
        default=None,
        help="System prompt (default: instruct prompt for continuation)",
    )
    p.add_argument(
        "-B", "--base",
        action="store_true",
        dest="base_mode",
        help="Base model mode: no system prompt (overridden by -s)",
    )
    p.add_argument(
        "-S", "--stealth",
        action="store_true",
        help="Stealth mode: mimic less appearance exactly",
    )
    p.add_argument(
        "-C", "--context",
        type=int,
        dest="context",
        default=None,
        metavar="CHARS",
        help="Characters of context to send to LLM (default: 2000)",
    )
    p.add_argument(
        "-T", "--max-tokens",
        type=int,
        dest="max_tokens",
        default=None,
        metavar="N",
        help="Max tokens to generate per LLM call (limits wait time)",
    )
    p.add_argument(
        "-o", "--option",
        nargs=2,
        action="append",
        dest="options",
        metavar=("KEY", "VALUE"),
        help="Model option to pass to llm (can be repeated)",
    )
    # nargs="?" with const=2 makes bare -P mean "2 screens ahead".
    p.add_argument(
        "-P", "--prefetch",
        type=int,
        nargs="?",
        const=2,
        default=0,
        metavar="SCREENS",
        help="Prefetch N screens ahead in background (default: 2 if flag used, 0 if not)",
    )
    p.add_argument(
        "--real-lines",
        type=int,
        dest="real_lines",
        default=None,
        metavar="N",
        help="Show first N real lines, then generate continuations",
    )
    p.add_argument(
        "--real-screen",
        action="store_true",
        dest="real_screen",
        help="Show first screenful of real content, then generate",
    )
    p.add_argument(
        "-V", "--version",
        action="version",
        version=f"%(prog)s {__version__}",
    )
    p.add_argument(
        "--install-prank",
        action="store_true",
        dest="install_prank",
        help="Output shell function to wrap 'less' with llmess",
    )
    return p.parse_args(argv)
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def load_content(file_arg):
    """
    Load content from file or stdin.

    Returns:
        tuple: (lines, source_name) where lines is list of strings
        and source_name is for display in status bar
    """

    def _drain_stdin():
        # The pager needs the entire text up front, so slurp all of stdin.
        return sys.stdin.read().splitlines(keepends=True), "[stdin]"

    # Explicit stdin request via '-'
    if file_arg == "-":
        if sys.stdin.isatty():
            print("llmess: no input (stdin is a terminal)", file=sys.stderr)
            sys.exit(1)
        return _drain_stdin()

    # No file argument: either piped stdin, or nothing at all.
    if file_arg is None:
        if sys.stdin.isatty():
            # Neither a file nor a pipe - print usage and bail.
            print("Usage: llmess [OPTIONS] FILE", file=sys.stderr)
            print("  llmess [OPTIONS] -", file=sys.stderr)
            print("  command | llmess [OPTIONS]", file=sys.stderr)
            print("\nTry 'llmess --help' for more information.", file=sys.stderr)
            sys.exit(1)
        return _drain_stdin()

    # A concrete path was given; surface errors the way less would.
    try:
        with open(file_arg, encoding="utf-8", errors="replace") as fh:
            return fh.readlines(), file_arg
    except FileNotFoundError:
        print(f"llmess: {file_arg}: No such file or directory", file=sys.stderr)
        sys.exit(1)
    except IsADirectoryError:
        print(f"llmess: {file_arg}: Is a directory", file=sys.stderr)
        sys.exit(1)
    except PermissionError:
        print(f"llmess: {file_arg}: Permission denied", file=sys.stderr)
        sys.exit(1)
    except OSError as e:
        print(f"llmess: {file_arg}: {e.strerror}", file=sys.stderr)
        sys.exit(1)
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
def reopen_tty_for_curses():
    """
    Reopen /dev/tty for stdin so curses can read keyboard input
    after we've consumed piped stdin.
    """
    if sys.stdin.isatty():
        return  # stdin is already the terminal; nothing to do
    try:
        with open("/dev/tty") as tty:
            # Point fd 0 back at the terminal so curses sees key presses.
            os.dup2(tty.fileno(), sys.stdin.fileno())
    except OSError as e:
        print(f"llmess: cannot open terminal for input: {e}", file=sys.stderr)
        sys.exit(1)
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def get_model(args_model):
    """
    Determine which model to use.

    Priority:
        1. --model flag
        2. LLMESS_MODEL environment variable
        3. None (use llm's default)
    """
    # A truthy CLI value wins; otherwise fall back to the env var,
    # which may itself be unset (None -> llm picks its own default).
    return args_model or os.environ.get("LLMESS_MODEL")
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
def get_options(args_options):
    """
    Determine which options to use.

    Priority:
        1. LLMESS_OPTIONS environment variable (base options)
        2. --option flags (override/extend env options)

    Returns:
        list: List of [key, value] pairs, or None if no options
    """
    # Env var format: "key=value,key2=value2". Chunks without '=' are
    # silently skipped.
    collected = []
    for chunk in os.environ.get("LLMESS_OPTIONS", "").split(","):
        chunk = chunk.strip()
        if "=" not in chunk:
            continue
        key, _, value = chunk.partition("=")
        collected.append([key.strip(), value.strip()])

    if args_options:
        # Fold into a dict so CLI flags replace same-named env entries
        # while new CLI keys are appended after the env ones.
        merged = dict(collected)
        merged.update((k, v) for k, v in args_options)
        collected = [[k, v] for k, v in merged.items()]

    return collected or None
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
# Shell snippet printed by --install-prank: shadows `less` with llmess when
# stdout is a terminal, but defers to the real binary for piped output.
PRANK_SHELL_FUNCTION = '''\
# llmess prank wrapper for 'less'
# Add this to your target's ~/.bashrc or ~/.zshrc
less() {
    if [[ -t 1 ]]; then
        # Interactive terminal: use llmess in stealth mode
        llmess --stealth --real-screen --prefetch "$@"
    else
        # Piped output: use real less
        command less "$@"
    fi
}
'''
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
def main(argv=None):
    """Main entry point for llmess."""
    args = parse_args(argv)

    # --install-prank only prints the shell snippet; nothing else runs.
    if args.install_prank:
        print("# Add this to target's shell config (~/.bashrc or ~/.zshrc):")
        print("#")
        print("# To uninstall, remove the function or run: unset -f less")
        print()
        print(PRANK_SHELL_FUNCTION)
        return

    # Model: flag > LLMESS_MODEL env var > llm's own default.
    model = get_model(args.model)

    # Options: env var supplies the base set; CLI flags override it.
    options = get_options(args.options)

    # Read the file/stdin before touching the tty - piped stdin must be
    # fully consumed before we repoint fd 0 at the terminal.
    lines, source_name = load_content(args.file)

    # --real-lines trims the buffer up front; --real-screen is deferred
    # to the pager because it needs the terminal size.
    if args.real_lines is not None and args.real_lines > 0:
        lines = lines[:args.real_lines]

    # If stdin was a pipe, reattach it to the terminal for curses input.
    reopen_tty_for_curses()

    # Deferred imports: avoid initializing curses before the tty is ready.
    import curses

    from .pager import check_llm_available, run_pager

    # System prompt priority: -s (explicit) > -B (base mode) > default.
    # An explicit empty -s value also means "no system prompt".
    if args.system is not None:
        system = args.system if args.system else None
    elif args.base_mode:
        system = None
    else:
        system = DEFAULT_INSTRUCT_PROMPT

    # Warn (but keep going) if llm looks unconfigured; only probed when
    # no explicit model was requested.
    if not model:
        llm_ok, llm_msg = check_llm_available()
        if not llm_ok:
            print(f"Warning: {llm_msg}", file=sys.stderr)
            print("Generation will fail until this is fixed.", file=sys.stderr)
            print("", file=sys.stderr)

    try:
        curses.wrapper(run_pager, lines, source_name, model, system,
                       args.stealth, args.prefetch, args.context, args.max_tokens,
                       options, args.real_screen)
    except KeyboardInterrupt:
        # Ctrl+C exits quietly, just like less.
        pass


if __name__ == "__main__":
    main()
|
llmess/pager.py
ADDED
|
@@ -0,0 +1,790 @@
|
|
|
1
|
+
"""Curses-based pager with LLM continuation."""
|
|
2
|
+
|
|
3
|
+
import curses
|
|
4
|
+
import subprocess
|
|
5
|
+
import threading
|
|
6
|
+
|
|
7
|
+
# How many trailing characters of the buffer are sent to the LLM when the
# caller gives no explicit -C/--context limit.
DEFAULT_CONTEXT_LIMIT = 2000
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def check_llm_available():
    """
    Check if llm CLI is configured and ready.

    Returns:
        tuple: (ok, message) where ok is bool and message explains any issue
    """
    try:
        # Ask llm for its default model; a non-empty answer means ready.
        proc = subprocess.run(
            ["llm", "models", "default"],
            capture_output=True,
            text=True,
            timeout=5,
        )
    except FileNotFoundError:
        return False, "llm not found. Install with: pip install llm"
    except subprocess.TimeoutExpired:
        # Slow but probably present; don't block startup on the probe.
        return True, None
    except Exception:
        # Any other probe failure: proceed optimistically - the real
        # generation call will surface a clearer error later.
        return True, None

    if proc.returncode == 0 and proc.stdout.strip():
        return True, None
    return False, "No default model set. Run: llm models default <model>"
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class PrefetchState:
    """Thread-safe state management for prefetch generation."""

    def __init__(self):
        # One lock guards both flags. `thread` is only touched from the
        # UI thread, so it needs no protection.
        self.lock = threading.Lock()
        self.generating = False
        self.error = None
        self.thread = None

    def start_generation(self):
        """Try to start generation. Returns True if started, False if already running."""
        with self.lock:
            if self.generating:
                return False
            self.generating, self.error = True, None
            return True

    def finish_generation(self, error=None):
        """Mark generation as complete."""
        with self.lock:
            self.generating, self.error = False, error

    def is_generating(self):
        """Check if generation is in progress."""
        with self.lock:
            return self.generating

    def get_error(self):
        """Get and clear any error from last generation."""
        with self.lock:
            error, self.error = self.error, None
            return error
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def get_continuation(context, model=None, system=None, context_limit=None,
                     max_tokens=None, options=None):
    """
    Call the llm CLI to generate continuation.

    Args:
        context: The text context to continue from
        model: Optional model name to pass to llm
        system: Optional system prompt to pass to llm
        context_limit: Max characters of context to send (default: DEFAULT_CONTEXT_LIMIT)
        max_tokens: Max tokens to generate (passed to llm -o max_tokens)
        options: List of [key, value] pairs to pass as llm -o options

    Returns:
        tuple: (lines, error) where lines is list of strings and error is str or None
    """
    try:
        # Only the tail of the buffer is sent, to keep prompts bounded.
        limit = DEFAULT_CONTEXT_LIMIT if context_limit is None else context_limit
        prompt = context[-limit:]

        cmd = ["llm", "--no-stream"]
        if model:
            cmd += ["-m", model]
        if system:
            cmd += ["-s", system]
        if max_tokens:
            cmd += ["-o", "max_tokens", str(max_tokens)]
        for key, value in options or []:
            cmd += ["-o", key, value]

        proc = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )
        out, err = proc.communicate(input=prompt)

        if proc.returncode != 0:
            # Collapse newlines to spaces so the message fits the status bar.
            error_msg = " ".join(err.strip().split())
            # Map common failures to actionable hints.
            if "No key found" in error_msg or "API key" in error_msg:
                return [], "No LLM configured. Run: llm keys set <provider>"
            if "No model" in error_msg:
                return [], "No default model. Run: llm models default <model>"
            return [], f"llm error: {error_msg}"

        return out.splitlines(keepends=True), None

    except FileNotFoundError:
        return [], "llm not found - install with: pip install llm"
    except Exception as e:
        return [], f"Error: {e}"
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def find_matches(wrapped, term):
    """
    Locate the first occurrence of term on each wrapped line (case-insensitive).

    Args:
        wrapped: List of (display_line, original_line_index) tuples
        term: Search term

    Returns:
        list: (wrapped_line_idx, char_pos) tuples, one per matching line
    """
    needle = term.lower()
    hits = []
    for idx, (text, _) in enumerate(wrapped):
        where = text.lower().find(needle)
        if where >= 0:
            hits.append((idx, where))
    return hits
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def augment_system_for_search(system, term):
    """
    Add search term requirement to system prompt for instruct models.

    Args:
        system: Original system prompt (may be None)
        term: Search term to include

    Returns:
        str: Augmented system prompt
    """
    suffix = f" Include this exact string in your continuation: '{term}'"
    # strip() removes the leading space when there was no base prompt.
    return ((system or "") + suffix).strip()
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def render_line_with_highlight(stdscr, row, line, width, search_term):
    """
    Render a line with search term highlighting (like less).

    Args:
        stdscr: Curses screen
        row: Row number to render at
        line: The line text to render
        width: Terminal width
        search_term: Term to highlight (case-insensitive), or None

    Note: matching is done on the lowercased text while the highlight span
    uses len(search_term); a term whose lowercase form changes length
    (rare Unicode cases) could misalign the highlight.
    """
    line = line[:width]  # Truncate to screen width

    if not search_term:
        stdscr.addstr(row, 0, line)
        return

    # Case-insensitive scan for every occurrence in this line.
    term_lower = search_term.lower()
    line_lower = line.lower()
    term_len = len(search_term)

    col = 0
    pos = 0
    while pos < len(line):
        match_pos = line_lower.find(term_lower, pos)
        if match_pos == -1:
            # No more matches - render the rest of the line normally.
            stdscr.addstr(row, col, line[pos:])
            break
        # Render text before the match normally.
        if match_pos > pos:
            stdscr.addstr(row, col, line[pos:match_pos])
            col += match_pos - pos
        # Render the match itself in standout, like less does.
        stdscr.attron(curses.A_STANDOUT)
        stdscr.addstr(row, col, line[match_pos:match_pos + term_len])
        stdscr.attroff(curses.A_STANDOUT)
        col += term_len
        pos = match_pos + term_len
    # (The original carried a dead `while/else: pass` clause here; removed.)
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def wrap_lines(lines, width):
    """
    Wrap lines to fit within terminal width.

    Args:
        lines: List of content lines
        width: Terminal width

    Returns:
        list: List of (display_line, original_line_index) tuples
    """
    if width <= 0:
        width = 80  # Defensive fallback for a degenerate terminal

    wrapped = []
    for idx, raw in enumerate(lines):
        # Drop the trailing newline for display purposes.
        text = raw.rstrip("\n\r")
        if not text:
            wrapped.append(("", idx))
            continue
        # Slice the line into width-sized display chunks.
        for start in range(0, len(text), width):
            wrapped.append((text[start:start + width], idx))

    return wrapped
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
def build_status_bar(source_name, scroll_pos, content_height, wrapped, lines,
                     generating, status_error, at_bottom, stealth=False,
                     prefetching=False, search_term=None, search_matches=None,
                     search_match_idx=0):
    """
    Build the status bar string.

    Args:
        source_name: File name or [stdin]
        scroll_pos: Current scroll position in wrapped lines
        content_height: Number of visible content lines
        wrapped: List of wrapped display lines
        lines: Original content lines
        generating: Whether synchronous generation is in progress
        status_error: Error message to display, or None
        at_bottom: Whether scrolled to bottom
        stealth: Whether to use less-style status bar
        prefetching: Whether prefetch generation is running in background
        search_term: Active search term, or None
        search_matches: List of match positions, or None
        search_match_idx: Index of the current match

    Returns:
        str: Status bar text
    """
    if stealth:
        # Mimic less output exactly; never reveal llmess internals.
        if not wrapped:
            return f" {source_name} (empty)"

        first_visible = scroll_pos + 1
        last_visible = min(scroll_pos + content_height, len(wrapped))
        # Advertise a fake total that always keeps runway ahead of the
        # viewport, so the true (growing) buffer length stays hidden.
        fake_total = max(len(lines), scroll_pos + content_height + 100)
        pct = min(99, int(100 * last_visible / fake_total))

        if generating:
            # No generating indicator in stealth; the pause is the only tell.
            return f" {source_name} lines {first_visible}-{last_visible} {pct}%"
        if status_error:
            # Errors are still surfaced - the user needs to know.
            return f" {source_name} [{status_error}]"
        if at_bottom and not prefetching:
            return f" {source_name} lines {first_visible}-{last_visible} (END)"
        return f" {source_name} lines {first_visible}-{last_visible} {pct}%"

    # Normal llmess status bar
    if wrapped:
        _, orig_line = wrapped[min(scroll_pos, len(wrapped) - 1)]
        line_info = f"Line {orig_line + 1}/{len(lines)}"
    else:
        line_info = "Empty"

    status = f" llmess - {source_name} - {line_info}"

    if generating:
        suffix = (f" [searching '{search_term}'...]" if search_term
                  else " [GENERATING...]")
    elif status_error:
        suffix = f" [{status_error}]"
    elif search_term:
        if search_matches:
            suffix = f" ['{search_term}' {search_match_idx + 1}/{len(search_matches)}]"
        else:
            suffix = f" ['{search_term}' not found]"
    elif prefetching:
        suffix = " [buffering...]"
    elif at_bottom:
        suffix = " [END - ↓ generates more]"
    else:
        suffix = " [q:quit /search]"

    return status + suffix
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
def generate_sync(lines, model, system, context_limit=None, max_tokens=None, options=None):
    """
    Synchronously generate continuation and append to lines.

    Args:
        lines: Content lines list (modified in place)
        model: LLM model name
        system: System prompt
        context_limit: Max characters of context to send
        max_tokens: Max tokens to generate
        options: List of [key, value] pairs for llm options

    Returns:
        str or None: Error message if generation failed
    """
    new_lines, error = get_continuation(
        "".join(lines), model, system, context_limit, max_tokens, options
    )
    if error:
        return error
    if not new_lines:
        return "LLM returned empty response"
    # Keep the seam newline-terminated so the next context join stays
    # line-aligned.
    if lines and not lines[-1].endswith("\n"):
        lines[-1] += "\n"
    lines.extend(new_lines)
    return None
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
def prefetch_worker(lines, model, system, prefetch_state, context_limit=None,
                    max_tokens=None, options=None):
    """
    Background worker for prefetch generation.

    Args:
        lines: Content lines list (appended to in place)
        model: LLM model name
        system: System prompt
        prefetch_state: PrefetchState instance for coordination
        context_limit: Max characters of context to send
        max_tokens: Max tokens to generate
        options: List of [key, value] pairs for llm options
    """
    try:
        new_lines, error = get_continuation(
            "".join(lines), model, system, context_limit, max_tokens, options
        )
        if error:
            prefetch_state.finish_generation(error)
            return
        if not new_lines:
            prefetch_state.finish_generation("LLM returned empty response")
            return
        # Keep the seam newline-terminated before appending the new text.
        if lines and not lines[-1].endswith("\n"):
            lines[-1] += "\n"
        lines.extend(new_lines)
        prefetch_state.finish_generation(None)
    except Exception as e:
        # Never let a worker crash silently; surface it via the state.
        prefetch_state.finish_generation(f"Prefetch error: {e}")
|
|
394
|
+
|
|
395
|
+
|
|
396
|
+
def should_prefetch(scroll_pos, content_height, wrapped_len, prefetch_screens):
    """
    Check if we should start prefetch generation.

    Args:
        scroll_pos: Current scroll position
        content_height: Visible content lines
        wrapped_len: Total wrapped lines
        prefetch_screens: Number of screens to buffer ahead

    Returns:
        bool: True if prefetch should be triggered
    """
    if prefetch_screens <= 0:
        return False
    # Trigger once the unseen buffer shrinks below N screens' worth.
    remaining = wrapped_len - (scroll_pos + content_height)
    return remaining < content_height * prefetch_screens
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
def start_prefetch(lines, model, system, prefetch_state, context_limit=None,
                   max_tokens=None, options=None):
    """
    Start prefetch generation in background thread.

    Args:
        lines: Content lines list
        model: LLM model name
        system: System prompt
        prefetch_state: PrefetchState instance
        context_limit: Max characters of context to send
        max_tokens: Max tokens to generate
        options: List of [key, value] pairs for llm options

    Returns:
        bool: True if prefetch was started, False if already running
    """
    # start_generation() is the atomic gate: only one worker at a time.
    if not prefetch_state.start_generation():
        return False

    worker = threading.Thread(
        target=prefetch_worker,
        args=(lines, model, system, prefetch_state, context_limit, max_tokens, options),
        daemon=True,  # Don't block interpreter exit on a pending LLM call
    )
    prefetch_state.thread = worker
    worker.start()
    return True
|
|
448
|
+
|
|
449
|
+
|
|
450
|
+
def _draw_reverse_status(stdscr, height, width, text):
    """Paint *text* across the bottom row in reverse video, padded to width."""
    stdscr.attron(curses.A_REVERSE)
    try:
        stdscr.addstr(height - 1, 0, text[:width].ljust(width))
    except curses.error:
        pass  # Writing into the bottom-right cell can raise ERR; ignore.
    finally:
        stdscr.attroff(curses.A_REVERSE)


def _show_generating_status(stdscr, height, width, source_name, wrapped, lines, scroll_pos):
    """Show the non-stealth '[GENERATING...]' status line and refresh."""
    if wrapped:
        _, orig_line = wrapped[min(scroll_pos, len(wrapped) - 1)]
        line_info = f"Line {orig_line + 1}/{len(lines)}"
    else:
        line_info = "Empty"
    gen_status = f" llmess - {source_name} - {line_info} [GENERATING...]"
    _draw_reverse_status(stdscr, height, width, gen_status)
    stdscr.refresh()


def _prompt_line(stdscr, height, width, prompt):
    """Read one line of user input on the status row, echoing keystrokes.

    Temporarily switches to blocking input with a visible cursor, then
    restores the pager's non-blocking (100ms) input mode.

    Returns:
        str: The text the user typed ("" on input error).
    """
    col = len(prompt)
    stdscr.timeout(-1)  # Blocking mode while the user types
    curses.echo()
    curses.curs_set(1)
    # Avoid writing to the last cell of the row (causes curses ERR).
    stdscr.addstr(height - 1, 0, prompt + " " * (width - col - 1))
    stdscr.move(height - 1, col)
    stdscr.refresh()
    try:
        raw = stdscr.getstr(height - 1, col, width - col - 1).decode('utf-8')
    except curses.error:
        raw = ""
    curses.noecho()
    curses.curs_set(0)
    # NOTE(review): this re-enables the 100ms poll even when prefetch is
    # disabled (matching the original behavior); the resulting getch
    # timeouts are ignored by the main loop's `key == -1` check.
    stdscr.timeout(100)
    return raw


def _truncate_to_first_screen(lines, height, width):
    """Trim *lines* in place to the prefix that fills one screen when wrapped."""
    content_height = height - 1  # Reserve a row for the status bar
    wrapped_count = 0
    truncate_at = 0
    for i, line in enumerate(lines):
        text = line.rstrip("\n\r")
        # Number of display rows this logical line occupies after wrapping.
        wrapped_count += 1 if not text else max(1, (len(text) + width - 1) // width)
        if wrapped_count >= content_height:
            truncate_at = i + 1
            break
        truncate_at = i + 1
    lines[:] = lines[:truncate_at]  # Truncate in place


def run_pager(stdscr, lines, source_name, model=None, system=None,
              stealth=False, prefetch_screens=0, context_limit=None,
              max_tokens=None, options=None, real_screen=False):
    """
    Run the interactive pager (the main curses event loop).

    Args:
        stdscr: curses window
        lines: List of content lines (mutated in place as content is generated)
        source_name: Display name for status bar
        model: Optional LLM model name
        system: Optional system prompt for LLM
        stealth: Whether to use less-compatible stealth mode
        prefetch_screens: Number of screens to buffer ahead (0 = disabled)
        context_limit: Max characters of context to send to LLM
        max_tokens: Max tokens to generate per LLM call
        options: List of [key, value] pairs for llm options
        real_screen: If True, truncate to first screenful then generate
    """
    curses.curs_set(0)  # Hide cursor

    # Handle real_screen: keep only the first screenful of real content.
    if real_screen:
        height, width = stdscr.getmaxyx()
        _truncate_to_first_screen(lines, height, width)

    # Track state
    scroll_pos = 0
    generating = False   # Synchronous generation in progress
    status_error = None  # Transient message shown on the status bar

    # Prefetch state (only used if prefetch_screens > 0)
    prefetch_state = PrefetchState() if prefetch_screens > 0 else None

    # Search state
    search_term = None
    search_matches = []
    search_match_idx = 0

    # Non-blocking getch in prefetch mode keeps the UI responsive while
    # a background generation is running.
    if prefetch_screens > 0:
        stdscr.timeout(100)  # 100ms timeout for getch

    while True:
        height, width = stdscr.getmaxyx()
        content_height = height - 1  # Reserve last line for status bar

        # Re-wrap every frame so resizes and new content are picked up.
        wrapped = wrap_lines(lines, width)

        # Surface any error raised by the background prefetch worker.
        if prefetch_state:
            prefetch_error = prefetch_state.get_error()
            if prefetch_error:
                status_error = prefetch_error

        stdscr.clear()

        # Calculate scroll bounds
        max_scroll = max(0, len(wrapped) - content_height)
        at_bottom = scroll_pos >= max_scroll

        # Kick off a background prefetch when the read-ahead buffer runs low.
        prefetching = False
        if prefetch_state:
            prefetching = prefetch_state.is_generating()
            if not prefetching and should_prefetch(
                scroll_pos, content_height, len(wrapped), prefetch_screens
            ):
                start_prefetch(
                    lines, model, system, prefetch_state,
                    context_limit, max_tokens, options
                )
                prefetching = True

        # Draw the visible slice of wrapped lines.
        for i in range(content_height):
            display_idx = scroll_pos + i
            if display_idx < len(wrapped):
                display_line, _ = wrapped[display_idx]
                try:
                    render_line_with_highlight(stdscr, i, display_line, width, search_term)
                except curses.error:
                    pass
            else:
                # Show ~ for lines beyond content (like vim/less)
                stdscr.attron(curses.A_DIM)
                try:
                    stdscr.addstr(i, 0, "~")
                except curses.error:
                    pass
                finally:
                    stdscr.attroff(curses.A_DIM)

        # Build and display status bar
        status = build_status_bar(
            source_name, scroll_pos, content_height, wrapped, lines,
            generating, status_error, at_bottom, stealth, prefetching,
            search_term, search_matches, search_match_idx
        )
        _draw_reverse_status(stdscr, height, width, status)

        stdscr.refresh()

        # Input handling
        key = stdscr.getch()

        # In prefetch mode, -1 means getch timed out (no key pressed):
        # loop again to redraw and poll the prefetch worker.
        if key == -1:
            continue

        # Clear transient error on any keypress
        if status_error:
            status_error = None

        if key == ord("q"):
            break

        elif key == curses.KEY_UP or key == ord("k"):
            if scroll_pos > 0:
                scroll_pos -= 1

        elif key == curses.KEY_DOWN or key == ord("j"):
            max_scroll = max(0, len(wrapped) - content_height)
            if scroll_pos < max_scroll:
                scroll_pos += 1
            elif scroll_pos >= max_scroll and not generating:
                # At bottom - trigger synchronous generation if no prefetch
                # or if prefetch buffer is exhausted
                if prefetch_state and prefetch_state.is_generating():
                    # Prefetch in progress - just wait (non-blocking)
                    pass
                elif prefetch_state and len(wrapped) > scroll_pos + content_height:
                    # Prefetch added content - can scroll
                    scroll_pos += 1
                else:
                    # No prefetch or buffer exhausted - sync generate
                    generating = True
                    if not stealth:
                        _show_generating_status(
                            stdscr, height, width, source_name,
                            wrapped, lines, scroll_pos
                        )
                    status_error = generate_sync(
                        lines, model, system, context_limit, max_tokens, options
                    )
                    generating = False

        elif key == curses.KEY_PPAGE or key == ord("b"):
            # Page up
            scroll_pos = max(0, scroll_pos - content_height)

        elif key == curses.KEY_NPAGE or key == ord("f") or key == ord(" "):
            # Page down
            max_scroll = max(0, len(wrapped) - content_height)
            if scroll_pos < max_scroll:
                scroll_pos = min(max_scroll, scroll_pos + content_height)
            elif not generating:
                # At bottom - same logic as down arrow
                if prefetch_state and prefetch_state.is_generating():
                    pass
                elif prefetch_state and len(wrapped) > scroll_pos + content_height:
                    scroll_pos = min(max_scroll, scroll_pos + content_height)
                else:
                    generating = True
                    if not stealth:
                        _show_generating_status(
                            stdscr, height, width, source_name,
                            wrapped, lines, scroll_pos
                        )
                    status_error = generate_sync(
                        lines, model, system, context_limit, max_tokens, options
                    )
                    generating = False

        elif key == ord("g") or key == curses.KEY_HOME:
            # Go to top
            scroll_pos = 0

        elif key == ord("G") or key == curses.KEY_END:
            # Go to bottom
            scroll_pos = max(0, len(wrapped) - content_height)

        elif key == curses.KEY_RESIZE:
            # Terminal resized - next iteration re-wraps and redraws
            pass

        elif key == ord("/"):
            # Search mode
            search_input = _prompt_line(stdscr, height, width, "/")

            if search_input:
                search_term = search_input
                search_match_idx = 0
                search_matches = find_matches(wrapped, search_term)

                if search_matches:
                    # Found - jump to first match
                    scroll_pos = max(0, min(search_matches[0][0], len(wrapped) - content_height))
                elif system:
                    # Not found, but instruct mode can generate content
                    # containing the search term.
                    generating = True
                    augmented_system = augment_system_for_search(system, search_term)

                    # Show searching status
                    search_status = build_status_bar(
                        source_name, scroll_pos, content_height, wrapped, lines,
                        True, None, at_bottom, stealth, False,
                        search_term, [], 0
                    )
                    _draw_reverse_status(stdscr, height, width, search_status)
                    stdscr.refresh()

                    # Generate with augmented prompt
                    status_error = generate_sync(lines, model, augmented_system,
                                                 context_limit, max_tokens, options)
                    generating = False

                    # Re-wrap and search the newly generated content.
                    wrapped = wrap_lines(lines, width)
                    search_matches = find_matches(wrapped, search_term)
                    if search_matches:
                        match_line = search_matches[0][0]
                        scroll_pos = max(0, min(match_line, len(wrapped) - content_height))
                # else: base mode, no match - status bar will show "not found"

        elif key == ord("n"):
            # Next search match (wraps around)
            if search_matches and len(search_matches) > 1:
                search_match_idx = (search_match_idx + 1) % len(search_matches)
                scroll_pos = max(0, min(search_matches[search_match_idx][0],
                                        len(wrapped) - content_height))

        elif key == ord("N"):
            # Previous search match (wraps around)
            if search_matches and len(search_matches) > 1:
                search_match_idx = (search_match_idx - 1) % len(search_matches)
                scroll_pos = max(0, min(search_matches[search_match_idx][0],
                                        len(wrapped) - content_height))

        elif key == 27:  # Escape
            # Clear search
            search_term = None
            search_matches = []
            search_match_idx = 0

        elif key == ord("s"):
            # Save to file (prompt styled after less's `s` command)
            filename = _prompt_line(stdscr, height, width, "log file: ").strip()

            if filename:
                try:
                    with open(filename, 'w', encoding='utf-8') as f:
                        f.writelines(lines)
                    # Bug fix: interpolate the actual filename; the original
                    # f-string had no placeholder and printed a literal.
                    status_error = f"Saved to {filename}"
                except OSError as e:
                    status_error = f"Save failed: {e.strerror}"
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: llmess
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A less pager that uses LLMs to hallucinate infinite file continuations
|
|
5
|
+
Project-URL: Homepage, https://github.com/tgies/llmess
|
|
6
|
+
Project-URL: Repository, https://github.com/tgies/llmess
|
|
7
|
+
Author: tgies
|
|
8
|
+
License-Expression: MIT
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Keywords: cli,humor,less,llm,pager
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Environment :: Console
|
|
13
|
+
Classifier: Environment :: Console :: Curses
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
16
|
+
Classifier: Operating System :: POSIX
|
|
17
|
+
Classifier: Programming Language :: Python :: 3
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
21
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
22
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
23
|
+
Classifier: Topic :: Games/Entertainment
|
|
24
|
+
Classifier: Topic :: Text Processing
|
|
25
|
+
Requires-Python: >=3.8
|
|
26
|
+
Description-Content-Type: text/markdown
|
|
27
|
+
|
|
28
|
+
# llmess
|
|
29
|
+
|
|
30
|
+
Some files end too early. Some files don't contain what you're looking for.
|
|
31
|
+
|
|
32
|
+
`llmess` is a `less` pager that addresses both issues:
|
|
33
|
+
|
|
34
|
+
- When you scroll past the end of a file, an LLM generates more content.
|
|
35
|
+
- When you search for a term that doesn't exist, the LLM generates content containing it.
|
|
36
|
+
|
|
37
|
+
Works with any model supported by the [`llm`](https://llm.datasette.io/) CLI.
|
|
38
|
+
|
|
39
|
+
## Installation
|
|
40
|
+
|
|
41
|
+
### Prerequisites
|
|
42
|
+
|
|
43
|
+
You need the [`llm`](https://llm.datasette.io/) CLI tool installed and configured:
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
# Install llm
|
|
47
|
+
pipx install llm
|
|
48
|
+
|
|
49
|
+
# Install plugins for your preferred provider
|
|
50
|
+
llm install llm-openrouter # or llm-claude, llm-ollama, etc.
|
|
51
|
+
|
|
52
|
+
# Configure API keys and default model
|
|
53
|
+
llm keys set openrouter
|
|
54
|
+
llm models default openrouter/meta-llama/llama-3.1-405b
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
### Install llmess
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
pipx install llmess
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
Or with pip:
|
|
64
|
+
|
|
65
|
+
```bash
|
|
66
|
+
pip install llmess
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
> **Note**: llmess calls `llm` via subprocess, so it uses whatever `llm` is on your PATH. Your plugins and configuration are preserved.
|
|
70
|
+
|
|
71
|
+
## Usage
|
|
72
|
+
|
|
73
|
+
```bash
|
|
74
|
+
llmess myfile.txt # View a file
|
|
75
|
+
llmess -m gpt-4o myfile.txt # Use a specific model
|
|
76
|
+
llmess -B myfile.txt # Base mode: no system prompt
|
|
77
|
+
cat file.txt | llmess # Read from stdin
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
### Options
|
|
81
|
+
|
|
82
|
+
| Flag | Description |
|
|
83
|
+
|------|-------------|
|
|
84
|
+
| `-m`, `--model` | LLM model to use (default: your `llm` default) |
|
|
85
|
+
| `-s`, `--system` | System prompt (default: instruct prompt) |
|
|
86
|
+
| `-B`, `--base` | Base model mode: no system prompt |
|
|
87
|
+
| `-S`, `--stealth` | Mimic `less` appearance exactly-ish probably |
|
|
88
|
+
| `-T`, `--max-tokens N` | Max tokens to generate per LLM call |
|
|
89
|
+
| `-C`, `--context CHARS` | Characters of context to send to LLM (default: 2000) |
|
|
90
|
+
| `-o`, `--option KEY VALUE` | Model option to pass to llm (can be repeated) |
|
|
91
|
+
| `-P`, `--prefetch [N]` | Prefetch N screens ahead in background (off by default; 2 if flag given without N) |
|
|
92
|
+
| `--real-lines N` | Show first N real lines, then generate continuations |
|
|
93
|
+
| `--real-screen` | Show first screenful of real content, then generate |
|
|
94
|
+
| `--install-prank` | Output shell function to wrap `less` with llmess |
|
|
95
|
+
| `-V`, `--version` | Show version |
|
|
96
|
+
|
|
97
|
+
### Environment Variables
|
|
98
|
+
|
|
99
|
+
| Variable | Description |
|
|
100
|
+
|----------|-------------|
|
|
101
|
+
| `LLMESS` | Default CLI flags (like `LESS` for less) |
|
|
102
|
+
| `LLMESS_MODEL` | Default model |
|
|
103
|
+
| `LLMESS_OPTIONS` | Default model options (comma-separated `key=value` pairs) |
|
|
104
|
+
|
|
105
|
+
```bash
|
|
106
|
+
# In ~/.bashrc or ~/.zshrc
|
|
107
|
+
export LLMESS="-S -P" # stealth + prefetch by default
|
|
108
|
+
export LLMESS_MODEL="openrouter/meta-llama/llama-3.1-405b"
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
Priority: CLI flags > `LLMESS` > `LLMESS_MODEL`/`LLMESS_OPTIONS` > built-in defaults.
|
|
112
|
+
|
|
113
|
+
## Controls
|
|
114
|
+
|
|
115
|
+
| Key | Action |
|
|
116
|
+
|-----|--------|
|
|
117
|
+
| ↑ / k | Scroll up one line |
|
|
118
|
+
| ↓ / j | Scroll down one line |
|
|
119
|
+
| Page Up / b | Scroll up one page |
|
|
120
|
+
| Page Down / f / Space | Scroll down one page |
|
|
121
|
+
| g | Go to top |
|
|
122
|
+
| G | Go to bottom |
|
|
123
|
+
| / | Search forward |
|
|
124
|
+
| n | Next match |
|
|
125
|
+
| N | Previous match |
|
|
126
|
+
| Esc | Clear search |
|
|
127
|
+
| s | Save to file |
|
|
128
|
+
| q | Quit |
|
|
129
|
+
|
|
130
|
+
When you reach the bottom, scrolling down triggers LLM generation.
|
|
131
|
+
|
|
132
|
+
## Search
|
|
133
|
+
|
|
134
|
+
Press `/` to search. Matches are highlighted.
|
|
135
|
+
|
|
136
|
+
If the term isn't found, llmess generates content containing it:
|
|
137
|
+
|
|
138
|
+
```bash
|
|
139
|
+
echo "Hello world" | llmess
|
|
140
|
+
# Press /password<Enter>
|
|
141
|
+
# → Generates content containing "password", jumps to match
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
This requires a system prompt (the default). With `-B` (base mode), search behaves normally.
|
|
145
|
+
|
|
146
|
+
## Base Models vs. Instruct Models
|
|
147
|
+
|
|
148
|
+
By default, llmess sends a system prompt instructing the model to continue text without commentary. This works well with instruct models and is ignored by base models.
|
|
149
|
+
|
|
150
|
+
**Base models** continue text naturally. Use `-B` to skip the system prompt:
|
|
151
|
+
|
|
152
|
+
```bash
|
|
153
|
+
llmess -B -m openrouter/meta-llama/llama-3.1-405b myfile.txt
|
|
154
|
+
```
|
|
155
|
+
|
|
156
|
+
Search-triggered generation is not available with base models since they don't follow instructions.
|
|
157
|
+
|
|
158
|
+
**Instruct models** work out of the box. Use `-s "custom prompt"` to override the default:
|
|
159
|
+
|
|
160
|
+
```bash
|
|
161
|
+
llmess -s "Continue this text exactly. No commentary." myfile.txt
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
Any model supported by `llm` works, including local models via Ollama.
|
|
165
|
+
|
|
166
|
+
## Modes
|
|
167
|
+
|
|
168
|
+
### Stealth Mode
|
|
169
|
+
|
|
170
|
+
With `-S`, llmess mimics the appearance of `less`:
|
|
171
|
+
- Status bar shows `filename lines 1-24 50%` format
|
|
172
|
+
- No `[GENERATING...]` indicator
|
|
173
|
+
- Shows `(END)` at bottom
|
|
174
|
+
|
|
175
|
+
### Prefetch Mode
|
|
176
|
+
|
|
177
|
+
With `-P`, llmess generates content ahead of where you're reading:
|
|
178
|
+
|
|
179
|
+
```bash
|
|
180
|
+
llmess -P file.txt # Prefetch 2 screens ahead
|
|
181
|
+
llmess -P 5 file.txt # Prefetch 5 screens ahead
|
|
182
|
+
```
|
|
183
|
+
|
|
184
|
+
### Real-Then-Fake Mode
|
|
185
|
+
|
|
186
|
+
Show real file content first, then generate continuations:
|
|
187
|
+
|
|
188
|
+
```bash
|
|
189
|
+
llmess --real-lines 50 ~/.bashrc # First 50 lines are real
|
|
190
|
+
llmess --real-screen config.yaml # First screenful is real
|
|
191
|
+
```
|
|
192
|
+
|
|
193
|
+
## Prank Installation
|
|
194
|
+
|
|
195
|
+
```bash
|
|
196
|
+
llmess --install-prank
|
|
197
|
+
```
|
|
198
|
+
|
|
199
|
+
Outputs a shell function that wraps `less` with llmess. Add it to the target's shell config.
|
|
200
|
+
|
|
201
|
+
The wrapper uses stealth mode, shows real content first, prefetches in background, and falls back to real `less` for piped output.
|
|
202
|
+
|
|
203
|
+
## License
|
|
204
|
+
|
|
205
|
+
MIT
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
llmess/__init__.py,sha256=GM0DZhEUCPx-JE30myGrmeWItACSj63mo4J4Xnxd5Pg,110
|
|
2
|
+
llmess/__main__.py,sha256=YyzpMIX0IHQc7mS-mvbPu8cCBd98IatcyYIJBtgl2VM,102
|
|
3
|
+
llmess/cli.py,sha256=oMp0qCOIuPzhqMAcmORHFvv3vTqYqnUzIdaMIkbzv-Q,9509
|
|
4
|
+
llmess/pager.py,sha256=Vgw7kojaw2YovZicQvfHwR15Clp-aOb1YlEXmR_LiV4,28309
|
|
5
|
+
llmess-0.1.0.dist-info/METADATA,sha256=-43mHNvvFDq_znt-0qcXEWq5IJz9g9aHN6GDHkJEg6E,6122
|
|
6
|
+
llmess-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
7
|
+
llmess-0.1.0.dist-info/entry_points.txt,sha256=1cjLsC3upf8cBtfWXS3O4HF8NizRHMzJ3vhse8jwNyY,43
|
|
8
|
+
llmess-0.1.0.dist-info/licenses/LICENSE,sha256=C1e3c9puJ-GWoIwD5NmvtJn8BPDZaPOjDEuLw-d8PfM,1062
|
|
9
|
+
llmess-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 tgies
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|