patchpal-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
patchpal/cli.py ADDED
@@ -0,0 +1,476 @@
+ import argparse
+ import os
+ import sys
+ import warnings
+ from pathlib import Path
+
+ from prompt_toolkit import prompt as pt_prompt
+ from prompt_toolkit.completion import Completer, Completion, PathCompleter, merge_completers
+ from prompt_toolkit.document import Document
+ from prompt_toolkit.formatted_text import FormattedText
+ from prompt_toolkit.history import InMemoryHistory
+ from rich.console import Console
+ from rich.markdown import Markdown
+
+ from patchpal.agent import create_agent
+ from patchpal.tools import audit_logger
+
+
+ class SkillCompleter(Completer):
+     """Completer for skill names when input starts with /"""
+
+     def __init__(self):
+         self.repo_root = Path(".").resolve()
+
+     def get_completions(self, document, complete_event):
+         text = document.text_before_cursor
+
+         # Only complete if line starts with /
+         if not text.startswith("/"):
+             return
+
+         # Get the text after the /
+         word = text[1:]
+
+         # Import here to avoid circular imports
+         from patchpal.skills import discover_skills
+
+         try:
+             # Get all available skills
+             skills = discover_skills(repo_root=self.repo_root)
+
+             # Filter skills that match the current word
+             for skill_name in sorted(skills.keys()):
+                 if skill_name.startswith(word):
+                     # Calculate how much we need to complete
+                     yield Completion(
+                         skill_name,
+                         start_position=-len(word),
+                         display=skill_name,
+                         display_meta=skills[skill_name].description[:60] + "..."
+                         if len(skills[skill_name].description) > 60
+                         else skills[skill_name].description,
+                     )
+         except Exception:
+             # Silently fail if skills discovery fails
+             pass
+
+
+ class SmartPathCompleter(Completer):
+     """Path completer that works anywhere in the text, not just at the start."""
+
+     def __init__(self):
+         self.path_completer = PathCompleter(expanduser=True)
+
+     def get_completions(self, document, complete_event):
+         text = document.text_before_cursor
+
+         # Find the start of the current path-like token
+         # Look for common path prefixes: ./ ../ / ~/
+         import re
+
+         # Find all potential path starts
+         path_pattern = r"(?:^|[\s])([.~/][\S]*?)$"
+         match = re.search(path_pattern, text)
+
+         if match:
+             # Extract the path portion
+             path_start = match.group(1)
+
+             # Create a fake document with just the path for PathCompleter
+             fake_doc = Document(path_start, len(path_start))
+
+             # Get completions from PathCompleter
+             for completion in self.path_completer.get_completions(fake_doc, complete_event):
+                 # Use the PathCompleter's start_position directly
+                 # It's already calculated correctly relative to the cursor
+                 yield Completion(
+                     completion.text,
+                     start_position=completion.start_position,
+                     display=completion.display,
+                     display_meta=completion.display_meta,
+                 )
+
+
+ def _get_patchpal_dir() -> Path:
+     """Get the patchpal directory for this repository.
+
+     Returns the directory ~/.patchpal/<repo-name>/ where repo-specific
+     data like history and logs are stored.
+     """
+     repo_root = Path(".").resolve()
+     home = Path.home()
+     patchpal_root = home / ".patchpal"
+
+     # Use repo name (last part of path) to create unique directory
+     repo_name = repo_root.name
+     repo_dir = patchpal_root / repo_name
+
+     # Create directory if it doesn't exist
+     repo_dir.mkdir(parents=True, exist_ok=True)
+
+     return repo_dir
+
+
+ def _save_to_history_file(command: str, history_file: Path, max_entries: int = 1000):
+     """Append a command to the persistent history file.
+
+     This allows users to manually review their command history,
+     while keeping InMemoryHistory for session-only terminal scrolling.
+
+     Keeps only the last max_entries commands to prevent unbounded growth.
+     """
+     try:
+         from datetime import datetime
+
+         # Read existing entries
+         entries = []
+         if history_file.exists():
+             with open(history_file, "r", encoding="utf-8") as f:
+                 lines = f.readlines()
+             # Each entry is 2 lines (timestamp + command)
+             for i in range(0, len(lines), 2):
+                 if i + 1 < len(lines):
+                     entries.append((lines[i], lines[i + 1]))
+
+         # Add new entry
+         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+         entries.append((f"# {timestamp}\n", f"+{command}\n"))
+
+         # Keep only last N entries
+         entries = entries[-max_entries:]
+
+         # Write back
+         with open(history_file, "w", encoding="utf-8") as f:
+             for ts, cmd in entries:
+                 f.write(ts)
+                 f.write(cmd)
+     except Exception:
+         # Silently fail if history can't be written
+         pass
+
+
+ def main():
+     """Main CLI entry point for PatchPal."""
+     # Suppress warnings to keep CLI clean (e.g., Pydantic, deprecation warnings from dependencies)
+     warnings.simplefilter("ignore")
+
+     # Parse command-line arguments
+     parser = argparse.ArgumentParser(
+         description="PatchPal - Claude Code Clone",
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ Examples:
+ patchpal # Use default model
+ patchpal --model openai/gpt-4o # Use GPT-4o
+ patchpal --model anthropic/claude-opus-4 # Use Claude Opus
+ patchpal --model ollama_chat/llama3.1 # Use Ollama (local, no API key!)
+ PATCHPAL_MODEL=openai/gpt-4o patchpal # Use environment variable
+
+ Supported models: Any LiteLLM-supported model
+ - Anthropic: anthropic/claude-sonnet-4-5 (default), anthropic/claude-opus-4-5, etc.
+ - OpenAI: openai/gpt-4o, openai/gpt-3.5-turbo, etc.
+ - Ollama (local): ollama_chat/llama3.1, ollama_chat/codellama, ollama_chat/deepseek-coder, etc.
+ - Others: See https://docs.litellm.ai/docs/providers
+ """,
+     )
+     parser.add_argument(
+         "--model",
+         type=str,
+         default=None,
+         help="LiteLLM model identifier (e.g., openai/gpt-4o, anthropic/claude-opus-4, ollama_chat/llama3.1). "
+         "Can also be set via PATCHPAL_MODEL environment variable.",
+     )
+     args = parser.parse_args()
+
+     # Determine model to use (priority: CLI arg > env var > default)
+     model_id = args.model or os.getenv("PATCHPAL_MODEL") or "anthropic/claude-sonnet-4-5"
+
+     # Create the agent with the specified model
+     # LiteLLM will handle API key validation and provide appropriate error messages
+     agent = create_agent(model_id=model_id)
+
+     # Get max iterations from environment variable or use default
+     max_iterations = int(os.getenv("PATCHPAL_MAX_ITERATIONS", "100"))
+
+     # Create Rich console for markdown rendering
+     console = Console()
+
+     # Create completers for paths and skills
+     path_completer = SmartPathCompleter()
+     skill_completer = SkillCompleter()
+     # Merge completers - skill completer takes precedence for / commands
+     completer = merge_completers([skill_completer, path_completer])
+
+     # Create in-memory history (within session only, no persistence)
+     history = InMemoryHistory()
+
+     # Get history file path for manual logging
+     history_file = _get_patchpal_dir() / "history.txt"
+
+     print("=" * 80)
+     print("PatchPal - Claude Code–inspired coding and automation assistant")
+     print("=" * 80)
+     print(f"\nUsing model: {model_id}")
+
+     # Show custom prompt indicator if set
+     custom_prompt_path = os.getenv("PATCHPAL_SYSTEM_PROMPT")
+     if custom_prompt_path:
+         print(f"\033[1;36m🔧 Using custom system prompt: {custom_prompt_path}\033[0m")
+
+     print("\nType 'exit' to quit.")
+     print("Use '/status' to check context window usage, '/compact' to manually compact.")
+     print("Use 'list skills' or /skillname to invoke skills.")
+     print("Press Ctrl-C during agent execution to interrupt the agent.\n")
+
+     while True:
+         try:
+             # Flush any pending output to ensure clean prompt
+             sys.stdout.flush()
+             sys.stderr.flush()
+
+             # Print separator and prompt on fresh line to ensure visibility
+             # even if warnings/logs appear above
+             print()  # Blank line for separation
+
+             # Use prompt_toolkit for input with autocompletion
+             # FormattedText: (style, text) tuples
+             prompt_text = FormattedText([("ansibrightcyan bold", "You:"), ("", " ")])
+             user_input = pt_prompt(
+                 prompt_text,
+                 completer=completer,
+                 complete_while_typing=False,  # Only show completions on Tab
+                 history=history,  # In-memory history for this session only
+             ).strip()
+
+             # Replace newlines with spaces to prevent history file corruption
+             # This can happen if user pastes multi-line text
+             user_input = user_input.replace("\n", " ").replace("\r", " ")
+
+             # Save command to history file for manual review
+             _save_to_history_file(user_input, history_file)
+
+             # Check for exit commands
+             if user_input.lower() in ["exit", "quit", "q"]:
+                 print("\nGoodbye!")
+                 break
+
+             # Handle /status command - show context window usage
+             if user_input.lower() in ["status", "/status"]:
+                 stats = agent.context_manager.get_usage_stats(agent.messages)
+
+                 print("\n" + "=" * 70)
+                 print("\033[1;36mContext Window Status\033[0m")
+                 print("=" * 70)
+                 print(f" Model: {model_id}")
+
+                 # Show context limit info
+                 override = os.getenv("PATCHPAL_CONTEXT_LIMIT")
+                 if override:
+                     print(
+                         f" \033[1;33m⚠️ Context limit overridden: {stats['context_limit']:,} tokens (PATCHPAL_CONTEXT_LIMIT={override})\033[0m"
+                     )
+                 else:
+                     print(f" Context limit: {stats['context_limit']:,} tokens (model default)")
+
+                 print(f" Messages in history: {len(agent.messages)}")
+                 print(f" System prompt: {stats['system_tokens']:,} tokens")
+                 print(f" Conversation: {stats['message_tokens']:,} tokens")
+                 print(f" Output reserve: {stats['output_reserve']:,} tokens")
+                 print(f" Total: {stats['total_tokens']:,} tokens")
+                 print(f" Usage: {stats['usage_percent']}%")
+
+                 # Visual progress bar (cap at 100% for display)
+                 bar_width = 50
+                 display_ratio = min(stats["usage_ratio"], 1.0)  # Cap at 100% for visual
+                 filled = int(bar_width * display_ratio)
+                 empty = bar_width - filled
+                 bar = "█" * filled + "░" * empty
+
+                 # Color based on usage
+                 if stats["usage_ratio"] < 0.7:
+                     color = "\033[32m"  # Green
+                 elif stats["usage_ratio"] < 0.85:
+                     color = "\033[33m"  # Yellow
+                 else:
+                     color = "\033[31m"  # Red
+
+                 print(f" {color}[{bar}]\033[0m")
+
+                 # Show warning if over capacity
+                 if stats["usage_ratio"] > 1.0:
+                     print(
+                         f"\n \033[1;31m⚠️ Context is {stats['usage_percent']}% over capacity!\033[0m"
+                     )
+                     if not agent.enable_auto_compact:
+                         print(
+                             " \033[1;33m Enable auto-compaction or start a new session.\033[0m"
+                         )
+                     else:
+                         print(
+                             " \033[1;33m Compaction may have failed. Consider starting a new session.\033[0m"
+                         )
+
+                     # Also check if context limit is artificially low
+                     if override and int(override) < 50000:
+                         print(
+                             f" \033[1;33m Note: Context limit is overridden to a very low value ({override})\033[0m"
+                         )
+                         print(
+                             " \033[1;33m Run 'unset PATCHPAL_CONTEXT_LIMIT' to use model's actual capacity.\033[0m"
+                         )
+
+                 # Show auto-compaction status
+                 if agent.enable_auto_compact:
+                     print("\n Auto-compaction: \033[32mEnabled\033[0m (triggers at 85%)")
+                 else:
+                     print(
+                         "\n Auto-compaction: \033[33mDisabled\033[0m (set PATCHPAL_DISABLE_AUTOCOMPACT=false to enable)"
+                     )
+
+                 print("=" * 70 + "\n")
+                 continue
+
+             # Handle /compact command - manually trigger compaction
+             if user_input.lower() in ["compact", "/compact"]:
+                 print("\n" + "=" * 70)
+                 print("\033[1;36mManual Compaction\033[0m")
+                 print("=" * 70)
+
+                 # Check if auto-compaction is disabled
+                 if not agent.enable_auto_compact:
+                     print(
+                         "\033[1;33m⚠️ Auto-compaction is disabled (PATCHPAL_DISABLE_AUTOCOMPACT=true)\033[0m"
+                     )
+                     print("\033[1;33m Manual compaction will still work.\033[0m\n")
+
+                 # Check current status
+                 stats_before = agent.context_manager.get_usage_stats(agent.messages)
+                 print(
+                     f" Current usage: {stats_before['usage_percent']}% "
+                     f"({stats_before['total_tokens']:,} / {stats_before['context_limit']:,} tokens)"
+                 )
+                 print(f" Messages: {len(agent.messages)} in history")
+
+                 # Check if compaction is needed
+                 if len(agent.messages) < 5:
+                     print("\n\033[1;33m⚠️ Not enough messages to compact (need at least 5)\033[0m")
+                     print("=" * 70 + "\n")
+                     continue
+
+                 if stats_before["usage_ratio"] < 0.5:
+                     print(
+                         "\n\033[1;33m⚠️ Context usage is below 50% - compaction not recommended\033[0m"
+                     )
+                     print("\033[2m Compaction works best when context is >50% full.\033[0m")
+                     # Ask for confirmation
+                     try:
+                         confirm = pt_prompt(
+                             FormattedText([("ansiyellow", " Compact anyway? (y/n): "), ("", "")])
+                         ).strip()
+                         if confirm.lower() not in ["y", "yes"]:
+                             print("=" * 70 + "\n")
+                             continue
+                     except KeyboardInterrupt:
+                         print("\n Cancelled.")
+                         print("=" * 70 + "\n")
+                         continue
+
+                 print("\n Compacting conversation history...")
+                 agent._perform_auto_compaction()
+
+                 # Show results
+                 stats_after = agent.context_manager.get_usage_stats(agent.messages)
+                 if stats_after["total_tokens"] < stats_before["total_tokens"]:
+                     saved = stats_before["total_tokens"] - stats_after["total_tokens"]
+                     print("\n\033[1;32m✓ Compaction successful!\033[0m")
+                     print(
+                         f" Saved {saved:,} tokens "
+                         f"({stats_before['usage_percent']}% → {stats_after['usage_percent']}%)"
+                     )
+                     print(f" Messages: {len(agent.messages)} in history")
+                 else:
+                     print(
+                         "\n\033[1;33m⚠️ No tokens saved - compaction may not have been effective\033[0m"
+                     )
+
+                 print("=" * 70 + "\n")
+                 continue
+
+             # Skip empty input
+             if not user_input:
+                 continue
+
+             # Handle skill invocations (/skillname args...)
+             if user_input.startswith("/"):
+                 parts = user_input[1:].split(maxsplit=1)
+                 skill_name = parts[0]
+                 skill_args = parts[1] if len(parts) > 1 else ""
+
+                 from pathlib import Path
+
+                 from patchpal.skills import get_skill
+
+                 skill = get_skill(skill_name, repo_root=Path(".").resolve())
+
+                 if skill:
+                     print(f"\n\033[1;35m⚡ Invoking skill: {skill.name}\033[0m")
+                     print("=" * 80)
+
+                     # Pass skill instructions to agent with context
+                     prompt = f"Execute this skill:\n\n{skill.instructions}"
+                     if skill_args:
+                         prompt += f"\n\nArguments: {skill_args}"
+
+                     # Log user prompt to audit log
+                     audit_logger.info(f"USER_PROMPT: /{skill_name} {skill_args}")
+                     result = agent.run(prompt, max_iterations=max_iterations)
+
+                     print("\n" + "=" * 80)
+                     print("\033[1;32mAgent:\033[0m")
+                     print("=" * 80)
+                     console.print(Markdown(result))
+                     print("=" * 80)
+                 else:
+                     print(f"\n\033[1;31mSkill not found: {skill_name}\033[0m")
+                     print("Ask 'list skills' to see available skills.")
+                     print(
+                         "See example skills at: https://github.com/amaiya/patchpal/tree/main/examples/skills"
+                     )
+
+                 continue
+
+             # Run the agent (Ctrl-C here will interrupt agent, not exit)
+             try:
+                 print()  # Add blank line before agent output
+                 # Log user prompt to audit log
+                 audit_logger.info(f"USER_PROMPT: {user_input}")
+                 result = agent.run(user_input, max_iterations=max_iterations)
+
+                 print("\n" + "=" * 80)
+                 print("\033[1;32mAgent:\033[0m")
+                 print("=" * 80)
+                 # Render markdown output
+                 console.print(Markdown(result))
+                 print("=" * 80)
+
+             except KeyboardInterrupt:
+                 print(
+                     "\n\n\033[1;33mAgent interrupted.\033[0m Type your next command or 'exit' to quit."
+                 )
+                 continue
+
+         except KeyboardInterrupt:
+             # Ctrl-C during input prompt - show message instead of exiting
+             print("\n\n\033[1;33mUse 'exit' to quit PatchPal.\033[0m")
+             print(
+                 "\033[2m(Ctrl-C is reserved for interrupting the agent during execution)\033[0m\n"
+             )
+             continue
+         except Exception as e:
+             print(f"\n\033[1;31mError:\033[0m {e}")
+             print("Please try again or type 'exit' to quit.")
+
+
+ if __name__ == "__main__":
+     main()