patchllm 0.2.2__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. patchllm/__main__.py +0 -0
  2. patchllm/agent/__init__.py +0 -0
  3. patchllm/agent/actions.py +73 -0
  4. patchllm/agent/executor.py +57 -0
  5. patchllm/agent/planner.py +76 -0
  6. patchllm/agent/session.py +425 -0
  7. patchllm/cli/__init__.py +0 -0
  8. patchllm/cli/entrypoint.py +120 -0
  9. patchllm/cli/handlers.py +192 -0
  10. patchllm/cli/helpers.py +72 -0
  11. patchllm/interactive/__init__.py +0 -0
  12. patchllm/interactive/selector.py +100 -0
  13. patchllm/llm.py +39 -0
  14. patchllm/main.py +1 -323
  15. patchllm/parser.py +120 -64
  16. patchllm/patcher.py +118 -0
  17. patchllm/scopes/__init__.py +0 -0
  18. patchllm/scopes/builder.py +55 -0
  19. patchllm/scopes/constants.py +70 -0
  20. patchllm/scopes/helpers.py +147 -0
  21. patchllm/scopes/resolvers.py +82 -0
  22. patchllm/scopes/structure.py +64 -0
  23. patchllm/tui/__init__.py +0 -0
  24. patchllm/tui/completer.py +153 -0
  25. patchllm/tui/interface.py +703 -0
  26. patchllm/utils.py +19 -1
  27. patchllm/voice/__init__.py +0 -0
  28. patchllm/{listener.py → voice/listener.py} +8 -1
  29. patchllm-1.0.0.dist-info/METADATA +153 -0
  30. patchllm-1.0.0.dist-info/RECORD +51 -0
  31. patchllm-1.0.0.dist-info/entry_points.txt +2 -0
  32. {patchllm-0.2.2.dist-info → patchllm-1.0.0.dist-info}/top_level.txt +1 -0
  33. tests/__init__.py +0 -0
  34. tests/conftest.py +112 -0
  35. tests/test_actions.py +62 -0
  36. tests/test_agent.py +383 -0
  37. tests/test_completer.py +121 -0
  38. tests/test_context.py +140 -0
  39. tests/test_executor.py +60 -0
  40. tests/test_interactive.py +64 -0
  41. tests/test_parser.py +70 -0
  42. tests/test_patcher.py +71 -0
  43. tests/test_planner.py +53 -0
  44. tests/test_recipes.py +111 -0
  45. tests/test_scopes.py +47 -0
  46. tests/test_structure.py +48 -0
  47. tests/test_tui.py +397 -0
  48. tests/test_utils.py +31 -0
  49. patchllm/context.py +0 -238
  50. patchllm-0.2.2.dist-info/METADATA +0 -129
  51. patchllm-0.2.2.dist-info/RECORD +0 -12
  52. patchllm-0.2.2.dist-info/entry_points.txt +0 -2
  53. {patchllm-0.2.2.dist-info → patchllm-1.0.0.dist-info}/WHEEL +0 -0
  54. {patchllm-0.2.2.dist-info → patchllm-1.0.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,120 @@
1
+ import argparse
2
+ import textwrap
3
+ import os
4
+ import sys
5
+ from pathlib import Path
6
+ from rich.console import Console
7
+
8
+ from .handlers import (
9
+ handle_init, handle_scope_management, handle_file_io,
10
+ handle_main_task_flow, handle_voice_flow
11
+ )
12
+ from ..utils import load_from_py_file
13
+
14
+ console = Console()
15
+
16
def main():
    """Main entry point for the patchllm command-line tool.

    Parses CLI arguments, loads the scopes and recipes config files,
    and dispatches to exactly one handler: the agentic TUI (the default
    when no flags are given), scope management, patch-from-file or
    clipboard, the voice flow, or the non-interactive task flow.
    """
    # Deferred import keeps importing this module side-effect free.
    from dotenv import load_dotenv
    load_dotenv()

    # Config file locations are overridable via environment variables.
    scopes_file_path = os.getenv("PATCHLLM_SCOPES_FILE", "./scopes.py")
    recipes_file_path = os.getenv("PATCHLLM_RECIPES_FILE", "./recipes.py")

    parser = argparse.ArgumentParser(
        description="A CLI tool to apply code changes using an LLM.",
        formatter_class=argparse.RawTextHelpFormatter
    )

    # Argument groups only affect how --help output is organized.
    patch_group = parser.add_argument_group('Core Patching Flow')
    scope_group = parser.add_argument_group('Scope Management')
    code_io = parser.add_argument_group('Code I/O')
    options_group = parser.add_argument_group('General Options')

    patch_group.add_argument(
        "-c", "--chat", action="store_true",
        help="[DEPRECATED] Start the agentic TUI. This is now the default action."
    )
    patch_group.add_argument(
        "-in", "--interactive", action="store_true",
        help="Interactively build the context by selecting files and folders."
    )
    patch_group.add_argument(
        "-s", "--scope", type=str, default=None,
        help=textwrap.dedent("""\
        Name of the scope to use (static or dynamic).
        Dynamic: @structure, @git, @git:staged, @git:unstaged, @git:branch[:base],
                 @git:lastcommit, @git:conflicts, @recent, @dir:<path>,
                 @related:<file>, @search:"<term>", @error:"<traceback>"
        """))
    patch_group.add_argument(
        "-r", "--recipe", type=str, default=None,
        help="Name of the recipe to use from your 'recipes.py' file."
    )
    patch_group.add_argument("-t", "--task", type=str, help="The task instructions for the assistant.")
    patch_group.add_argument("-p", "--patch", action="store_true", help="Query the LLM and apply file updates.")

    scope_group.add_argument("-i", "--init", action="store_true", help="Create a default 'scopes.py' file.")
    scope_group.add_argument("-sl", "--list-scopes", action="store_true", help="List all available scopes.")
    scope_group.add_argument("-ss", "--show-scope", type=str, help="Display the settings for a specific scope.")
    scope_group.add_argument("-sa", "--add-scope", type=str, help="Add a new scope with default settings.")
    scope_group.add_argument("-sr", "--remove-scope", type=str, help="Remove a scope.")
    scope_group.add_argument("-su", "--update-scope", nargs='+', help="Update a scope. Usage: -su <scope> key=\"['val']\"")

    # nargs='?' + const: the flag may be given bare (uses const) or with a value.
    code_io.add_argument("-co", "--context-out", nargs='?', const="context.md", default=None, help="Export context to a file.")
    code_io.add_argument("-ci", "--context-in", type=str, default=None, help="Import context from a file.")
    code_io.add_argument("-tf", "--to-file", nargs='?', const="response.md", default=None, help="Save LLM response to a file.")
    code_io.add_argument("-tc", "--to-clipboard", action="store_true", help="Copy LLM response to clipboard.")
    code_io.add_argument("-ff", "--from-file", type=str, default=None, help="Apply updates from a file.")
    code_io.add_argument("-fc", "--from-clipboard", action="store_true", help="Apply updates from the clipboard.")

    options_group.add_argument("-m", "--model", type=str, default="gemini/gemini-1.5-flash", help="Model name to use.")
    options_group.add_argument("-v", "--voice", type=str, default="False", help="Enable voice interaction (True/False).")
    options_group.add_argument("-g", "--guidelines", nargs='?', const=True, default=None, help="Prepend guidelines to the context.")

    args = parser.parse_args()

    try:
        scopes = load_from_py_file(scopes_file_path, "scopes")
    except FileNotFoundError:
        scopes = {}
        # Suppress the warning for commands that don't need an existing scopes
        # file (and for the bare `patchllm` invocation, which opens the TUI).
        if not any([args.list_scopes, args.show_scope, args.add_scope, args.init, len(sys.argv) == 1]):
            console.print(f"⚠️ Scope file '{scopes_file_path}' not found. You can create one with --init.", style="yellow")
    except Exception as e:
        console.print(f"❌ Error loading scopes file: {e}", style="red")
        return

    try:
        recipes = load_from_py_file(recipes_file_path, "recipes")
    except FileNotFoundError:
        recipes = {}
        # Only worth warning about when a recipe was actually requested.
        if args.recipe:
            console.print(f"⚠️ Recipes file '{recipes_file_path}' not found.", style="yellow")
    except Exception as e:
        console.print(f"❌ Error loading recipes file: {e}", style="red")
        return

    # If no arguments are provided (or the deprecated --chat is used), start the agentic TUI.
    if len(sys.argv) == 1 or args.chat:
        from ..tui.interface import run_tui
        run_tui(args, scopes, recipes, scopes_file_path)
        return

    if args.init:
        handle_init(scopes_file_path)
        return

    if any([args.list_scopes, args.show_scope, args.add_scope, args.remove_scope, args.update_scope]):
        handle_scope_management(args, scopes, scopes_file_path, parser)
        return

    if any([args.from_file, args.from_clipboard]):
        handle_file_io(args)
        return

    # --voice is a plain string flag, so compare case-insensitively.
    if args.voice.lower() == 'true':
        handle_voice_flow(args, scopes, parser)
        return

    # Fallback to the original, non-interactive workflow for other flags.
    handle_main_task_flow(args, scopes, recipes, parser)
@@ -0,0 +1,192 @@
1
+ import pprint
2
+ import ast
3
+ import textwrap
4
+ import re
5
+ import argparse
6
+ from pathlib import Path
7
+ from rich.console import Console
8
+ from rich.panel import Panel
9
+
10
+ from ..utils import write_scopes_to_file
11
+ from ..parser import paste_response
12
+ from ..patcher import apply_external_patch
13
+ from ..scopes.builder import build_context_from_files, helpers
14
+ from ..llm import run_llm_query
15
+ from .helpers import get_system_prompt, _collect_context
16
+
17
+ console = Console()
18
+
19
def handle_init(scopes_file_path):
    """Create a starter scopes file, refusing to overwrite an existing one."""
    target = Path(scopes_file_path)
    if target.exists():
        console.print(f"⚠️ '{scopes_file_path}' already exists. Aborting.", style="yellow")
        return
    # The default scope includes every file under the current directory.
    starter = {"base": {"path": ".", "include_patterns": ["**/*"], "exclude_patterns": []}}
    write_scopes_to_file(scopes_file_path, starter)
    console.print(f"✅ Successfully created '{scopes_file_path}'.", style="green")
26
+
27
def handle_scope_management(args, scopes, scopes_file_path, parser):
    """Handles all commands related to managing scopes.

    Exactly one management flag is acted upon per invocation, in this
    precedence order: list, show, add, remove, update. Mutating commands
    persist the whole scopes dict back to the scopes file.
    """
    if args.list_scopes:
        console.print(f"Available scopes in '[bold]{scopes_file_path}[/]':", style="bold")
        if not scopes:
            console.print(f" -> No scopes found.")
        else:
            for scope_name in sorted(scopes.keys()):
                console.print(f" - {scope_name}")

    elif args.show_scope:
        scope_data = scopes.get(args.show_scope)
        if scope_data:
            console.print(Panel(pprint.pformat(scope_data, indent=2), title=f"[bold cyan]Scope: '{args.show_scope}'[/]"))
        else:
            console.print(f"❌ Scope '[bold]{args.show_scope}[/]' not found.", style="red")

    elif args.add_scope:
        if args.add_scope in scopes:
            console.print(f"❌ Scope '[bold]{args.add_scope}[/]' already exists.", style="red")
            return
        # New scopes start with a catch-all include pattern.
        scopes[args.add_scope] = {"path": ".", "include_patterns": ["**/*"], "exclude_patterns": []}
        write_scopes_to_file(scopes_file_path, scopes)
        console.print(f"✅ Scope '[bold]{args.add_scope}[/]' added.", style="green")

    elif args.remove_scope:
        if args.remove_scope not in scopes:
            console.print(f"❌ Scope '[bold]{args.remove_scope}[/]' not found.", style="red")
            return
        del scopes[args.remove_scope]
        write_scopes_to_file(scopes_file_path, scopes)
        console.print(f"✅ Scope '[bold]{args.remove_scope}[/]' removed.", style="green")

    elif args.update_scope:
        # Usage: -su <scope> key="value" [key="value" ...]
        if len(args.update_scope) < 2:
            parser.error("--update-scope requires a scope name and at least one 'key=value' pair.")
        scope_name = args.update_scope[0]
        updates = args.update_scope[1:]
        if scope_name not in scopes:
            console.print(f"❌ Scope '[bold]{scope_name}[/]' not found.", style="red")
            return
        try:
            for update in updates:
                # Values are parsed as Python literals, e.g. key="['src/**/*.py']".
                key, value_str = update.split('=', 1)
                value = ast.literal_eval(value_str)
                scopes[scope_name][key.strip()] = value
            write_scopes_to_file(scopes_file_path, scopes)
        except (ValueError, SyntaxError) as e:
            # literal_eval rejects anything that isn't a plain Python literal.
            console.print(f"❌ Error parsing update values: {e}", style="red")
77
+
78
def handle_file_io(args):
    """Apply a patch whose content comes from the clipboard or from a file."""
    content_to_patch = None
    if args.from_clipboard:
        try:
            import pyperclip
        except ImportError:
            console.print("❌ 'pyperclip' is required. `pip install pyperclip`", style="red")
            return
        content_to_patch = pyperclip.paste()
        if not content_to_patch:
            console.print("⚠️ Clipboard is empty.", style="yellow")
            return
    elif args.from_file:
        try:
            content_to_patch = Path(args.from_file).read_text(encoding="utf-8")
        except Exception as e:
            console.print(f"❌ Failed to read from file {args.from_file}: {e}", style="red")
            return

    # Reached with content only when one of the two sources succeeded.
    if content_to_patch:
        apply_external_patch(content_to_patch, Path(".").resolve())
101
+
102
def handle_main_task_flow(args, scopes, recipes, parser):
    """Handles the primary workflow of building context and querying the LLM.

    Resolves the task (explicit --task wins over --recipe), builds or
    imports the file context, optionally prepends guidelines and/or
    exports the context, then runs the query and routes the response to
    the selected single action (--patch, --to-file, or --to-clipboard).
    """
    system_prompt = get_system_prompt()
    history = [{"role": "system", "content": system_prompt}]

    # An explicit --task takes precedence over a --recipe.
    task = args.task
    if args.recipe:
        if args.task:
            console.print(f"⚠️ Both --task and --recipe provided. Using explicit --task.", style="yellow")
        else:
            task = recipes.get(args.recipe)
            if not task:
                parser.error(f"Recipe '{args.recipe}' not found in recipes file.")

    context = None
    if args.context_in:
        context = Path(args.context_in).read_text()
    else:
        context_object = _collect_context(args, scopes)
        context = context_object.get("context") if context_object else None
        if context is None and not args.guidelines:
            # A context source was requested but produced nothing;
            # _collect_context has already reported the failure.
            if any([args.scope, args.interactive]):
                return
            if task:
                parser.error("A scope (-s), interactive (-in), or context-in (-ci) is required for a task or recipe.")

    if args.guidelines:
        # Bare -g (const=True) falls back to the system prompt text.
        guidelines_content = system_prompt if args.guidelines is True else args.guidelines
        context = f"{guidelines_content}\n\n{context}" if context else guidelines_content

    if args.context_out and context:
        Path(args.context_out).write_text(context)

    if task:
        # Exactly one output action must accompany a task.
        action_flags = [args.patch, args.to_file is not None, args.to_clipboard]
        if sum(action_flags) > 1:
            parser.error("Please specify only one action: --patch, --to-file, or --to-clipboard.")
        if sum(action_flags) == 0:
            parser.error("A task or recipe was provided, but no action was specified (e.g., --patch).")

        if not context:
            console.print("Proceeding with task but without any file context.", style="yellow")

        # NOTE(review): patchllm.llm.run_llm_query is declared as
        # (messages: list[dict], model_name); this 4-argument call looks
        # inconsistent with that signature — verify which side is current.
        llm_response = run_llm_query(task, args.model, history, context)

        if llm_response:
            if args.patch:
                paste_response(llm_response)
            elif args.to_file is not None:
                Path(args.to_file).write_text(llm_response)
            elif args.to_clipboard:
                try:
                    import pyperclip
                    pyperclip.copy(llm_response)
                    console.print("✅ Copied to clipboard.", style="green")
                except ImportError:
                    console.print("❌ 'pyperclip' is required. `pip install pyperclip`", style="red")
159
+
160
def handle_voice_flow(args, scopes, parser):
    """Handles the voice-activated workflow."""
    try:
        from ..voice.listener import listen, speak
    except ImportError:
        console.print("❌ Voice dependencies are not installed.", style="red")
        console.print("   Install with: pip install 'patchllm[voice]'", style="cyan")
        return

    speak("Say your task instruction.")
    task = listen()
    if not task:
        speak("No instruction heard. Exiting.")
        return

    speak(f"You said: {task}. Should I proceed?")
    confirm = listen()
    # Anything other than an affirmative containing "yes" cancels.
    if not (confirm and "yes" in confirm.lower()):
        speak("Cancelled.")
        return

    context_object = _collect_context(args, scopes)
    context = context_object.get("context") if context_object else None
    if context is None:
        speak("Context building failed. Exiting.")
        return

    history = [{"role": "system", "content": get_system_prompt()}]

    # NOTE(review): patchllm.llm.run_llm_query is declared as
    # (messages, model_name); this 4-argument call looks inconsistent
    # with that signature — verify against patchllm/llm.py.
    llm_response = run_llm_query(task, args.model, history, context)
    if llm_response:
        paste_response(llm_response)
        speak("Changes applied.")
@@ -0,0 +1,72 @@
1
+ import textwrap
2
+ from pathlib import Path
3
+ from rich.console import Console
4
+
5
+ from ..scopes.builder import build_context, build_context_from_files
6
+
7
+ console = Console()
8
+
9
def get_system_prompt():
    """Returns the system prompt for the LLM.

    The prompt pins the exact response format — a <change_summary> block
    followed by full-content <file_path:...> blocks — that the parser
    and patcher rely on downstream; do not reword it casually.
    """
    return textwrap.dedent("""
    You are an expert pair programmer. Your purpose is to help users by modifying files based on their instructions.
    Follow these rules strictly:
    1. Before providing any file blocks, you MUST include a `<change_summary>` block. This block should contain a brief, high-level, natural language explanation of the changes you are about to make. Do not describe the changes file-by-file in this summary.
    2. Your output should contain one or more file blocks. For each file-block:
        a. Only include code for files that need to be updated / edited.
        b. For updated files, do not exclude any code even if it is unchanged code; assume the file code will be copy-pasted full in the file.
        c. Do not include verbose inline comments explaining what every small change does. Try to keep comments concise but informative, if any.
        d. Only update the relevant parts of each file relative to the provided task; do not make irrelevant edits even if you notice areas of improvements elsewhere.
        e. Do not use diffs.
    3. Make sure each file-block is returned in the following exact format. No additional text, comments, or explanations should be outside these blocks.

    Example of a complete and valid response:
    <change_summary>
    I will add a new `GET /health` endpoint to the main application file to provide a simple health check. I will also add a new test case to verify that this endpoint returns a 200 OK status.
    </change_summary>
    <file_path:/absolute/path/to/your/app.py>
    ```python
    # The full, complete content of /absolute/path/to/your/app.py goes here.
    app = Flask(__name__)

    @app.route('/health')
    def health_check():
        return "OK", 200
    ```
    <file_path:/absolute/path/to/your/test_app.py>
    ```python
    # The full, complete content of /absolute/path/to/your/test_app.py goes here.
    def test_health_check(client):
        response = client.get('/health')
        assert response.status_code == 200
    ```
    """)
44
+
45
def _collect_context(args, scopes):
    """Helper to determine and build the context from args.

    Returns the context object on success (a dict expected to carry at
    least "context" and "tree" entries — see scopes.builder), or None
    when nothing was selected or building failed.
    """
    base_path = Path(".").resolve()
    context_object = None

    if args.interactive:
        try:
            # Imported lazily: InquirerPy is an optional dependency.
            from ..interactive.selector import select_files_interactively
            selected_files = select_files_interactively(base_path)
            if selected_files:
                context_object = build_context_from_files(selected_files, base_path)
        except ImportError:
            console.print("❌ 'InquirerPy' is required for interactive mode.", style="red")
            console.print("   Install it with: pip install 'patchllm[interactive]'", style="cyan")
            return None
    elif args.scope:
        context_object = build_context(args.scope, scopes, base_path)

    if context_object:
        # Show the user which files ended up in the context.
        tree = context_object.get("tree", "")
        console.print("\n--- Context Summary ---", style="bold")
        console.print(tree)

        return context_object

    # Only report failure when the user actually requested a context source.
    if any([args.interactive, args.scope]):
        console.print("--- Context building failed or returned no files. ---", style="yellow")
    return None
File without changes
@@ -0,0 +1,100 @@
1
+ from pathlib import Path
2
+ from InquirerPy import prompt
3
+ from InquirerPy.validator import EmptyInputValidator
4
+ from rich.console import Console
5
+ import re
6
+
7
+ from ..scopes.constants import DEFAULT_EXCLUDE_EXTENSIONS, STRUCTURE_EXCLUDE_DIRS
8
+
9
+ console = Console()
10
+
11
def _build_choices_recursively(current_path: Path, base_path: Path, indent: str = "") -> list[str]:
    """Render files and folders under *current_path* as tree-style choice lines.

    Walks the directory depth-first and produces one formatted string per
    entry (folders marked 📁 with a trailing '/', files marked 📄), using
    box-drawing connectors so the checklist reads like `tree` output.
    Directories in STRUCTURE_EXCLUDE_DIRS and files whose extension is in
    DEFAULT_EXCLUDE_EXTENSIONS are skipped.
    """
    try:
        # Directories first, then files; each group alphabetical (case-insensitive).
        entries = sorted(
            (p for p in current_path.iterdir() if p.name not in STRUCTURE_EXCLUDE_DIRS),
            key=lambda p: (p.is_file(), p.name.lower()),
        )
    except FileNotFoundError:
        # The directory disappeared between discovery and traversal.
        return []

    rendered: list[str] = []
    last_index = len(entries) - 1
    for position, entry in enumerate(entries):
        at_end = position == last_index
        branch = "└── " if at_end else "├── "
        rel = entry.relative_to(base_path)

        if entry.is_dir():
            rendered.append(f"{indent}{branch}📁 {rel}/")
            child_indent = indent + ("    " if at_end else "│   ")
            rendered.extend(_build_choices_recursively(entry, base_path, child_indent))
        elif entry.is_file() and entry.suffix.lower() not in DEFAULT_EXCLUDE_EXTENSIONS:
            rendered.append(f"{indent}{branch}📄 {rel}")

    return rendered
39
+
40
def select_files_interactively(base_path: Path) -> list[Path]:
    """Run a fuzzy multi-select prompt over the project tree.

    Folders picked by the user are expanded into the non-excluded files
    they contain. Returns sorted absolute paths; an empty list means
    nothing was chosen, the prompt was cancelled, or an error occurred.
    """
    choices = _build_choices_recursively(base_path, base_path)
    if not choices:
        console.print("No selectable files or folders found in this project.", style="yellow")
        return []

    questions = [
        {
            "type": "fuzzy",
            "name": "selected_items",
            "message": "Fuzzy search and select files/folders for the context:",
            "choices": choices,
            "validate": EmptyInputValidator("You must select at least one item."),
            "transformer": lambda result: f"{len(result)} item(s) selected",
            "long_instruction": "Press <tab> or <ctrl+space> to select, <enter> to confirm.",
            "border": True,
            "cycle": False,
            "multiselect": True,
        }
    ]

    try:
        console.print("\n--- Interactive File Selection ---", style="bold")
        answers = prompt(questions, vi_mode=True)

        picked = answers.get("selected_items") if answers else []
        if not picked:
            return []

        # Each choice line ends with "📁 path/" or "📄 path"; strip the tree art.
        marker_re = re.compile(r"[📁📄]\s(.*)")
        raw_paths = [
            match.group(1).rstrip('/')
            for match in map(marker_re.search, picked)
            if match
        ]

        collected: set[Path] = set()
        for raw in raw_paths:
            resolved = (base_path / raw).resolve()
            if resolved.is_dir():
                # A selected folder contributes every non-excluded file beneath it.
                collected.update(
                    candidate
                    for candidate in resolved.rglob('*')
                    if candidate.is_file() and candidate.suffix.lower() not in DEFAULT_EXCLUDE_EXTENSIONS
                )
            elif resolved.is_file():
                collected.add(resolved)

        return sorted(collected)

    except KeyboardInterrupt:
        console.print("\nSelection cancelled by user.", style="yellow")
        return []
    except Exception as e:
        console.print(f"An error occurred during interactive selection: {e}", style="red")
        return []
patchllm/llm.py ADDED
@@ -0,0 +1,39 @@
1
+ import litellm
2
+ from rich.console import Console
3
+
4
+ console = Console()
5
+
6
def run_llm_query(messages: list[dict], model_name: str) -> str | None:
    """Send a complete message list to the LLM and return its reply text.

    Stateless: the caller owns the conversation history and nothing here
    mutates *messages*.

    Args:
        messages (list[dict]): The full list of messages for the API call.
        model_name (str): The name of the model to query.

    Returns:
        The text content of the assistant's response, or None if an error occurs.
    """
    console.print("\n--- Sending Prompt to LLM... ---", style="bold")

    try:
        # litellm's built-in retry/timeout support keeps the CLI from
        # hanging indefinitely on a dropped connection.
        reply = litellm.completion(
            model=model_name,
            messages=messages,
            timeout=120,     # 120-second timeout for the API call
            max_retries=3,   # Retry up to 3 times on failure
        )

        text = reply.choices[0].message.content
        if not text or not text.strip():
            console.print("⚠️ Response is empty.", style="yellow")
            return None
        return text
    except Exception as e:
        # Broad catch at this boundary: report any provider error rather
        # than propagating it into the CLI flow.
        console.print(f"❌ LLM communication error after retries: {e}", style="bold red")
        return None