patchllm 0.2.2__tar.gz → 1.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- patchllm-1.0.0/PKG-INFO +153 -0
- patchllm-1.0.0/README.md +108 -0
- patchllm-1.0.0/patchllm/__main__.py +0 -0
- patchllm-1.0.0/patchllm/agent/__init__.py +0 -0
- patchllm-1.0.0/patchllm/agent/actions.py +73 -0
- patchllm-1.0.0/patchllm/agent/executor.py +57 -0
- patchllm-1.0.0/patchllm/agent/planner.py +76 -0
- patchllm-1.0.0/patchllm/agent/session.py +425 -0
- patchllm-1.0.0/patchllm/cli/__init__.py +0 -0
- patchllm-1.0.0/patchllm/cli/entrypoint.py +120 -0
- patchllm-1.0.0/patchllm/cli/handlers.py +192 -0
- patchllm-1.0.0/patchllm/cli/helpers.py +72 -0
- patchllm-1.0.0/patchllm/interactive/__init__.py +0 -0
- patchllm-1.0.0/patchllm/interactive/selector.py +100 -0
- patchllm-1.0.0/patchllm/llm.py +39 -0
- patchllm-1.0.0/patchllm/main.py +4 -0
- patchllm-1.0.0/patchllm/parser.py +141 -0
- patchllm-1.0.0/patchllm/patcher.py +118 -0
- patchllm-1.0.0/patchllm/scopes/__init__.py +0 -0
- patchllm-1.0.0/patchllm/scopes/builder.py +55 -0
- patchllm-1.0.0/patchllm/scopes/constants.py +70 -0
- patchllm-1.0.0/patchllm/scopes/helpers.py +147 -0
- patchllm-1.0.0/patchllm/scopes/resolvers.py +82 -0
- patchllm-1.0.0/patchllm/scopes/structure.py +64 -0
- patchllm-1.0.0/patchllm/tui/__init__.py +0 -0
- patchllm-1.0.0/patchllm/tui/completer.py +153 -0
- patchllm-1.0.0/patchllm/tui/interface.py +703 -0
- patchllm-1.0.0/patchllm/utils.py +36 -0
- patchllm-1.0.0/patchllm/voice/__init__.py +0 -0
- {patchllm-0.2.2/patchllm → patchllm-1.0.0/patchllm/voice}/listener.py +8 -1
- patchllm-1.0.0/patchllm.egg-info/PKG-INFO +153 -0
- patchllm-1.0.0/patchllm.egg-info/SOURCES.txt +54 -0
- patchllm-1.0.0/patchllm.egg-info/entry_points.txt +2 -0
- {patchllm-0.2.2 → patchllm-1.0.0}/patchllm.egg-info/requires.txt +7 -0
- {patchllm-0.2.2 → patchllm-1.0.0}/patchllm.egg-info/top_level.txt +1 -0
- {patchllm-0.2.2 → patchllm-1.0.0}/pyproject.toml +17 -5
- patchllm-1.0.0/tests/__init__.py +0 -0
- patchllm-1.0.0/tests/conftest.py +112 -0
- patchllm-1.0.0/tests/test_actions.py +62 -0
- patchllm-1.0.0/tests/test_agent.py +383 -0
- patchllm-1.0.0/tests/test_completer.py +121 -0
- patchllm-1.0.0/tests/test_context.py +140 -0
- patchllm-1.0.0/tests/test_executor.py +60 -0
- patchllm-1.0.0/tests/test_interactive.py +64 -0
- patchllm-1.0.0/tests/test_parser.py +70 -0
- patchllm-1.0.0/tests/test_patcher.py +71 -0
- patchllm-1.0.0/tests/test_planner.py +53 -0
- patchllm-1.0.0/tests/test_recipes.py +111 -0
- patchllm-1.0.0/tests/test_scopes.py +47 -0
- patchllm-1.0.0/tests/test_structure.py +48 -0
- patchllm-1.0.0/tests/test_tui.py +397 -0
- patchllm-1.0.0/tests/test_utils.py +31 -0
- patchllm-0.2.2/PKG-INFO +0 -129
- patchllm-0.2.2/README.md +0 -90
- patchllm-0.2.2/patchllm/context.py +0 -238
- patchllm-0.2.2/patchllm/main.py +0 -326
- patchllm-0.2.2/patchllm/parser.py +0 -85
- patchllm-0.2.2/patchllm/utils.py +0 -18
- patchllm-0.2.2/patchllm.egg-info/PKG-INFO +0 -129
- patchllm-0.2.2/patchllm.egg-info/SOURCES.txt +0 -15
- patchllm-0.2.2/patchllm.egg-info/entry_points.txt +0 -2
- {patchllm-0.2.2 → patchllm-1.0.0}/LICENSE +0 -0
- {patchllm-0.2.2 → patchllm-1.0.0}/patchllm/__init__.py +0 -0
- {patchllm-0.2.2 → patchllm-1.0.0}/patchllm.egg-info/dependency_links.txt +0 -0
- {patchllm-0.2.2 → patchllm-1.0.0}/setup.cfg +0 -0
patchllm-1.0.0/PKG-INFO
ADDED
@@ -0,0 +1,153 @@
+Metadata-Version: 2.4
+Name: patchllm
+Version: 1.0.0
+Summary: An interactive agent for codebase modification using LLMs
+Author: nassimberrada
+License: MIT License
+
+Copyright (c) 2025 nassimberrada
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the “Software”), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: litellm
+Requires-Dist: python-dotenv
+Requires-Dist: rich
+Requires-Dist: prompt_toolkit
+Requires-Dist: InquirerPy
+Provides-Extra: voice
+Requires-Dist: SpeechRecognition; extra == "voice"
+Requires-Dist: pyttsx3; extra == "voice"
+Provides-Extra: url
+Requires-Dist: html2text; extra == "url"
+Provides-Extra: all
+Requires-Dist: SpeechRecognition; extra == "all"
+Requires-Dist: pyttsx3; extra == "all"
+Requires-Dist: html2text; extra == "all"
+Dynamic: license-file
+
+<p align="center">
+  <picture>
+    <source srcset="./assets/logo_dark.png" media="(prefers-color-scheme: dark)">
+    <source srcset="./assets/logo_light.png" media="(prefers-color-scheme: light)">
+    <img src="./assets/logo_light.png" alt="PatchLLM Logo" height="200">
+  </picture>
+</p>
+
+## About
+PatchLLM is an interactive command-line agent that helps you modify your codebase. It uses an LLM to plan and execute changes, allowing you to review and approve every step.
+
+## Key Features
+- **Interactive Planning:** The agent proposes a step-by-step plan before writing any code. You stay in control.
+- **Dynamic Context:** Build and modify the code context on the fly using powerful scope definitions (`@git:staged`, `@dir:src`, etc.).
+- **Mobile-First TUI:** A clean, command-driven interface with autocompletion makes it easy to use on any device.
+- **Resilient Sessions:** Automatically saves your progress so you can resume if you get disconnected.
+
+## Getting Started
+
+**1. Initialize a configuration file (optional):**
+This creates a `scopes.py` file to define reusable file collections.
+```bash
+patchllm --init
+```
+
+**2. Start the Agent:**
+Running `patchllm` with no arguments drops you into the interactive agentic TUI.
+```bash
+patchllm
+```
+
+**3. Follow the Agent Workflow:**
+Inside the TUI, you direct the agent with simple slash commands.
+
+```bash
+# 1. Set the goal
+>>> /task Add a health check endpoint to the API
+
+# 2. Build the context
+>>> /context @dir:src/api
+
+# 3. Ask the agent to generate a plan
+>>> /plan
+1. Add a new route `/health` to `src/api/routes.py`.
+2. Implement the health check logic to return a 200 OK status.
+
+# 4. Execute the first step and review the proposed changes
+>>> /run
+
+# 5. If the changes look good, approve them
+>>> /approve
+```
+
+## Agent Commands (TUI)
+| Command | Description |
+|---|---|
+| `/task <goal>` | Sets the high-level goal for the agent. |
+| `/plan [management]` | Generates a plan, or opens an interactive TUI to edit/add/remove steps. |
+| `/run [all]` | Executes the next step, or all remaining steps with `/run all`. |
+| `/approve` | Interactively selects and applies changes from the last run. |
+| `/diff [all \| file]` | Shows the full diff for the proposed changes. |
+| `/retry <feedback>` | Retries the last step with new feedback. |
+| `/skip` | Skips the current step and moves to the next. |
+| `/revert` | Reverts the changes from the last `/approve`. |
+| `/context <scope>` | Replaces the context with files from a scope. |
+| `/scopes` | Opens an interactive menu to manage your saved scopes. |
+| `/ask <question>` | Asks a question about the plan or code context. |
+| `/refine <feedback>` | Refines the plan based on new feedback or ideas. |
+| `/show [state]` | Shows the current state (goal, plan, context, history, step). |
+| `/settings` | Configures the model and API keys. |
+| `/help` | Shows the detailed help message. |
+| `/exit` | Exits the agent session. |
+
+## Headless Mode Flags
+For scripting or single-shot edits, you can still use the original flags.
+
+| Flag | Alias | Description |
+|---|---|---|
+| `--patch` | `-p` | **Main action:** Query the LLM and apply file changes. |
+| `--task` | `-t` | Provide a specific instruction to the LLM. |
+| `--scope` | `-s` | Use a static scope from `scopes.py` or a dynamic one. |
+| `--recipe` | `-r` | Use a predefined task from `recipes.py`. |
+| `--interactive` | `-in` | Interactively build the context by selecting files. |
+| `--init` | `-i` | Create a new `scopes.py` file. |
+| `--list-scopes` | `-sl` | List all available scopes. |
+| `--from-file` | `-ff` | Apply patches from a local file. |
+| `--from-clipboard` | `-fc` | Apply patches from the system clipboard. |
+| `--model` | `-m` | Specify a different model (default: `gemini/gemini-1.5-flash`). |
+| `--voice` | `-v` | Enable voice interaction (requires voice dependencies). |
+
+## Setup
+PatchLLM uses [LiteLLM](https://github.com/BerriAI/litellm). Set up your API keys (e.g., `OPENAI_API_KEY`, `GEMINI_API_KEY`) in a `.env` file.
+
+The interactive TUI requires `prompt_toolkit` and `InquirerPy`. You can install all core dependencies with:
+```bash
+pip install -r requirements.txt
+```
+
+Optional features require extra dependencies:
+```bash
+# For URL support in scopes
+pip install "patchllm[url]"
+
+# For voice commands (in headless mode)
+pip install "patchllm[voice]"
+```
+
+## License
+This project is licensed under the MIT License.

patchllm-1.0.0/README.md
ADDED
@@ -0,0 +1,108 @@
+<p align="center">
+  <picture>
+    <source srcset="./assets/logo_dark.png" media="(prefers-color-scheme: dark)">
+    <source srcset="./assets/logo_light.png" media="(prefers-color-scheme: light)">
+    <img src="./assets/logo_light.png" alt="PatchLLM Logo" height="200">
+  </picture>
+</p>
+
+## About
+PatchLLM is an interactive command-line agent that helps you modify your codebase. It uses an LLM to plan and execute changes, allowing you to review and approve every step.
+
+## Key Features
+- **Interactive Planning:** The agent proposes a step-by-step plan before writing any code. You stay in control.
+- **Dynamic Context:** Build and modify the code context on the fly using powerful scope definitions (`@git:staged`, `@dir:src`, etc.).
+- **Mobile-First TUI:** A clean, command-driven interface with autocompletion makes it easy to use on any device.
+- **Resilient Sessions:** Automatically saves your progress so you can resume if you get disconnected.
+
+## Getting Started
+
+**1. Initialize a configuration file (optional):**
+This creates a `scopes.py` file to define reusable file collections.
+```bash
+patchllm --init
+```
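The generated `scopes.py` itself is not shown in this diff; as a rough, hypothetical sketch (the variable name, scope names, and glob patterns below are assumptions, not the file `--init` actually writes), a scope is a named collection of paths you can later reference with `-s`/`--scope` or `/context`:

```python
# scopes.py — hypothetical sketch; names and patterns are placeholders
scopes = {
    "api": ["src/api/**/*.py"],             # all Python files under src/api
    "docs": ["README.md", "docs/**/*.md"],  # project documentation
}
```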
+
+**2. Start the Agent:**
+Running `patchllm` with no arguments drops you into the interactive agentic TUI.
+```bash
+patchllm
+```
+
+**3. Follow the Agent Workflow:**
+Inside the TUI, you direct the agent with simple slash commands.
+
+```bash
+# 1. Set the goal
+>>> /task Add a health check endpoint to the API
+
+# 2. Build the context
+>>> /context @dir:src/api
+
+# 3. Ask the agent to generate a plan
+>>> /plan
+1. Add a new route `/health` to `src/api/routes.py`.
+2. Implement the health check logic to return a 200 OK status.
+
+# 4. Execute the first step and review the proposed changes
+>>> /run
+
+# 5. If the changes look good, approve them
+>>> /approve
+```
+
+## Agent Commands (TUI)
+| Command | Description |
+|---|---|
+| `/task <goal>` | Sets the high-level goal for the agent. |
+| `/plan [management]` | Generates a plan, or opens an interactive TUI to edit/add/remove steps. |
+| `/run [all]` | Executes the next step, or all remaining steps with `/run all`. |
+| `/approve` | Interactively selects and applies changes from the last run. |
+| `/diff [all \| file]` | Shows the full diff for the proposed changes. |
+| `/retry <feedback>` | Retries the last step with new feedback. |
+| `/skip` | Skips the current step and moves to the next. |
+| `/revert` | Reverts the changes from the last `/approve`. |
+| `/context <scope>` | Replaces the context with files from a scope. |
+| `/scopes` | Opens an interactive menu to manage your saved scopes. |
+| `/ask <question>` | Asks a question about the plan or code context. |
+| `/refine <feedback>` | Refines the plan based on new feedback or ideas. |
+| `/show [state]` | Shows the current state (goal, plan, context, history, step). |
+| `/settings` | Configures the model and API keys. |
+| `/help` | Shows the detailed help message. |
+| `/exit` | Exits the agent session. |
+
+## Headless Mode Flags
+For scripting or single-shot edits, you can still use the original flags; a combined example follows the table.
+
+| Flag | Alias | Description |
+|---|---|---|
+| `--patch` | `-p` | **Main action:** Query the LLM and apply file changes. |
+| `--task` | `-t` | Provide a specific instruction to the LLM. |
+| `--scope` | `-s` | Use a static scope from `scopes.py` or a dynamic one. |
+| `--recipe` | `-r` | Use a predefined task from `recipes.py`. |
+| `--interactive` | `-in` | Interactively build the context by selecting files. |
+| `--init` | `-i` | Create a new `scopes.py` file. |
+| `--list-scopes` | `-sl` | List all available scopes. |
+| `--from-file` | `-ff` | Apply patches from a local file. |
+| `--from-clipboard` | `-fc` | Apply patches from the system clipboard. |
+| `--model` | `-m` | Specify a different model (default: `gemini/gemini-1.5-flash`). |
+| `--voice` | `-v` | Enable voice interaction (requires voice dependencies). |
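As a sketch of how these flags combine for a one-shot edit (the scope name `api` is a hypothetical entry from your `scopes.py`):

```bash
patchllm --patch --scope api --task "Add a /health endpoint to the API"
```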
+
+## Setup
+PatchLLM uses [LiteLLM](https://github.com/BerriAI/litellm). Set up your API keys (e.g., `OPENAI_API_KEY`, `GEMINI_API_KEY`) in a `.env` file.
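A minimal `.env` needs only the keys for the providers you actually use; the values below are placeholders:

```bash
# .env — placeholder values; never commit real keys
OPENAI_API_KEY="sk-..."
GEMINI_API_KEY="your-gemini-api-key"
```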
+
+The interactive TUI requires `prompt_toolkit` and `InquirerPy`. You can install all core dependencies with:
+```bash
+pip install -r requirements.txt
+```
+
+Optional features require extra dependencies:
+```bash
+# For URL support in scopes
+pip install "patchllm[url]"
+
+# For voice commands (in headless mode)
+pip install "patchllm[voice]"
+```
+
+## License
+This project is licensed under the MIT License.

patchllm-1.0.0/patchllm/__main__.py
File without changes

patchllm-1.0.0/patchllm/agent/__init__.py
File without changes
patchllm-1.0.0/patchllm/agent/actions.py
ADDED
@@ -0,0 +1,73 @@
+import subprocess
+from rich.console import Console
+from rich.panel import Panel
+
+console = Console()
+
+def run_tests():
+    """
+    Runs tests using pytest and displays the output.
+    """
+    console.print("\n--- Running Tests ---", style="bold yellow")
+    try:
+        process = subprocess.run(
+            ["pytest"],
+            capture_output=True,
+            text=True,
+            check=False  # We don't want to crash if tests fail
+        )
+
+        output = process.stdout + process.stderr
+
+        if process.returncode == 0:
+            title = "[bold green]✅ Tests Passed[/bold green]"
+            border_style = "green"
+        else:
+            title = "[bold red]❌ Tests Failed[/bold red]"
+            border_style = "red"
+
+        console.print(Panel(output, title=title, border_style=border_style, expand=True))
+
+    except FileNotFoundError:
+        console.print("❌ 'pytest' command not found. Is it installed and in your PATH?", style="red")
+    except Exception as e:
+        console.print(f"❌ An unexpected error occurred while running tests: {e}", style="red")
+
+
+def stage_files(files_to_stage: list[str] | None = None):
+    """
+    Stages files using git. If no files are specified, stages all changes.
+
+    Args:
+        files_to_stage (list[str], optional): A list of specific files to stage. Defaults to None.
+    """
+    command = ["git", "add"]
+    action_desc = "all changes"
+    if files_to_stage:
+        command.extend(files_to_stage)
+        action_desc = f"{len(files_to_stage)} file(s)"
+    else:
+        command.append(".")
+
+    console.print(f"\n--- Staging {action_desc} ---", style="bold yellow")
+    try:
+        process = subprocess.run(
+            command,
+            capture_output=True,
+            text=True,
+            check=True
+        )
+
+        output = process.stdout + process.stderr
+        if output:
+            console.print(output, style="dim")
+
+        console.print("✅ Files staged successfully.", style="green")
+
+    except FileNotFoundError:
+        console.print("❌ 'git' command not found. Is it installed and in your PATH?", style="red")
+    except subprocess.CalledProcessError as e:
+        console.print("❌ Failed to stage files.", style="red")
+        console.print(e.stderr)
+    except Exception as e:
+        console.print(f"❌ An unexpected error occurred while staging files: {e}", style="red")
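Both helpers are self-contained, so a caller can invoke them directly. A minimal sketch, assuming a call site such as an approve flow (the import path follows the package layout above; the file path is hypothetical):

```python
from patchllm.agent.actions import run_tests, stage_files

stage_files(["src/api/routes.py"])  # stage specific files; omit the argument to stage all changes
run_tests()                         # prints a green or red panel depending on the pytest exit code
```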
patchllm-1.0.0/patchllm/agent/executor.py
ADDED
@@ -0,0 +1,57 @@
+from ..llm import run_llm_query
+from ..parser import summarize_changes, get_diff_for_file, parse_change_summary
+
+def execute_step(step_instruction: str, history: list[dict], context: str | None, context_images: list | None, model_name: str) -> dict | None:
+    """
+    Executes a single step of the plan by calling the LLM.
+
+    Args:
+        step_instruction (str): The instruction for the current step.
+        history (list[dict]): The full conversation history.
+        context (str | None): The file context for the LLM.
+        context_images (list | None): A list of image data dictionaries for multimodal context.
+        model_name (str): The name of the LLM to use.
+
+    Returns:
+        A dictionary containing the instruction, response, and diffs, or None if it fails.
+    """
+
+    prompt_text = f"## Current Task:\n{step_instruction}"
+    if context:
+        prompt_text = f"## Context:\n{context}\n\n---\n\n{prompt_text}"
+
+    user_content = [{"type": "text", "text": prompt_text}]
+
+    if context_images:
+        for image_info in context_images:
+            user_content.append({
+                "type": "image_url",
+                "image_url": {
+                    "url": f"data:{image_info['mime_type']};base64,{image_info['content_base64']}"
+                }
+            })
+
+    # Create a temporary message history for this specific call
+    messages = history + [{"role": "user", "content": user_content}]
+
+    llm_response = run_llm_query(messages, model_name)
+
+    if not llm_response:
+        return None
+
+    change_summary = parse_change_summary(llm_response)
+    summary = summarize_changes(llm_response)
+    all_files = summary.get("modified", []) + summary.get("created", [])
+
+    diffs = []
+    for file_path in all_files:
+        diff_text = get_diff_for_file(file_path, llm_response)
+        diffs.append({"file_path": file_path, "diff_text": diff_text})
+
+    return {
+        "instruction": step_instruction,
+        "llm_response": llm_response,
+        "summary": summary,
+        "diffs": diffs,
+        "change_summary": change_summary,
+    }
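A caller consumes the dictionary that `execute_step` returns. A minimal sketch, assuming the default model named in the README (the instruction and context strings are made up):

```python
from patchllm.agent.executor import execute_step

result = execute_step(
    step_instruction="Add a /health route to the API",
    history=[],                                 # no prior conversation
    context="## File: src/api/routes.py\n...",  # concatenated file context
    context_images=None,
    model_name="gemini/gemini-1.5-flash",
)
if result:
    for diff in result["diffs"]:
        print(diff["file_path"])
        print(diff["diff_text"])
```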
patchllm-1.0.0/patchllm/agent/planner.py
ADDED
@@ -0,0 +1,76 @@
+import re
+from ..llm import run_llm_query
+
+def _get_planning_prompt(goal: str, context_tree: str) -> list[dict]:
+    """Constructs the initial prompt for the planning phase."""
+
+    system_prompt = (
+        "You are an expert software architect. Your task is to create a high-level, milestone-focused plan to "
+        "accomplish a user's goal. Break down the goal into logical, sequential steps that represent significant "
+        "pieces of functionality or architectural changes."
+        "\n\n"
+        "IMPORTANT RULES:\n"
+        "- DO NOT list individual file modifications. Instead, group related changes into a single milestone.\n"
+        "- For example, instead of a plan like '1. Add route to api.py, 2. Create logic in services.py', a better, "
+        "milestone-focused step would be '1. Implement the user authentication endpoint, including routes and server actions'.\n"
+        "- Do not write any code or implementation details in the plan.\n"
+        "- Each step should be a clear, actionable instruction for a developer.\n"
+        "- The final plan must be a numbered list."
+    )
+
+    user_prompt = (
+        "Based on my goal and the project structure below, create your plan.\n\n"
+        f"## Project Structure:\n```\n{context_tree}\n```\n\n"
+        f"## Goal:\n{goal}"
+    )
+
+    return [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": user_prompt}
+    ]
+
+def _get_refine_prompt(history: list[dict], feedback: str) -> list[dict]:
+    """Constructs the prompt for refining an existing plan."""
+    refine_instruction = (
+        "The user has provided feedback or a new idea on the plan you created. "
+        "Carefully review the entire conversation and their latest feedback. "
+        "Your task is to generate a new, complete, and improved step-by-step plan that incorporates their feedback. "
+        "The new plan should be a single, cohesive, numbered list. Do not just add to the old plan; create a new one from scratch."
+        f"\n\n## User Feedback:\n{feedback}"
+    )
+
+    return history + [{"role": "user", "content": refine_instruction}]
+
+def parse_plan_from_response(response_text: str) -> list[str] | None:
+    """Finds all lines that start with a number and a period (e.g., "1.", "2.")."""
+    if not response_text:
+        return None
+    # This is more robust than splitting by newline.
+    plan = re.findall(r"^\s*\d+\.\s+(.*)", response_text, re.MULTILINE)
+    return plan if plan else None
+
+def generate_plan_and_history(goal: str, context_tree: str, model_name: str) -> tuple[list[dict], str | None]:
+    """
+    Calls the LLM to generate an initial plan and returns the history and response.
+
+    Returns:
+        A tuple containing the initial planning history (list of messages) and the LLM's raw response text.
+    """
+    messages = _get_planning_prompt(goal, context_tree)
+    response_text = run_llm_query(messages, model_name)
+
+    if response_text:
+        messages.append({"role": "assistant", "content": response_text})
+
+    return messages, response_text
+
+def generate_refined_plan(history: list[dict], feedback: str, model_name: str) -> str | None:
+    """
+    Calls the LLM to refine a plan based on conversation history and new feedback.
+
+    Returns:
+        The LLM's raw response text containing the new plan.
+    """
+    messages = _get_refine_prompt(history, feedback)
+    response_text = run_llm_query(messages, model_name)
+    return response_text
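`parse_plan_from_response` depends only on the numbered-list convention the planning prompt enforces, so it is easy to exercise in isolation; a quick illustration with a made-up response:

```python
from patchllm.agent.planner import parse_plan_from_response

# Made-up LLM response; the regex extracts the text after each "N." prefix.
response = (
    "Here is the plan:\n"
    "1. Implement the /health route in the API layer.\n"
    "2. Add tests covering the new endpoint.\n"
)
print(parse_plan_from_response(response))
# ['Implement the /health route in the API layer.', 'Add tests covering the new endpoint.']
```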