patchllm 0.1.1__py3-none-any.whl → 0.2.2__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
patchllm/__init__.py CHANGED
@@ -1 +0,0 @@
- from .main import Assistant
patchllm/context.py CHANGED
@@ -1,8 +1,11 @@
- import os
  import glob
  import textwrap
- import sys
+ import subprocess
+ import shutil
  from pathlib import Path
+ from rich.console import Console
+
+ console = Console()

  # --- Default Settings & Templates ---

@@ -29,12 +32,19 @@ BASE_TEMPLATE = textwrap.dedent('''
  ```
  {{source_tree}}
  ```
-
+ {{url_contents}}
  Relevant Files:
  ---------------
  {{files_content}}
  ''')

+ URL_CONTENT_TEMPLATE = textwrap.dedent('''
+ URL Contents:
+ -------------
+ {{content}}
+ ''')
+
+
  # --- Helper Functions (File Discovery, Filtering, Tree Generation) ---

  def find_files(base_path: Path, include_patterns: list[str], exclude_patterns: list[str] | None = None) -> list[Path]:
@@ -70,11 +80,10 @@ def filter_files_by_keyword(file_paths: list[Path], search_words: list[str]) ->
      matching_files = []
      for file_path in file_paths:
          try:
-             # Using pathlib's read_text for cleaner code
              if any(word in file_path.read_text(encoding='utf-8', errors='ignore') for word in search_words):
                  matching_files.append(file_path)
          except Exception as e:
-             print(f"Warning: Could not read {file_path} for keyword search: {e}", file=sys.stderr)
+             console.print(f"⚠️ Could not read {file_path} for keyword search: {e}", style="yellow")
      return matching_files


@@ -86,11 +95,8 @@ def generate_source_tree(base_path: Path, file_paths: list[Path]) -> str:
      tree = {}
      for path in file_paths:
          try:
-             # Create a path relative to the intended base_path for the tree structure
              rel_path = path.relative_to(base_path)
          except ValueError:
-             # This occurs if a file (from an absolute pattern) is outside the base_path.
-             # In this case, we use the absolute path as a fallback.
              rel_path = path

          level = tree
@@ -112,25 +118,80 @@ def generate_source_tree(base_path: Path, file_paths: list[Path]) -> str:
      return f"{base_path.name}\n" + "\n".join(_format_tree(tree))


+ def fetch_and_process_urls(urls: list[str]) -> str:
+     """Downloads and converts a list of URLs to text, returning a formatted string."""
+     if not urls:
+         return ""
+
+     try:
+         import html2text
+     except ImportError:
+         console.print("⚠️ To use the URL feature, please install the required extras:", style="yellow")
+         console.print(" pip install patchllm[url]", style="cyan")
+         return ""
+
+     downloader = None
+     if shutil.which("curl"):
+         downloader = "curl"
+     elif shutil.which("wget"):
+         downloader = "wget"
+
+     if not downloader:
+         console.print("⚠️ Cannot fetch URL content: 'curl' or 'wget' not found in PATH.", style="yellow")
+         return ""
+
+     h = html2text.HTML2Text()
+     h.ignore_links = True
+     h.ignore_images = True
+
+     all_url_contents = []
+
+     console.print("\n--- Fetching URL Content... ---", style="bold")
+     for url in urls:
+         try:
+             console.print(f"Fetching [cyan]{url}[/cyan]...")
+             if downloader == "curl":
+                 command = ["curl", "-s", "-L", url]
+             else: # wget
+                 command = ["wget", "-q", "-O", "-", url]
+
+             result = subprocess.run(command, capture_output=True, text=True, check=True, timeout=15)
+             html_content = result.stdout
+             text_content = h.handle(html_content)
+             all_url_contents.append(f"<url_content:{url}>\n```\n{text_content}\n```")
+
+         except subprocess.CalledProcessError as e:
+             console.print(f"❌ Failed to fetch {url}: {e.stderr}", style="red")
+         except subprocess.TimeoutExpired:
+             console.print(f"❌ Failed to fetch {url}: Request timed out.", style="red")
+         except Exception as e:
+             console.print(f"❌ An unexpected error occurred while fetching {url}: {e}", style="red")
+
+     if not all_url_contents:
+         return ""
+
+     content_str = "\n\n".join(all_url_contents)
+     return URL_CONTENT_TEMPLATE.replace("{{content}}", content_str)
+
  # --- Main Context Building Function ---

- def build_context(config: dict) -> dict | None:
+ def build_context(scope: dict) -> dict | None:
      """
-     Builds the context string from files specified in the config.
+     Builds the context string from files specified in the scope.

      Args:
-         config (dict): The configuration for file searching.
+         scope (dict): The scope for file searching.

      Returns:
          dict: A dictionary with the source tree and formatted context, or None.
      """
-     # Resolve the base path immediately to get a predictable absolute path.
-     base_path = Path(config.get("path", ".")).resolve()
+     base_path = Path(scope.get("path", ".")).resolve()

-     include_patterns = config.get("include_patterns", [])
-     exclude_patterns = config.get("exclude_patterns", [])
-     exclude_extensions = config.get("exclude_extensions", DEFAULT_EXCLUDE_EXTENSIONS)
-     search_words = config.get("search_words", [])
+     include_patterns = scope.get("include_patterns", [])
+     exclude_patterns = scope.get("exclude_patterns", [])
+     exclude_extensions = scope.get("exclude_extensions", DEFAULT_EXCLUDE_EXTENSIONS)
+     search_words = scope.get("search_words", [])
+     urls = scope.get("urls", [])

      # Step 1: Find files
      relevant_files = find_files(base_path, include_patterns, exclude_patterns)
@@ -140,20 +201,18 @@ def build_context(config: dict) -> dict | None:
          norm_ext = {ext.lower() for ext in exclude_extensions}
          relevant_files = [p for p in relevant_files if p.suffix.lower() not in norm_ext]
          if count_before_ext > len(relevant_files):
-             print(f"Filtered {count_before_ext - len(relevant_files)} files by extension.")
+             console.print(f"Filtered {count_before_ext - len(relevant_files)} files by extension.", style="cyan")

      # Step 3: Filter by keyword
      if search_words:
          count_before_kw = len(relevant_files)
          relevant_files = filter_files_by_keyword(relevant_files, search_words)
-         print(f"Filtered {count_before_kw - len(relevant_files)} files by keyword search.")
+         console.print(f"Filtered {count_before_kw - len(relevant_files)} files by keyword search.", style="cyan")

-     if not relevant_files:
-         print("\nNo files matched the specified criteria.")
+     if not relevant_files and not urls:
+         console.print("\n⚠️ No files or URLs matched the specified criteria.", style="yellow")
          return None

-     print(f"\nFinal count of relevant files: {len(relevant_files)}.")
-
      # Generate source tree and file content blocks
      source_tree_str = generate_source_tree(base_path, relevant_files)

@@ -164,12 +223,16 @@ def build_context(config: dict) -> dict | None:
              content = file_path.read_text(encoding='utf-8')
              file_contents.append(f"<file_path:{display_path}>\n```\n{content}\n```")
          except Exception as e:
-             print(f"Warning: Could not read file {file_path}: {e}", file=sys.stderr)
+             console.print(f"⚠️ Could not read file {file_path}: {e}", style="yellow")

      files_content_str = "\n\n".join(file_contents)

+     # Fetch and process URL contents
+     url_contents_str = fetch_and_process_urls(urls)
+
      # Assemble the final context using the base template
      final_context = BASE_TEMPLATE.replace("{{source_tree}}", source_tree_str)
+     final_context = final_context.replace("{{url_contents}}", url_contents_str)
      final_context = final_context.replace("{{files_content}}", files_content_str)

      return {"tree": source_tree_str, "context": final_context}
patchllm/listener.py CHANGED
@@ -1,11 +1,13 @@
  import speech_recognition as sr
  import pyttsx3
+ from rich.console import Console

+ console = Console()
  recognizer = sr.Recognizer()
  tts_engine = pyttsx3.init()

  def speak(text):
-     print("🤖 Speaking:", text)
+     console.print(f"🤖 Speaking: {text}", style="magenta")
      tts_engine.say(text)
      tts_engine.runAndWait()

@@ -13,11 +15,11 @@ def listen(prompt=None, timeout=5):
      with sr.Microphone() as source:
          if prompt:
              speak(prompt)
-         print("🎙 Listening...")
+         console.print("🎙 Listening...", style="cyan")
          try:
              audio = recognizer.listen(source, timeout=timeout)
              text = recognizer.recognize_google(audio)
-             print(f"🗣 Recognized: {text}")
+             console.print(f"🗣 Recognized: {text}", style="cyan")
              return text
          except sr.WaitTimeoutError:
              speak("No speech detected.")
@@ -25,4 +27,4 @@ def listen(prompt=None, timeout=5):
          speak("Sorry, I didn’t catch that.")
      except sr.RequestError:
          speak("Speech recognition failed. Check your internet.")
-     return None
+     return None
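
A quick smoke test for the listener module (not part of the package) could look like this; it assumes the `voice` extras (SpeechRecognition, pyttsx3) are installed and a microphone is available.

```python
# Hypothetical smoke test for patchllm.listener; requires the voice extras and a microphone.
from patchllm.listener import listen, speak

speak("Say a short command.")
heard = listen(timeout=5)  # returns the recognized text, or None on timeout/failure
if heard:
    speak(f"You said: {heard}")
```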
patchllm/main.py CHANGED
@@ -1,235 +1,326 @@
- import sys
+ import textwrap
+ import argparse
+ import litellm
+ import pprint
+ import os
+ from dotenv import load_dotenv
+ from rich.console import Console
+ from rich.panel import Panel

  from .context import build_context
  from .parser import paste_response
  from .utils import load_from_py_file
- import textwrap
- import argparse
- import litellm

- from dotenv import load_dotenv
+ console = Console()

- load_dotenv()
+ # --- Core Functions ---

- class Assistant:
+ def collect_context(scope_name, scopes):
+     """Builds the code context from a provided scope dictionary."""
+     console.print("\n--- Building Code Context... ---", style="bold")
+     if not scopes:
+         raise FileNotFoundError("Could not find a 'scopes.py' file.")
+     selected_scope = scopes.get(scope_name)
+     if selected_scope is None:
+         raise KeyError(f"Context scope '{scope_name}' not found in provided scopes file.")
+
+     context_object = build_context(selected_scope)
+     if context_object:
+         tree, context = context_object.values()
+         console.print("--- Context Building Finished. The following files were extracted ---", style="bold")
+         console.print(tree)
+         return context
+     else:
+         console.print("--- Context Building Failed (No files found) ---", style="yellow")
+         return None
+
+ def run_llm_query(task_instructions, model_name, history, context=None):
      """
-     An assistant that builds context, interacts with an LLM, and applies code changes.
+     Assembles the final prompt, sends it to the LLM, and returns the response.
      """
-     def __init__(
-         self,
-         model_name="gemini/gemini-2.5-flash",
-         configs: dict = None,
-         configs_file = "./configs.py",
-     ):
-         """
-         Initializes the Assistant.
-         Args:
-             model_name (str): The alias for the generative model to use (must be a litellm supported model string).
-             configs (Dict[str]): A dictionary of configurations to use for building the code context.
-             configs_file (str): The path to the configurations file.
-         """
-         self.model_name = model_name
-         if configs:
-             self.configs = configs
-         else:
-             self.configs = load_from_py_file(configs_file, "configs")
-         system_prompt = textwrap.dedent("""
-             You are an expert pair programmer. Your purpose is to help users by modifying files based on their instructions.
-
-             Follow these rules strictly:
-             Your output should be a single file including all the updated files. For each file-block:
-             1. Only include code for files that need to be updated / edited.
-             2. For updated files, do not exclude any code even if it is unchanged code; assume the file code will be copy-pasted full in the file.
-             3. Do not include verbose inline comments explaining what every small change does. Try to keep comments concise but informative, if any.
-             4. Only update the relevant parts of each file relative to the provided task; do not make irrelevant edits even if you notice areas of improvements elsewhere.
-             5. Do not use diffs.
-             6. Make sure each file-block is returned in the following exact format. No additional text, comments, or explanations should be outside these blocks.
-
-             Expected format for a modified or new file:
-             <file_path:/absolute/path/to/your/file.py>
-             ```python
-             # The full, complete content of /absolute/path/to/your/file.py goes here.
-             def example_function():
-                 return "Hello, World!"
-             ```
-
-             Example of multiple files:
-             <file_path:/home/user/project/src/main.py>
-             ```python
-             print("Main application start")
-             ```
-
-             <file_path:/home/user/project/tests/test_main.py>
-             ```python
-             def test_main():
-                 assert True
-             ```
-         """)
-         self.history = [{"role": "system", "content": system_prompt}]
-
-     def collect(self, config_name):
-         """Builds the code context from a provided configuration dictionary."""
-         print("\n--- Building Code Context... ---")
-         selected_config = self.configs.get(config_name)
-         if selected_config is None:
-             raise KeyError(f"Context config '{config_name}' not found in provided configs file.")
-         context_object = build_context(selected_config)
-         if context_object:
-             tree, context = context_object.values()
-             print("--- Context Building Finished. The following files were extracted ---", file=sys.stderr)
-             print(tree)
-             return context
-         else:
-             print("--- Context Building Failed (No files found) ---", file=sys.stderr)
+     console.print("\n--- Sending Prompt to LLM... ---", style="bold")
+     final_prompt = task_instructions
+     if context:
+         final_prompt = f"{context}\n\n{task_instructions}"
+
+     history.append({"role": "user", "content": final_prompt})
+
+     try:
+         with console.status("[bold cyan]Waiting for LLM response...", spinner="dots"):
+             response = litellm.completion(model=model_name, messages=history)
+
+         assistant_response_content = response.choices[0].message.content
+         history.append({"role": "assistant", "content": assistant_response_content})
+
+         if not assistant_response_content or not assistant_response_content.strip():
+             console.print("⚠️ Response is empty. Nothing to process.", style="yellow")
              return None
+
+         return assistant_response_content

-     def update(self, task_instructions, context=None):
-         """
-         Assembles the final prompt and sends it to the LLM to generate code,
-         then in-place update the files from the response.
-         Args:
-             task_instructions (str): Specific instructions for this run.
-             context (str, optional): The code context. If None, only the task is sent.
-         """
-         print("\n--- Sending Prompt to LLM... ---")
-         final_prompt = task_instructions
-         if context:
-             final_prompt = f"{context}\n\n{task_instructions}"
+     except Exception as e:
+         history.pop()  # Keep history clean on error
+         raise RuntimeError(f"An error occurred while communicating with the LLM via litellm: {e}") from e
+
+ def write_to_file(file_path, content):
+     """Utility function to write content to a file."""
+     console.print(f"Writing to {file_path}..", style="cyan")
+     try:
+         with open(file_path, "w", encoding="utf-8") as file:
+             file.write(content)
+         console.print(f'✅ Content saved to {file_path}', style="green")
+     except Exception as e:
+         raise RuntimeError(f"Failed to write to file {file_path}: {e}") from e
+
+ def read_from_file(file_path):
+     """Utility function to read and return the content of a file."""
+     console.print(f"Importing from {file_path}..", style="cyan")
+     try:
+         with open(file_path, "r", encoding="utf-8") as file:
+             content = file.read()
+         console.print("✅ Finished reading file.", style="green")
+         return content
+     except Exception as e:
+         raise RuntimeError(f"Failed to read from file {file_path}: {e}") from e
+
+ def create_new_scope(scopes, scopes_file_str):
+     """Interactively creates a new scope and saves it to the specified scopes file."""
+     console.print(f"\n--- Creating a new scope in '{scopes_file_str}' ---", style="bold")
+
+     try:
+         name = console.input("[bold]Enter a name for the new scope: [/]").strip()
+         if not name:
+             console.print("❌ Scope name cannot be empty.", style="red")
+             return
+
+         if name in scopes:
+             overwrite = console.input(f"Scope '[bold]{name}[/]' already exists. Overwrite? (y/n): ").lower()
+             if overwrite not in ['y', 'yes']:
+                 console.print("Operation cancelled.", style="yellow")
+                 return
+
+         path = console.input("[bold]Enter the base path[/] (e.g., '.' for current directory): ").strip() or "."

-         self.history.append({"role": "user", "content": final_prompt})
+         console.print("\nEnter comma-separated glob patterns for files to include.")
+         include_raw = console.input('[cyan]> (e.g., "[bold]**/*.py, src/**/*.js[/]"): [/]').strip()
+         include_patterns = [p.strip() for p in include_raw.split(',') if p.strip()]
+
+         console.print("\nEnter comma-separated glob patterns for files to exclude (optional).")
+         exclude_raw = console.input('[cyan]> (e.g., "[bold]**/tests/*, venv/*[/]"): [/]').strip()
+         exclude_patterns = [p.strip() for p in exclude_raw.split(',') if p.strip()]

-         try:
-             response = litellm.completion(model=self.model_name, messages=self.history)
-
-             # Extract the message content from the response
-             assistant_response_content = response.choices[0].message.content
-
-             # Add the assistant's response to the history for future context
-             self.history.append({"role": "assistant", "content": assistant_response_content})
+         new_scope_data = {
+             "path": path,
+             "include_patterns": include_patterns,
+             "exclude_patterns": exclude_patterns
+         }

-             if not assistant_response_content or not assistant_response_content.strip():
-                 print("Response is empty. Nothing to paste.")
-                 return
-
-             print("\n--- Updating files ---")
-             paste_response(assistant_response_content)
-             print("--- File Update Process Finished ---")
+         scopes[name] = new_scope_data

-         except Exception as e:
-             # If an error occurs, remove the last user message to keep history clean
-             self.history.pop()
-             raise RuntimeError(f"An error occurred while communicating with the LLM via litellm: {e}") from e
-
-     def write(self, file_path, context):
-         """Utility function to write the context to a file"""
-         print("Exporting context..")
-         with open(file_path, "w") as file:
-             file.write(context)
-         print(f'Context exported to {file_path.split("/")[-1]}')
-
-     def read(self, file_path):
-         """Utility function to read and return the content of a file."""
-         print("Importing from file..")
-         try:
-             with open(file_path, "r") as file:
-                 print("Finished reading")
-                 content = file.read()
-                 return content
-         except Exception as e:
-             raise RuntimeError(f"Failed to read from file {file_path}: {e}") from e
+         with open(scopes_file_str, "w", encoding="utf-8") as f:
+             f.write("# scopes.py\n")
+             f.write("scopes = ")
+             f.write(pprint.pformat(scopes, indent=4))
+             f.write("\n")

+         console.print(f"\n✅ Successfully created and saved scope '[bold]{name}[/]' in '[bold]{scopes_file_str}[/]'.", style="green")
+
+     except KeyboardInterrupt:
+         console.print("\n\n⚠️ Scope creation cancelled by user.", style="yellow")
+         return
+
  def main():
+     """
+     Main entry point for the patchllm command-line tool.
+     """
+     load_dotenv()
+
+     scopes_file_path = os.getenv("PATCHLLM_SCOPES_FILE", "./scopes.py")
+
      parser = argparse.ArgumentParser(
-         description="Run the Assistant tool to apply code changes using an LLM."
-     )
-     parser.add_argument(
-         "--config",
-         type=str,
-         default=None,
-         help="Name of the config key to use from the configs.py file."
-     )
-     parser.add_argument(
-         "--task",
-         type=str,
-         default=None,
-         help="The task instructions to guide the assistant."
-     )
-     parser.add_argument(
-         "--context-out",
-         type=str,
-         default=None,
-         help="Optional path to export the generated context to a file."
-     )
-     parser.add_argument(
-         "--context-in",
-         type=str,
-         default=None,
-         help="Optional path to import a previously saved context from a file."
-     )
-     parser.add_argument(
-         "--model",
-         type=str,
-         default="gemini/gemini-2.5-flash",
-         help="Optional model name to override the default model."
-     )
-     parser.add_argument(
-         "--from-file",
-         type=str,
-         default=None,
-         help="File path for a file with pre-formatted updates."
-     )
-     parser.add_argument(
-         "--update",
-         type=str,
-         default="True",
-         help="Whether to pass the input context to the llm to update the files."
-     )
-     parser.add_argument(
-         "--voice",
-         type=str,
-         default="False",
-         help="Whether to interact with the script using voice commands."
+         description="A CLI tool to apply code changes using an LLM.",
+         formatter_class=argparse.RawTextHelpFormatter
      )

+     # --- Group: Core Patching Flow ---
+     patch_group = parser.add_argument_group('Core Patching Flow')
+     patch_group.add_argument("-s", "--scope", type=str, default=None, help="Name of the scope to use from the scopes file.")
+     patch_group.add_argument("-t", "--task", type=str, default=None, help="The task instructions to guide the assistant.")
+     patch_group.add_argument("-p", "--patch", action="store_true", help="Query the LLM and directly apply the file updates from the response. Requires --task.")
+
+     # --- Group: Scope Management ---
+     scope_group = parser.add_argument_group('Scope Management')
+     scope_group.add_argument("-i", "--init", action="store_true", help="Create a new scope interactively.")
+     scope_group.add_argument("-sl", "--list-scopes", action="store_true", help="List all available scopes from the scopes file and exit.")
+     scope_group.add_argument("-ss", "--show-scope", type=str, help="Display the settings for a specific scope and exit.")
+
+     # --- Group: I/O Utils ---
+     code_io = parser.add_argument_group('Code I/O')
+     code_io.add_argument("-co", "--context-out", nargs='?', const="context.md", default=None, help="Export the generated context to a file. Defaults to 'context.md'.")
+     code_io.add_argument("-ci", "--context-in", type=str, default=None, help="Import a previously saved context from a file.")
+     code_io.add_argument("-tf", "--to-file", nargs='?', const="response.md", default=None, help="Query the LLM and save the response to a file. Requires --task. Defaults to 'response.md'.")
+     code_io.add_argument("-tc", "--to-clipboard", action="store_true", help="Query the LLM and save the response to the clipboard. Requires --task.")
+     code_io.add_argument("-ff", "--from-file", type=str, default=None, help="Apply code updates directly from a file.")
+     code_io.add_argument("-fc", "--from-clipboard", action="store_true", help="Apply code updates directly from the clipboard.")
+
+     # --- Group: General Options ---
+     options_group = parser.add_argument_group('General Options')
+     options_group.add_argument("-m", "--model", type=str, default="gemini/gemini-2.5-flash", help="Model name to use (e.g., 'gpt-4o', 'claude-3-sonnet').")
+     options_group.add_argument("-v", "--voice", type=str, default="False", help="Enable voice interaction for providing task instructions. (True/False)")
+
      args = parser.parse_args()

-     assistant = Assistant(model_name=args.model)
+     try:
+         scopes = load_from_py_file(scopes_file_path, "scopes")
+     except FileNotFoundError:
+         scopes = {}
+         if not any([args.init, args.list_scopes, args.show_scope]):
+             console.print(f"⚠️ Scope file '{scopes_file_path}' not found. You can create one with the --init flag.", style="yellow")

-     # Handle voice input
+
+     if args.list_scopes:
+         console.print(f"Available scopes in '[bold]{scopes_file_path}[/]':", style="bold")
+         if not scopes:
+             console.print(f" -> No scopes found or '{scopes_file_path}' is missing.")
+         else:
+             for scope_name in scopes:
+                 console.print(f" - {scope_name}")
+         return
+
+     if args.show_scope:
+         scope_name = args.show_scope
+         if not scopes:
+             console.print(f"⚠️ Scope file '{scopes_file_path}' not found or is empty.", style="yellow")
+             return
+
+         scope_data = scopes.get(scope_name)
+         if scope_data:
+             pretty_scope = pprint.pformat(scope_data, indent=2)
+             console.print(
+                 Panel(
+                     pretty_scope,
+                     title=f"[bold cyan]Scope: '{scope_name}'[/]",
+                     subtitle=f"[dim]from {scopes_file_path}[/dim]",
+                     border_style="blue"
+                 )
+             )
+         else:
+             console.print(f"❌ Scope '[bold]{scope_name}[/]' not found in '{scopes_file_path}'.", style="red")
+         return
+
+     if args.init:
+         create_new_scope(scopes, scopes_file_path)
+         return
+
+     if args.from_clipboard:
+         try:
+             import pyperclip
+             updates = pyperclip.paste()
+             if updates:
+                 console.print("--- Parsing updates from clipboard ---", style="bold")
+                 paste_response(updates)
+             else:
+                 console.print("⚠️ Clipboard is empty. Nothing to parse.", style="yellow")
+         except ImportError:
+             console.print("❌ The 'pyperclip' library is required for clipboard functionality.", style="red")
+             console.print("Please install it using: pip install pyperclip", style="cyan")
+         except Exception as e:
+             console.print(f"❌ An error occurred while reading from the clipboard: {e}", style="red")
+         return
+
+     if args.from_file:
+         updates = read_from_file(args.from_file)
+         paste_response(updates)
+         return
+
+     system_prompt = textwrap.dedent("""
+         You are an expert pair programmer. Your purpose is to help users by modifying files based on their instructions.
+         Follow these rules strictly:
+         Your output should be a single file including all the updated files. For each file-block:
+         1. Only include code for files that need to be updated / edited.
+         2. For updated files, do not exclude any code even if it is unchanged code; assume the file code will be copy-pasted full in the file.
+         3. Do not include verbose inline comments explaining what every small change does. Try to keep comments concise but informative, if any.
+         4. Only update the relevant parts of each file relative to the provided task; do not make irrelevant edits even if you notice areas of improvements elsewhere.
+         5. Do not use diffs.
+         6. Make sure each file-block is returned in the following exact format. No additional text, comments, or explanations should be outside these blocks.
+         Expected format for a modified or new file:
+         <file_path:/absolute/path/to/your/file.py>
+         ```python
+         # The full, complete content of /absolute/path/to/your/file.py goes here.
+         def example_function():
+             return "Hello, World!"
+         ```
+     """)
+     history = [{"role": "system", "content": system_prompt}]
+
+     context = None
      if args.voice not in ["False", "false"]:
          from .listener import listen, speak
-
          speak("Say your task instruction.")
          task = listen()
          if not task:
              speak("No instruction heard. Exiting.")
              return
-
          speak(f"You said: {task}. Should I proceed?")
          confirm = listen()
          if confirm and "yes" in confirm.lower():
-             context = assistant.collect(args.config)
-             assistant.update(task_instructions=task, context=context)
-             speak("Changes applied.")
+             if not args.scope:
+                 parser.error("A --scope name is required when using --voice.")
+             context = collect_context(args.scope, scopes)
+             llm_response = run_llm_query(task, args.model, history, context)
+             if llm_response:
+                 paste_response(llm_response)
+                 speak("Changes applied.")
          else:
              speak("Cancelled.")
          return

-     # Parse updates from a local file
-     if args.from_file:
-         updates = assistant.read(args.from_file)
-         paste_response(updates)
-         return
+     # --- Main LLM Task Logic ---
+     if args.task:
+         action_flags = [args.patch, args.to_file is not None, args.to_clipboard]
+         if sum(action_flags) == 0:
+             parser.error("A task was provided, but no action was specified. Use --patch, --to-file, or --to-clipboard.")
+         if sum(action_flags) > 1:
+             parser.error("Please specify only one action: --patch, --to-file, or --to-clipboard.")

-     # Otherwise generate updates from llm response
-     if args.context_in:
-         context = assistant.read(args.context_in)
-     else:
-         context = assistant.collect(args.config)
-         if args.context_out:
-             assistant.write(args.context_out, context)
-     if not args.update in ["False", "false"]:
-         assistant.update(task_instructions=args.task, context=context)
+         if args.context_in:
+             context = read_from_file(args.context_in)
+         else:
+             if not args.scope:
+                 parser.error("A --scope name is required to build context for a task.")
+             context = collect_context(args.scope, scopes)
+             if context and args.context_out:
+                 write_to_file(args.context_out, context)
+
+         if not context:
+             console.print("Proceeding with task but without any file context.", style="yellow")
+
+         llm_response = run_llm_query(args.task, args.model, history, context)
+
+         if llm_response:
+             if args.patch:
+                 console.print("\n--- Updating files ---", style="bold")
+                 paste_response(llm_response)
+                 console.print("--- File Update Process Finished ---", style="bold")
+
+             elif args.to_file is not None:
+                 write_to_file(args.to_file, llm_response)
+
+             elif args.to_clipboard:
+                 try:
+                     import pyperclip
+                     pyperclip.copy(llm_response)
+                     console.print("✅ Copied LLM response to clipboard.", style="green")
+                 except ImportError:
+                     console.print("❌ The 'pyperclip' library is required for clipboard functionality.", style="red")
+                     console.print("Please install it using: pip install pyperclip", style="cyan")
+                 except Exception as e:
+                     console.print(f"❌ An error occurred while copying to the clipboard: {e}", style="red")
+
+     elif args.scope and args.context_out:
+         context = collect_context(args.scope, scopes)
+         if context:
+             write_to_file(args.context_out, context)

  if __name__ == "__main__":
      main()
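
Since the `Assistant` class is gone, a rough programmatic equivalent of the old `collect`/`update` flow, using the new module-level functions, might look like this. The scope, task, and shortened system prompt are placeholder assumptions (the CLI builds a much stricter system prompt), and the call requires a model API key configured for litellm.

```python
# Hypothetical script-level driver for the refactored patchllm.main functions.
from patchllm.main import collect_context, run_llm_query
from patchllm.parser import paste_response

history = [{"role": "system", "content": "You are an expert pair programmer."}]  # placeholder prompt
scopes = {"default": {"path": ".", "include_patterns": ["**/*.py"]}}             # placeholder scope

context = collect_context("default", scopes)          # prints the source tree, returns the context
response = run_llm_query("Add type hints to main().", # task instructions
                         "gemini/gemini-2.5-flash",   # default model in this release
                         history, context)
if response:
    paste_response(response)  # write the returned <file_path:...> blocks to disk
```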
patchllm/parser.py CHANGED
@@ -1,5 +1,10 @@
  import re
  from pathlib import Path
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.text import Text
+
+ console = Console()

  def paste_response(response_content):
      """
@@ -15,7 +20,10 @@ def paste_response(response_content):
      )

      matches = pattern.finditer(response_content)
-     files_processed = 0
+
+     files_written = []
+     files_skipped = []
+     files_failed = []
      found_matches = False

      for match in matches:
@@ -24,49 +32,54 @@ def paste_response(response_content):
          code_content = match.group(2)

          if not file_path_str:
-             print("Warning: Found a code block with an empty file path. Skipping.")
+             console.print("⚠️ Found a code block with an empty file path. Skipping.", style="yellow")
              continue

-         print(f"Found path in response: '{file_path_str}'")
+         console.print(f"Found path in response: '[cyan]{file_path_str}[/]'")
          raw_path = Path(file_path_str)

-         # Determine the final target path.
-         # If the path from the LLM is absolute, use it directly.
-         # If it's relative, resolve it against the current working directory.
          if raw_path.is_absolute():
              target_path = raw_path
          else:
              target_path = Path.cwd() / raw_path

-         # Normalize the path to resolve any ".." or "." segments.
          target_path = target_path.resolve()

          try:
-             # Ensure parent directory exists
              target_path.parent.mkdir(parents=True, exist_ok=True)

-             # If file exists, compare content to avoid unnecessary overwrites
              if target_path.exists():
                  with open(target_path, 'r', encoding='utf-8') as existing_file:
                      if existing_file.read() == code_content:
-                         print(f" -> No changes for '{target_path}', skipping.")
+                         console.print(f" -> No changes for '[cyan]{target_path}[/]', skipping.", style="dim")
+                         files_skipped.append(target_path)
                          continue

-             # Write the extracted code to the file
              with open(target_path, 'w', encoding='utf-8') as outfile:
                  outfile.write(code_content)

-             print(f" -> Wrote {len(code_content)} bytes to '{target_path}'")
-             files_processed += 1
+             console.print(f" -> Wrote {len(code_content)} bytes to '[cyan]{target_path}[/]'", style="green")
+             files_written.append(target_path)

          except OSError as e:
-             print(f" -> Error writing file '{target_path}': {e}")
+             console.print(f" -> Error writing file '[cyan]{target_path}[/]': {e}", style="red")
+             files_failed.append(target_path)
          except Exception as e:
-             print(f" -> An unexpected error occurred for file '{target_path}': {e}")
+             console.print(f" -> An unexpected error occurred for file '[cyan]{target_path}[/]': {e}", style="red")
+             files_failed.append(target_path)

+     summary_text = Text()
      if not found_matches:
-         print("\nNo file paths and code blocks matching the expected format were found in the response.")
-     elif files_processed > 0:
-         print(f"\nSuccessfully processed {files_processed} file(s).")
+         summary_text.append("No file paths and code blocks matching the expected format were found in the response.", style="yellow")
      else:
-         print("\nFound matching blocks, but no files were written.")
+         if files_written:
+             summary_text.append(f"Successfully wrote {len(files_written)} file(s).\n", style="green")
+         if files_skipped:
+             summary_text.append(f"Skipped {len(files_skipped)} file(s) (no changes).\n", style="cyan")
+         if files_failed:
+             summary_text.append(f"Failed to write {len(files_failed)} file(s).\n", style="red")
+
+         if not any([files_written, files_skipped, files_failed]):
+             summary_text.append("Found matching blocks, but no files were processed.", style="yellow")
+
+     console.print(Panel(summary_text, title="[bold]Summary[/bold]", border_style="blue"))
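
For reference, a response in the exact `<file_path:...>` block format that `paste_response` consumes can be fed to it directly; the path and file content below are made up.

```python
# Hypothetical input for patchllm.parser.paste_response; path and content are made up.
from patchllm.parser import paste_response

fence = "`" * 3  # build the code fence without writing a literal one in this example
response = (
    "<file_path:/tmp/patchllm_demo/hello.py>\n"
    f"{fence}python\n"
    'print("Hello, World!")\n'
    f"{fence}"
)
paste_response(response)  # creates the file, then prints a Rich summary panel
```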
patchllm-0.2.2.dist-info/METADATA ADDED
@@ -0,0 +1,129 @@
+ Metadata-Version: 2.4
+ Name: patchllm
+ Version: 0.2.2
+ Summary: Lightweight tool to manage contexts and update code with LLMs
+ Author: nassimberrada
+ License: MIT License
+
+ Copyright (c) 2025 nassimberrada
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the “Software”), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: litellm
+ Requires-Dist: python-dotenv
+ Requires-Dist: rich
+ Provides-Extra: voice
+ Requires-Dist: SpeechRecognition; extra == "voice"
+ Requires-Dist: pyttsx3; extra == "voice"
+ Provides-Extra: url
+ Requires-Dist: html2text; extra == "url"
+ Dynamic: license-file
+
+ <p align="center">
+   <picture>
+     <source srcset="./assets/logo_dark.png" media="(prefers-color-scheme: dark)">
+     <source srcset="./assets/logo_light.png" media="(prefers-color-scheme: light)">
+     <img src="./assets/logo_light.png" alt="PatchLLM Logo" height="200">
+   </picture>
+ </p>
+
+ ## About
+ PatchLLM is a command-line tool that lets you flexibly build LLM context from your codebase using glob patterns, URLs, and keyword searches. It then automatically applies file edits directly from the LLM's response.
+
+ ## Usage
+ PatchLLM is designed to be used directly from your terminal. The core workflow is to define a **scope** of files, provide a **task**, and choose an **action** (like patching files directly).
+
+ ### 1. Initialize a Scope
+ The easiest way to get started is to run the interactive initializer. This will create a `scopes.py` file for you, which holds your saved scopes.
+
+ ```bash
+ patchllm --init
+ ```
+
+ This will guide you through creating your first scope, including setting a base path and file patterns. You can add multiple scopes to this file for different projects or tasks.
+
+ A generated `scopes.py` might look like this:
+ ```python
+ # scopes.py
+ scopes = {
+     "default": {
+         "path": ".",
+         "include_patterns": ["**/*.py"],
+         "exclude_patterns": ["**/tests/*", "venv/*"],
+         "urls": ["https://docs.python.org/3/library/argparse.html"]
+     },
+     "docs": {
+         "path": "./docs",
+         "include_patterns": ["**/*.md"],
+     }
+ }
+ ```
+
+ ### 2. Run a Task
+ Use the `patchllm` command with a scope, a task, and an action flag like `--patch` (`-p`).
+
+ ```bash
+ # Apply a change using the 'default' scope and the --patch action
+ patchllm -s default -t "Add type hints to the main function in main.py" -p
+ ```
+
+ The tool will then:
+ 1. Build a context from the files and URLs matching your `default` scope.
+ 2. Send the context and your task to the configured LLM.
+ 3. Parse the response and automatically write the changes to the relevant files.
+
+ ### All Commands & Options
+
+ #### Core Patching Flow
+ * `-s, --scope <name>`: Name of the scope to use from your `scopes.py` file.
+ * `-t, --task "<instruction>"`: The task instruction for the LLM.
+ * `-p, --patch`: Query the LLM and directly apply the file updates from the response. **This is the main action flag.**
+
+ #### Scope Management
+ * `-i, --init`: Create a new scope interactively.
+ * `-sl, --list-scopes`: List all available scopes from your `scopes.py` file.
+ * `-ss, --show-scope <name>`: Display the settings for a specific scope.
+
+ #### I/O & Context Management
+ * `-co, --context-out [filename]`: Export the generated context to a file (defaults to `context.md`) instead of running a task.
+ * `-ci, --context-in <filename>`: Use a previously saved context file as input for a task.
+ * `-tf, --to-file [filename]`: Send the LLM response to a file (defaults to `response.md`) instead of patching directly.
+ * `-tc, --to-clipboard`: Copy the LLM response to the clipboard.
+ * `-ff, --from-file <filename>`: Apply patches from a local file instead of an LLM response.
+ * `-fc, --from-clipboard`: Apply patches directly from your clipboard content.
+
+ #### General Options
+ * `-m, --model <model_name>`: Specify a different model (e.g., `gpt-4o`). Defaults to `gemini/gemini-2.5-flash`.
+ * `-v, --voice`: Enable voice recognition to provide the task instruction.
+
+ ### Setup
+
+ PatchLLM uses [LiteLLM](https://github.com/BerriAI/litellm) under the hood. Please refer to their documentation for setting up API keys (e.g., `OPENAI_API_KEY`, `GEMINI_API_KEY`) in a `.env` file and for a full list of available models.
+
+ To use the voice feature (`--voice`), you will need to install extra dependencies:
+ ```bash
+ pip install "speechrecognition>=3.10" "pyttsx3>=2.90"
+ # Note: speechrecognition may require PyAudio, which might have system-level dependencies.
+ ```
+
+ ## License
+
+ This project is licensed under the MIT License. See the `LICENSE` file for details.
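
Two details the README above does not spell out: `build_context` also honors `search_words` and `exclude_extensions` keys in a scope, and the `urls` feature needs the `url` extra (`pip install "patchllm[url]"`) plus `curl` or `wget` on the PATH. A sketch of a scope using those keys, with illustrative values:

```python
# scopes.py — illustrative entry using the optional keys read by build_context.
scopes = {
    "narrow": {
        "path": ".",
        "include_patterns": ["**/*.py"],
        "exclude_extensions": [".pyc"],  # filtered out before reading file contents
        "search_words": ["argparse"],    # keep only files mentioning these words
        "urls": [],                      # fetched with curl/wget, converted by html2text
    }
}
```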
patchllm-0.2.2.dist-info/RECORD ADDED
@@ -0,0 +1,12 @@
+ patchllm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ patchllm/context.py,sha256=_05amx0WgmHGhJrjB72K-QaWEP6u7vIAanhNjqL7YtQ,8503
+ patchllm/listener.py,sha256=VjQ_CrSRT4-PolXAAradPKyt8NSUaUQwvgPNH7Oi9q0,968
+ patchllm/main.py,sha256=y5OGNXEvRWTlUpxj9N6j3Ryhko-XVy_NkQ5axhCzljI,14944
+ patchllm/parser.py,sha256=DNcf9iUH8umExfK78CSIwac1Bbu7K9iE3754y7CvYzs,3229
+ patchllm/utils.py,sha256=hz28hd017gRGT632VQAYLPdX0KAS1GLvZzeUDCKbLc0,647
+ patchllm-0.2.2.dist-info/licenses/LICENSE,sha256=vZxgIRNxffjkTV2NWLemgYjDRu0hSMTyFXCZ1zEWbUc,1077
+ patchllm-0.2.2.dist-info/METADATA,sha256=xr-iByWSfelUa5xwGgd7G_mROO17ZtxU9QVILbfZQEc,5689
+ patchllm-0.2.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ patchllm-0.2.2.dist-info/entry_points.txt,sha256=xm-W7FKOQd3o9RgK_4krVnO2sC8phpYxDCobf0htLiU,48
+ patchllm-0.2.2.dist-info/top_level.txt,sha256=SLIZj9EhBXbSnYrbnV8EjL-OfNz-hXRwABCPCjE5Fas,9
+ patchllm-0.2.2.dist-info/RECORD,,
patchllm-0.1.1.dist-info/METADATA DELETED
@@ -1,87 +0,0 @@
- Metadata-Version: 2.4
- Name: patchllm
- Version: 0.1.1
- Summary: Lightweight tool to manage contexts and update code with LLMs
- Author: nassimberrada
- License: MIT License
-
- Copyright (c) 2025 nassimberrada
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the “Software”), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: litellm
- Requires-Dist: python-dotenv
- Provides-Extra: voice
- Requires-Dist: SpeechRecognition; extra == "voice"
- Requires-Dist: pyttsx3; extra == "voice"
- Dynamic: license-file
-
- <p align="center">
-   <picture>
-     <source srcset="./assets/logo_dark.png" media="(prefers-color-scheme: dark)">
-     <source srcset="./assets/logo_light.png" media="(prefers-color-scheme: light)">
-     <img src="./assets/logo_light.png" alt="PatchLLM Logo" height="200">
-   </picture>
- </p>
-
- ## About
- PatchLLM lets you flexibly build LLM context from your codebase using search patterns, and automatically edit files from the LLM response in a couple lines of code.
-
- ## Usage
- Here's a basic example of how to use the `Assistant` class:
-
- ```python
- from main import Assistant
-
- assistant = Assistant()
-
- context = assistant.collect(config_name="default")
- >> The following files were extracted:
- >> my_project
- >> ├── README.md
- >> ├── configs.py
- >> ├── context.py
- >> ├── main.py
- >> ├── parser.py
- >> ├── requirements.txt
- >> ├── systems.py
- >> └── utils.py
-
- assistant.update("Fix any bug in these files", context=context)
- >> Wrote 5438 bytes to '/my_project/context.py'
- >> Wrote 1999 bytes to '/my_project/utils.py'
- >> Wrote 2345 bytes to '/my_project/main.py'
- ```
-
- You can decide which files to include / exclude from the prompt by adding a config in `configs.py`, specifying:
- - `path`: The root path from which to perform the file search
- - `include_patterns`: A list of glob patterns for files to include, e.g. `[./**/*]`
- - `exclude_patterns`: A list of glob patterns for files to exclude, e.g. `[./*.md]`
- - `search_words`: A list of keywords included in the target files, e.g. `["config"]`
- - `exclude_extensions`: A list of file extensions to exclude, e.g. `[.jpg]`
-
- ### Setup
-
- PatchLLM uses [LiteLLM](https://github.com/BerriAI/litellm) under the hood. Please refer to their documentation for environment variable naming and available models.
-
- ## License
-
- This project is licensed under the MIT License. See the `LICENSE` file for details.
patchllm-0.1.1.dist-info/RECORD DELETED
@@ -1,12 +0,0 @@
- patchllm/__init__.py,sha256=rtpA9Og8Jm6IVwPB2r0xA57nririqM09DuvaIV6JipQ,27
- patchllm/context.py,sha256=zUrXf5l3cdxAbmxB7IjbShTAWA_ZEMBz8OGlaB-cofE,6450
- patchllm/listener.py,sha256=EdcceJCLEoSftX1dVSWxtwBsLaII2lcZ0VnllHwCGWI,845
- patchllm/main.py,sha256=ed7CNEt8UlOw2GmE-LVLMyVZddTBKR1R0U3YBrRg6UM,8782
- patchllm/parser.py,sha256=4wipa6deoE2gUIhYrvUZcbKTIr5j6lw5Z6bOItUH6YI,2629
- patchllm/utils.py,sha256=hz28hd017gRGT632VQAYLPdX0KAS1GLvZzeUDCKbLc0,647
- patchllm-0.1.1.dist-info/licenses/LICENSE,sha256=vZxgIRNxffjkTV2NWLemgYjDRu0hSMTyFXCZ1zEWbUc,1077
- patchllm-0.1.1.dist-info/METADATA,sha256=q-jcWSBhahwklDhaDyydRGFxX25co-tY3wyWhNAiuRI,3547
- patchllm-0.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- patchllm-0.1.1.dist-info/entry_points.txt,sha256=xm-W7FKOQd3o9RgK_4krVnO2sC8phpYxDCobf0htLiU,48
- patchllm-0.1.1.dist-info/top_level.txt,sha256=SLIZj9EhBXbSnYrbnV8EjL-OfNz-hXRwABCPCjE5Fas,9
- patchllm-0.1.1.dist-info/RECORD,,