code-puppy 0.0.59__tar.gz → 0.0.60__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {code_puppy-0.0.59 → code_puppy-0.0.60}/PKG-INFO +1 -1
  2. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/agent.py +15 -4
  3. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/agent_prompts.py +1 -1
  4. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/command_line/meta_command_handler.py +15 -9
  5. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/command_line/prompt_toolkit_completion.py +43 -36
  6. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/model_factory.py +0 -108
  7. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/models.json +4 -31
  8. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/tools/command_runner.py +7 -2
  9. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/tools/file_modifications.py +137 -130
  10. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/tools/file_operations.py +17 -0
  11. {code_puppy-0.0.59 → code_puppy-0.0.60}/pyproject.toml +7 -1
  12. {code_puppy-0.0.59 → code_puppy-0.0.60}/.gitignore +0 -0
  13. {code_puppy-0.0.59 → code_puppy-0.0.60}/LICENSE +0 -0
  14. {code_puppy-0.0.59 → code_puppy-0.0.60}/README.md +0 -0
  15. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/__init__.py +0 -0
  16. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/command_line/__init__.py +0 -0
  17. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/command_line/file_path_completion.py +0 -0
  18. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/command_line/model_picker_completion.py +0 -0
  19. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/command_line/utils.py +0 -0
  20. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/config.py +0 -0
  21. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/main.py +0 -0
  22. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/session_memory.py +0 -0
  23. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/tools/__init__.py +0 -0
  24. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/tools/code_map.py +0 -0
  25. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/tools/common.py +0 -0
  26. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/tools/web_search.py +0 -0
  27. {code_puppy-0.0.59 → code_puppy-0.0.60}/code_puppy/version_checker.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: code-puppy
- Version: 0.0.59
+ Version: 0.0.60
  Summary: Code generation agent
  Author: Michael Pfaffenberger
  License: MIT
@@ -18,12 +18,23 @@ from code_puppy.tools.common import console
 
  MODELS_JSON_PATH = os.environ.get("MODELS_JSON_PATH", None)
 
- # Load puppy rules if provided
+ # Puppy rules loader
  PUPPY_RULES_PATH = Path(".puppy_rules")
  PUPPY_RULES = None
- if PUPPY_RULES_PATH.exists():
-     with open(PUPPY_RULES_PATH, "r") as f:
-         PUPPY_RULES = f.read()
+
+
+ def load_puppy_rules(path=None):
+     global PUPPY_RULES
+     rules_path = Path(path) if path else PUPPY_RULES_PATH
+     if rules_path.exists():
+         with open(rules_path, "r") as f:
+             PUPPY_RULES = f.read()
+     else:
+         PUPPY_RULES = None
+
+
+ # Load at import
+ load_puppy_rules()
 
 
  class AgentResponse(pydantic.BaseModel):
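
Note: the rules loader is now a function instead of import-time-only logic, so rules can be reloaded from a custom path. A minimal sketch of exercising it (the temporary-file setup is illustrative, not part of the package):

```python
# Illustrative only: load .puppy_rules-style content from an explicit path,
# then fall back to the default lookup in the current directory.
from pathlib import Path
import tempfile

from code_puppy import agent

with tempfile.TemporaryDirectory() as tmp:
    rules_file = Path(tmp) / "rules.md"
    rules_file.write_text("Always add type hints.")

    agent.load_puppy_rules(rules_file)   # explicit path
    print(agent.PUPPY_RULES)             # -> "Always add type hints."

    agent.load_puppy_rules()             # default: ./.puppy_rules
    print(agent.PUPPY_RULES)             # -> None unless that file exists
```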
@@ -49,7 +49,7 @@ Example (create):
  edit_file("src/example.py", "print('hello')\n")
  ```
 
- Example (replacement):
+ Example (replacement): -- YOU SHOULD PREFER THIS AS THE PRIMARY WAY TO EDIT FILES.
  ```json
  edit_file(
      "src/example.py",
@@ -6,6 +6,7 @@ from code_puppy.command_line.model_picker_completion import (
      load_model_names,
      update_model_in_input,
  )
+ from code_puppy.config import get_config_keys
  from code_puppy.command_line.utils import make_directory_table
 
  META_COMMANDS_HELP = """
@@ -20,6 +21,12 @@ META_COMMANDS_HELP = """
 
 
  def handle_meta_command(command: str, console: Console) -> bool:
+     """
+     Handle meta/config commands prefixed with '~'.
+     Returns True if the command was handled (even if just an error/help), False if not.
+     """
+     command = command.strip()
+
      # ~codemap (code structure visualization)
      if command.startswith("~codemap"):
          from code_puppy.tools.code_map import make_code_map
@@ -35,11 +42,7 @@ def handle_meta_command(command: str, console: Console) -> bool:
          except Exception as e:
              console.print(f"[red]Error generating code map:[/red] {e}")
          return True
-     """
-     Handle meta/config commands prefixed with '~'.
-     Returns True if the command was handled (even if just an error/help), False if not.
-     """
-     command = command.strip()
+
      if command.startswith("~cd"):
          tokens = command.split()
          if len(tokens) == 1:
@@ -83,7 +86,7 @@ def handle_meta_command(command: str, console: Console) -> bool:
 
      if command.startswith("~set"):
          # Syntax: ~set KEY=VALUE or ~set KEY VALUE
-         from code_puppy.config import get_config_keys, set_config_value
+         from code_puppy.config import set_config_value
 
          tokens = command.split(None, 2)
          argstr = command[len("~set") :].strip()
@@ -100,8 +103,9 @@ def handle_meta_command(command: str, console: Console) -> bool:
              key = tokens[1]
              value = ""
          else:
-             console.print("[yellow]Usage:[/yellow] ~set KEY=VALUE or ~set KEY VALUE")
-             console.print("Config keys: " + ", ".join(get_config_keys()))
+             console.print(
+                 f"[yellow]Usage:[/yellow] ~set KEY=VALUE or ~set KEY VALUE\nConfig keys: {', '.join(get_config_keys())}"
+             )
              return True
          if key:
              set_config_value(key, value)
@@ -116,9 +120,11 @@ def handle_meta_command(command: str, console: Console) -> bool:
          # Try setting model and show confirmation
          new_input = update_model_in_input(command)
          if new_input is not None:
+             from code_puppy.command_line.model_picker_completion import get_active_model
              from code_puppy.agent import get_code_generation_agent
 
              model = get_active_model()
+             # Make sure this is called for the test
              get_code_generation_agent(force_reload=True)
              console.print(
                  f"[bold green]Active model set and loaded:[/bold green] [cyan]{model}[/cyan]"
@@ -126,8 +132,8 @@ def handle_meta_command(command: str, console: Console) -> bool:
              return True
          # If no model matched, show available models
          model_names = load_model_names()
+         console.print("[yellow]Usage:[/yellow] ~m <model-name>")
          console.print(f"[yellow]Available models:[/yellow] {', '.join(model_names)}")
-         console.print("[yellow]Usage:[/yellow] ~m <model_name>")
          return True
      if command in ("~help", "~h"):
          console.print(META_COMMANDS_HELP)
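
Note: the handler is normally driven by the interactive prompt loop; a minimal sketch of calling it directly (the example commands and the Rich Console import are assumptions consistent with the signature above, and the config key shown is hypothetical):

```python
# Illustrative only: exercising handle_meta_command() outside the REPL.
from rich.console import Console
from code_puppy.command_line.meta_command_handler import handle_meta_command

console = Console()

handle_meta_command("~help", console)                     # prints META_COMMANDS_HELP, returns True
handle_meta_command("~set some_key=some_value", console)  # KEY=VALUE form of ~set (hypothetical key)
handled = handle_meta_command("write me a parser", console)
print(handled)  # False -> caller should treat the input as a normal prompt
```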
@@ -33,53 +33,60 @@ class SetCompleter(Completer):
          self.trigger = trigger
 
      def get_completions(self, document, complete_event):
-         text = document.text_before_cursor
-         if not text.strip().startswith(self.trigger):
+         text_before_cursor = document.text_before_cursor
+         stripped_text_for_trigger_check = text_before_cursor.lstrip()
+
+         if not stripped_text_for_trigger_check.startswith(self.trigger):
              return
-         # If the only thing typed is exactly '~set', suggest space
-         if text.strip() == self.trigger:
+
+         # Determine the part of the text that is relevant for this completer
+         # This handles cases like " ~set foo" where the trigger isn't at the start of the string
+         actual_trigger_pos = text_before_cursor.find(self.trigger)
+         effective_input = text_before_cursor[
+             actual_trigger_pos:
+         ]  # e.g., "~set keypart" or "~set " or "~set"
+
+         tokens = effective_input.split()
+
+         # Case 1: Input is exactly the trigger (e.g., "~set") and nothing more (not even a trailing space on effective_input).
+         # Suggest adding a space.
+         if (
+             len(tokens) == 1
+             and tokens[0] == self.trigger
+             and not effective_input.endswith(" ")
+         ):
              yield Completion(
-                 self.trigger + " ",
-                 start_position=-len(self.trigger),
-                 display=f"{self.trigger} ",
-                 display_meta="set config",
+                 text=self.trigger + " ",  # Text to insert
+                 start_position=-len(tokens[0]),  # Replace the trigger itself
+                 display=self.trigger + " ",  # Visual display
+                 display_meta="set config key",
              )
-         tokens = text.strip().split()
-         # completion for the first arg after ~set
-         if len(tokens) == 1:
-             # user just typed ~set <-- suggest config keys
-             base = ""
-         else:
-             base = tokens[1]
+             return
+
+         # Case 2: Input is trigger + space (e.g., "~set ") or trigger + partial key (e.g., "~set partial")
+         base_to_complete = ""
+         if len(tokens) > 1:  # e.g., ["~set", "partialkey"]
+             base_to_complete = tokens[1]
+         # If len(tokens) == 1, it implies effective_input was like "~set ", so base_to_complete remains ""
+         # This means we list all keys.
+
          # --- SPECIAL HANDLING FOR 'model' KEY ---
-         if base == "model":
+         if base_to_complete == "model":
              # Don't return any completions -- let ModelNameCompleter handle it
              return
          for key in get_config_keys():
              if key == "model":
                  continue  # exclude 'model' from regular ~set completions
-             if key.startswith(base):
+             if key.startswith(base_to_complete):
                  prev_value = get_value(key)
-                 # Ensure there's a space after '~set' if it's the only thing typed
-                 if text.strip() == self.trigger or text.strip() == self.trigger + "":
-                     prefix = self.trigger + " "  # Always enforce a space
-                     insert_text = (
-                         f"{prefix}{key} = {prev_value}"
-                         if prev_value is not None
-                         else f"{prefix}{key} = "
-                     )
-                     sp = -len(text)
-                 else:
-                     insert_text = (
-                         f"{key} = {prev_value}"
-                         if prev_value is not None
-                         else f"{key} = "
-                     )
-                     sp = -len(base)
-                 # Make it obvious the value part is from before
+                 value_part = f" = {prev_value}" if prev_value is not None else " = "
+                 completion_text = f"{key}{value_part}"
+
                  yield Completion(
-                     insert_text,
-                     start_position=sp,
+                     completion_text,
+                     start_position=-len(
+                         base_to_complete
+                     ),  # Correctly replace only the typed part of the key
                      display_meta=f"puppy.cfg key (was: {prev_value})"
                      if prev_value is not None
                      else "puppy.cfg key",
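
Note: a rough sketch of wiring SetCompleter into a prompt_toolkit session (the trigger argument and standalone use are assumptions based on the constructor shown above, not the package's actual wiring):

```python
# Illustrative only: attach SetCompleter to a PromptSession.
from prompt_toolkit import PromptSession
from code_puppy.command_line.prompt_toolkit_completion import SetCompleter

session = PromptSession(completer=SetCompleter(trigger="~set"))
# Typing "~set" offers "~set "; "~set mo<TAB>" completes matching config keys,
# while the "model" key is intentionally left to ModelNameCompleter.
text = session.prompt("code-puppy> ")
```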
@@ -1,14 +1,9 @@
- import asyncio
  import json
  import os
- import threading
- import time
- from collections import deque
  from typing import Any, Dict
 
  import httpx
  from anthropic import AsyncAnthropic
- from httpx import Response
  from openai import AsyncAzureOpenAI  # For Azure OpenAI client
  from pydantic_ai.models.anthropic import AnthropicModel
  from pydantic_ai.models.gemini import GeminiModel
@@ -27,98 +22,6 @@ from pydantic_ai.providers.openai import OpenAIProvider
  # Example: "X-Api-Key": "$OPENAI_API_KEY" will use the value from os.environ.get("OPENAI_API_KEY")
 
 
- def make_client(
-     max_requests_per_minute: int = 10, max_retries: int = 3, retry_base_delay: int = 10
- ) -> httpx.AsyncClient:
-     # Create a rate limiter using a token bucket approach
-     class RateLimiter:
-         def __init__(self, max_requests_per_minute):
-             self.max_requests_per_minute = max_requests_per_minute
-             self.interval = (
-                 60.0 / max_requests_per_minute
-             )  # Time between requests in seconds
-             self.request_times = deque(maxlen=max_requests_per_minute)
-             self.lock = threading.Lock()
-
-         async def acquire(self):
-             """Wait until a request can be made according to the rate limit."""
-             while True:
-                 with self.lock:
-                     now = time.time()
-
-                     # Remove timestamps older than 1 minute
-                     while self.request_times and now - self.request_times[0] > 60:
-                         self.request_times.popleft()
-
-                     # If we haven't reached the limit, add the timestamp and proceed
-                     if len(self.request_times) < self.max_requests_per_minute:
-                         self.request_times.append(now)
-                         return
-
-                     # Otherwise, calculate the wait time until we can make another request
-                     oldest = self.request_times[0]
-                     wait_time = max(0, oldest + 60 - now)
-
-                 if wait_time > 0:
-                     print(
-                         f"Rate limit would be exceeded. Waiting {wait_time:.2f} seconds before sending request."
-                     )
-                     await asyncio.sleep(wait_time)
-                 else:
-                     # Try again immediately
-                     continue
-
-     # Create the rate limiter instance
-     rate_limiter = RateLimiter(max_requests_per_minute)
-
-     def should_retry(response: Response) -> bool:
-         return response.status_code == 429 or (500 <= response.status_code < 600)
-
-     async def request_hook(request):
-         # Wait until we can make a request according to our rate limit
-         await rate_limiter.acquire()
-         return request
-
-     async def response_hook(response: Response) -> Response:
-         retries = getattr(response.request, "_retries", 0)
-
-         if should_retry(response) and retries < max_retries:
-             setattr(response.request, "_retries", retries + 1)
-
-             delay = retry_base_delay * (2**retries)
-
-             if response.status_code == 429:
-                 print(
-                     f"Rate limit exceeded. Retrying in {delay:.2f} seconds (attempt {retries + 1}/{max_retries})"
-                 )
-             else:
-                 print(
-                     f"Server error {response.status_code}. Retrying in {delay:.2f} seconds (attempt {retries + 1}/{max_retries})"
-                 )
-
-             await asyncio.sleep(delay)
-
-             new_request = response.request.copy()
-             async with httpx.AsyncClient() as client:
-                 # Apply rate limiting to the retry request as well
-                 await rate_limiter.acquire()
-                 new_response = await client.request(
-                     new_request.method,
-                     str(new_request.url),
-                     headers=new_request.headers,
-                     content=new_request.content,
-                     params=dict(new_request.url.params),
-                 )
-             return new_response
-         return response
-
-     # Setup both request and response hooks
-     event_hooks = {"request": [request_hook], "response": [response_hook]}
-
-     client = httpx.AsyncClient(event_hooks=event_hooks)
-     return client
-
-
  def get_custom_config(model_config):
      custom_config = model_config.get("custom_endpoint", {})
      if not custom_config:
@@ -167,17 +70,6 @@ class ModelFactory:
 
          model_type = model_config.get("type")
 
-         # Common configuration for rate limiting and retries
-         max_requests_per_minute = model_config.get("max_requests_per_minute", 100)
-         max_retries = model_config.get("max_retries", 3)
-         retry_base_delay = model_config.get("retry_base_delay", 1.0)
-
-         client = make_client(
-             max_requests_per_minute=max_requests_per_minute,
-             max_retries=max_retries,
-             retry_base_delay=retry_base_delay,
-         )
-
          if model_type == "gemini":
              provider = GoogleGLAProvider(api_key=os.environ.get("GEMINI_API_KEY", ""))
 
@@ -1,38 +1,23 @@
  {
    "gemini-2.5-flash-preview-05-20": {
      "type": "gemini",
-     "name": "gemini-2.5-flash-preview-05-20",
-     "max_requests_per_minute": 10,
-     "max_retries": 3,
-     "retry_base_delay": 10
+     "name": "gemini-2.5-flash-preview-05-20"
    },
    "gpt-4.1": {
      "type": "openai",
-     "name": "gpt-4.1",
-     "max_requests_per_minute": 100,
-     "max_retries": 3,
-     "retry_base_delay": 10
+     "name": "gpt-4.1"
    },
    "gpt-4.1-mini": {
      "type": "openai",
-     "name": "gpt-4.1-mini",
-     "max_requests_per_minute": 100,
-     "max_retries": 3,
-     "retry_base_delay": 10
+     "name": "gpt-4.1-mini"
    },
    "gpt-4.1-nano": {
      "type": "openai",
-     "name": "gpt-4.1-nano",
-     "max_requests_per_minute": 100,
-     "max_retries": 3,
-     "retry_base_delay": 10
+     "name": "gpt-4.1-nano"
    },
    "gpt-4.1-custom": {
      "type": "custom_openai",
      "name": "gpt-4.1-custom",
-     "max_requests_per_minute": 100,
-     "max_retries": 3,
-     "retry_base_delay": 10,
      "custom_endpoint": {
        "url": "https://my.cute.endpoint:8080",
        "headers": {
@@ -44,9 +29,6 @@
    "ollama-llama3.3": {
      "type": "custom_openai",
      "name": "llama3.3",
-     "max_requests_per_minute": 100,
-     "max_retries": 3,
-     "retry_base_delay": 5,
      "custom_endpoint": {
        "url": "http://localhost:11434/v1"
      }
@@ -54,9 +36,6 @@
    "meta-llama/Llama-3.3-70B-Instruct-Turbo": {
      "type": "custom_openai",
      "name": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-     "max_requests_per_minute": 100,
-     "max_retries": 3,
-     "retry_base_delay": 5,
      "custom_endpoint": {
        "url": "https://api.together.xyz/v1",
        "api_key": "$TOGETHER_API_KEY"
@@ -65,9 +44,6 @@
    "grok-3-mini-fast": {
      "type": "custom_openai",
      "name": "grok-3-mini-fast",
-     "max_requests_per_minute": 100,
-     "max_retries": 3,
-     "retry_base_delay": 5,
      "custom_endpoint": {
        "url": "https://api.x.ai/v1",
        "api_key": "$XAI_API_KEY"
@@ -76,9 +52,6 @@
    "azure-gpt-4.1": {
      "type": "azure_openai",
      "name": "gpt-4.1",
-     "max_requests_per_minute": 100,
-     "max_retries": 3,
-     "retry_base_delay": 5,
      "api_version": "2024-12-01-preview",
      "api_key": "$AZURE_OPENAI_API_KEY",
      "azure_endpoint": "$AZURE_OPENAI_ENDPOINT"
@@ -136,12 +136,17 @@ def run_shell_command(
      except Exception as e:
          console.print_exception(show_locals=True)
          console.print("[dim]" + "-" * 60 + "[/dim]\n")
+         # Ensure stdout and stderr are always defined
+         if "stdout" not in locals():
+             stdout = None
+         if "stderr" not in locals():
+             stderr = None
          return {
              "success": False,
              "command": command,
              "error": f"Error executing command: {str(e)}",
-             "stdout": stdout[-1000:],
-             "stderr": stderr[-1000:],
+             "stdout": stdout[-1000:] if stdout else None,
+             "stderr": stderr[-1000:] if stderr else None,
              "exit_code": -1,
              "timeout": False,
          }
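
Note: the added guard avoids a NameError when the exception fires before stdout/stderr are ever assigned. A standalone sketch of the same pattern (the failing helper is hypothetical, not from the package):

```python
# Illustrative only: default possibly-unbound locals inside an except block.
def flaky_capture(command: str):
    raise RuntimeError(f"could not start: {command}")  # fails before any output exists

def run(command: str) -> dict:
    try:
        stdout, stderr = flaky_capture(command)
        return {"success": True, "stdout": stdout, "stderr": stderr}
    except Exception as exc:
        if "stdout" not in locals():  # names were never bound
            stdout = None
        if "stderr" not in locals():
            stderr = None
        return {
            "success": False,
            "error": str(exc),
            "stdout": stdout[-1000:] if stdout else None,
            "stderr": stderr[-1000:] if stderr else None,
        }

print(run("definitely-not-a-command"))  # no NameError; stdout/stderr are None
```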
@@ -205,145 +205,152 @@ def _write_to_file(
          return {"error": str(exc), "diff": ""}
 
 
- def register_file_modifications_tools(agent):
-     """Attach file-editing tools to *agent* with mandatory diff rendering."""
+ def delete_snippet_from_file(
+     context: RunContext, file_path: str, snippet: str
+ ) -> Dict[str, Any]:
+     console.log(f"🗑️ Deleting snippet from file [bold red]{file_path}[/bold red]")
+     res = _delete_snippet_from_file(context, file_path, snippet)
+     diff = res.get("diff", "")
+     if diff:
+         _print_diff(diff)
+     return res
 
-     def delete_snippet_from_file(
-         context: RunContext, file_path: str, snippet: str
-     ) -> Dict[str, Any]:
-         console.log(f"🗑️ Deleting snippet from file [bold red]{file_path}[/bold red]")
-         res = _delete_snippet_from_file(context, file_path, snippet)
-         diff = res.get("diff", "")
-         if diff:
-             _print_diff(diff)
-         return res
-
-     def write_to_file(
-         context: RunContext, path: str, content: str, overwrite: bool
-     ) -> Dict[str, Any]:
-         console.log(f"✏️ Writing file [bold blue]{path}[/bold blue]")
-         res = _write_to_file(context, path, content, overwrite=overwrite)
-         diff = res.get("diff", "")
-         if diff:
-             _print_diff(diff)
-         return res
-
-     def replace_in_file(
-         context: RunContext, path: str, replacements: List[Dict[str, str]]
-     ) -> Dict[str, Any]:
-         console.log(f"♻️ Replacing text in [bold yellow]{path}[/bold yellow]")
-         res = _replace_in_file(context, path, replacements)
-         diff = res.get("diff", "")
-         if diff:
-             _print_diff(diff)
-         return res
 
-     @agent.tool(retries=5)
-     def edit_file(context: RunContext, path: str, diff: str) -> Dict[str, Any]:
-         """
-         Unified file editing tool that can:
-         - Create/write a new file when the target does not exist (using raw content or a JSON payload with a "content" key)
-         - Replace text within an existing file via a JSON payload with "replacements" (delegates to internal replace logic)
-         - Delete a snippet from an existing file via a JSON payload with "delete_snippet"
-         Parameters
-         ----------
-         path : str
-             Path to the target file (relative or absolute)
-         diff : str
-             Either:
-             * Raw file content (for file creation)
-             * A JSON string with one of the following shapes:
-                 {"content": "full file contents", "overwrite": true}
-                 {"replacements": [ {"old_str": "foo", "new_str": "bar"}, ... ] }
-                 {"delete_snippet": "text to remove"}
-             The function auto-detects the payload type and routes to the appropriate internal helper.
-         """
-         console.print("\n[bold white on blue] EDIT FILE [/bold white on blue]")
-         file_path = os.path.abspath(path)
-         try:
-             parsed_payload = json.loads(diff)
-         except json.JSONDecodeError:
-             try:
-                 console.print(
-                     "[bold yellow] JSON Parsing Failed! TRYING TO REPAIR! [/bold yellow]"
-                 )
-                 parsed_payload = json.loads(repair_json(diff))
-                 console.print(
-                     "[bold green on cyan] SUCCESS - WOOF! [/bold green on cyan]"
-                 )
-             except Exception as e:
-                 console.print(
-                     f"[bold red] Unable to parse diff [/bold red] -- {str(e)}"
-                 )
-                 return {
-                     "success": False,
-                     "path": file_path,
-                     "message": f"Unable to parse diff JSON -- {str(e)}",
-                     "changed": False,
-                     "diff": "",
-                 }
+ def write_to_file(
+     context: RunContext, path: str, content: str, overwrite: bool
+ ) -> Dict[str, Any]:
+     console.log(f"✏️ Writing file [bold blue]{path}[/bold blue]")
+     res = _write_to_file(context, path, content, overwrite=overwrite)
+     diff = res.get("diff", "")
+     if diff:
+         _print_diff(diff)
+     return res
+
+
+ def replace_in_file(
+     context: RunContext, path: str, replacements: List[Dict[str, str]]
+ ) -> Dict[str, Any]:
+     console.log(f"♻️ Replacing text in [bold yellow]{path}[/bold yellow]")
+     res = _replace_in_file(context, path, replacements)
+     diff = res.get("diff", "")
+     if diff:
+         _print_diff(diff)
+     return res
+
+
+ def _edit_file(context: RunContext, path: str, diff: str) -> Dict[str, Any]:
+     """
+     Unified file editing tool that can:
+     - Create/write a new file when the target does not exist (using raw content or a JSON payload with a "content" key)
+     - Replace text within an existing file via a JSON payload with "replacements" (delegates to internal replace logic)
+     - Delete a snippet from an existing file via a JSON payload with "delete_snippet"
+     Parameters
+     ----------
+     path : str
+         Path to the target file (relative or absolute)
+     diff : str
+         Either:
+         * Raw file content (for file creation)
+         * A JSON string with one of the following shapes:
+             {"content": "full file contents", "overwrite": true}
+             {"replacements": [ {"old_str": "foo", "new_str": "bar"}, ... ] }
+             {"delete_snippet": "text to remove"}
+         The function auto-detects the payload type and routes to the appropriate internal helper.
+     """
+     console.print("\n[bold white on blue] EDIT FILE [/bold white on blue]")
+     file_path = os.path.abspath(path)
+     try:
+         parsed_payload = json.loads(diff)
+     except json.JSONDecodeError:
          try:
-             if isinstance(parsed_payload, dict):
-                 if "delete_snippet" in parsed_payload:
-                     snippet = parsed_payload["delete_snippet"]
-                     return delete_snippet_from_file(context, file_path, snippet)
-                 if "replacements" in parsed_payload:
-                     replacements = parsed_payload["replacements"]
-                     return replace_in_file(context, file_path, replacements)
-                 if "content" in parsed_payload:
-                     content = parsed_payload["content"]
-                     overwrite = bool(parsed_payload.get("overwrite", False))
-                     file_exists = os.path.exists(file_path)
-                     if file_exists and not overwrite:
-                         return {
-                             "success": False,
-                             "path": file_path,
-                             "message": f"File '{file_path}' exists. Set 'overwrite': true to replace.",
-                             "changed": False,
-                         }
-                     return write_to_file(context, file_path, content, overwrite)
-             return write_to_file(context, file_path, diff, overwrite=False)
-         except Exception as e:
              console.print(
-                 "[bold red] Unable to route file modification tool call to sub-tool [/bold red]"
+                 "[bold yellow] JSON Parsing Failed! TRYING TO REPAIR! [/bold yellow]"
              )
-             console.print(str(e))
+             parsed_payload = json.loads(repair_json(diff))
+             console.print("[bold white on blue] SUCCESS - WOOF! [/bold white on blue]")
+         except Exception as e:
+             console.print(f"[bold red] Unable to parse diff [/bold red] -- {str(e)}")
              return {
                  "success": False,
                  "path": file_path,
-                 "message": f"Something went wrong in file editing: {str(e)}",
+                 "message": f"Unable to parse diff JSON -- {str(e)}",
                  "changed": False,
+                 "diff": "",
+             }
+     try:
+         if isinstance(parsed_payload, dict):
+             if "delete_snippet" in parsed_payload:
+                 snippet = parsed_payload["delete_snippet"]
+                 return delete_snippet_from_file(context, file_path, snippet)
+             if "replacements" in parsed_payload:
+                 replacements = parsed_payload["replacements"]
+                 return replace_in_file(context, file_path, replacements)
+             if "content" in parsed_payload:
+                 content = parsed_payload["content"]
+                 overwrite = bool(parsed_payload.get("overwrite", False))
+                 file_exists = os.path.exists(file_path)
+                 if file_exists and not overwrite:
+                     return {
+                         "success": False,
+                         "path": file_path,
+                         "message": f"File '{file_path}' exists. Set 'overwrite': true to replace.",
+                         "changed": False,
+                     }
+                 return write_to_file(context, file_path, content, overwrite)
+         return write_to_file(context, file_path, diff, overwrite=False)
+     except Exception as e:
+         console.print(
+             "[bold red] Unable to route file modification tool call to sub-tool [/bold red]"
+         )
+         console.print(str(e))
+         return {
+             "success": False,
+             "path": file_path,
+             "message": f"Something went wrong in file editing: {str(e)}",
+             "changed": False,
+         }
+
+
+ def _delete_file(context: RunContext, file_path: str) -> Dict[str, Any]:
+     console.log(f"🗑️ Deleting file [bold red]{file_path}[/bold red]")
+     file_path = os.path.abspath(file_path)
+     try:
+         if not os.path.exists(file_path) or not os.path.isfile(file_path):
+             res = {"error": f"File '{file_path}' does not exist.", "diff": ""}
+         else:
+             with open(file_path, "r", encoding="utf-8") as f:
+                 original = f.read()
+             diff_text = "".join(
+                 difflib.unified_diff(
+                     original.splitlines(keepends=True),
+                     [],
+                     fromfile=f"a/{os.path.basename(file_path)}",
+                     tofile=f"b/{os.path.basename(file_path)}",
+                     n=3,
+                 )
+             )
+             os.remove(file_path)
+             res = {
+                 "success": True,
+                 "path": file_path,
+                 "message": f"File '{file_path}' deleted successfully.",
+                 "changed": True,
+                 "diff": diff_text,
              }
+     except Exception as exc:
+         _log_error("Unhandled exception in delete_file", exc)
+         res = {"error": str(exc), "diff": ""}
+     _print_diff(res.get("diff", ""))
+     return res
+
+
+ def register_file_modifications_tools(agent):
+     """Attach file-editing tools to *agent* with mandatory diff rendering."""
+
+     @agent.tool(retries=5)
+     def edit_file(context: RunContext, path: str, diff: str) -> Dict[str, Any]:
+         return _edit_file(context, path, diff)
 
-     @agent.tool
+     @agent.tool(retries=5)
      def delete_file(context: RunContext, file_path: str) -> Dict[str, Any]:
-         console.log(f"🗑️ Deleting file [bold red]{file_path}[/bold red]")
-         file_path = os.path.abspath(file_path)
-         try:
-             if not os.path.exists(file_path) or not os.path.isfile(file_path):
-                 res = {"error": f"File '{file_path}' does not exist.", "diff": ""}
-             else:
-                 with open(file_path, "r", encoding="utf-8") as f:
-                     original = f.read()
-                 diff_text = "".join(
-                     difflib.unified_diff(
-                         original.splitlines(keepends=True),
-                         [],
-                         fromfile=f"a/{os.path.basename(file_path)}",
-                         tofile=f"b/{os.path.basename(file_path)}",
-                         n=3,
-                     )
-                 )
-                 os.remove(file_path)
-                 res = {
-                     "success": True,
-                     "path": file_path,
-                     "message": f"File '{file_path}' deleted successfully.",
-                     "changed": True,
-                     "diff": diff_text,
-                 }
-         except Exception as exc:
-             _log_error("Unhandled exception in delete_file", exc)
-             res = {"error": str(exc), "diff": ""}
-         _print_diff(res.get("diff", ""))
-         return res
+         return _delete_file(context, file_path)
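
Note: the payload shapes the edit_file tool accepts are spelled out in the docstring above; a small sketch of building them (the target path and example strings are illustrative only):

```python
# Illustrative only: the three JSON payload shapes _edit_file routes on,
# plus the raw-content fallback for brand-new files.
import json

create_payload = json.dumps({"content": "print('hello')\n", "overwrite": True})
replace_payload = json.dumps(
    {"replacements": [{"old_str": "print('hello')", "new_str": "print('woof')"}]}
)
delete_payload = json.dumps({"delete_snippet": "print('woof')\n"})

# Each string is what the agent passes as the `diff` argument, e.g.
# edit_file("src/example.py", replace_payload); _edit_file then dispatches to
# replace_in_file / write_to_file / delete_snippet_from_file accordingly.
```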
@@ -12,16 +12,33 @@ from code_puppy.tools.common import console
  # ---------------------------------------------------------------------------
  IGNORE_PATTERNS = [
      "**/node_modules/**",
+     "**/node_modules/**/*.js",
+     "node_modules/**",
+     "node_modules",
      "**/.git/**",
+     "**/.git",
+     ".git/**",
+     ".git",
      "**/__pycache__/**",
+     "**/__pycache__",
+     "__pycache__/**",
+     "__pycache__",
      "**/.DS_Store",
+     ".DS_Store",
      "**/.env",
+     ".env",
      "**/.venv/**",
+     "**/.venv",
      "**/venv/**",
+     "**/venv",
      "**/.idea/**",
+     "**/.idea",
      "**/.vscode/**",
+     "**/.vscode",
      "**/dist/**",
+     "**/dist",
      "**/build/**",
+     "**/build",
      "**/*.pyc",
      "**/*.pyo",
      "**/*.pyd",
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
  [project]
  name = "code-puppy"
- version = "0.0.59"
+ version = "0.0.60"
  description = "Code generation agent"
  readme = "README.md"
  requires-python = ">=3.10"
@@ -24,6 +24,12 @@ dependencies = [
      "rapidfuzz>=3.13.0",
      "json-repair>=0.46.2",
  ]
+ dev-dependencies = [
+     "pytest>=8.3.4",
+     "pytest-cov>=6.1.1",
+     "pytest-asyncio>=0.23.1",
+     "ruff>=0.11.11",
+ ]
  authors = [
      {name = "Michael Pfaffenberger"}
  ]
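
Note: with the version bumped to 0.0.60 in both PKG-INFO and pyproject.toml, the installed release can be checked at runtime; a generic sketch (not the package's own version_checker logic):

```python
# Illustrative only: read the installed code-puppy version via importlib.metadata.
from importlib.metadata import version, PackageNotFoundError

try:
    print(version("code-puppy"))  # e.g. "0.0.60" once this release is installed
except PackageNotFoundError:
    print("code-puppy is not installed in this environment")
```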