code-puppy 0.0.80__py3-none-any.whl → 0.0.82__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/__init__.py +5 -3
- code_puppy/agent.py +1 -1
- code_puppy/message_history_processor.py +152 -61
- code_puppy/model_factory.py +74 -2
- code_puppy/models.json +73 -73
- code_puppy/state_management.py +13 -1
- code_puppy/summarization_agent.py +72 -0
- code_puppy/tools/command_runner.py +4 -6
- code_puppy/tools/file_modifications.py +25 -4
- code_puppy/tools/file_operations.py +22 -18
- {code_puppy-0.0.80.data → code_puppy-0.0.82.data}/data/code_puppy/models.json +73 -73
- {code_puppy-0.0.80.dist-info → code_puppy-0.0.82.dist-info}/METADATA +11 -17
- {code_puppy-0.0.80.dist-info → code_puppy-0.0.82.dist-info}/RECORD +16 -15
- {code_puppy-0.0.80.dist-info → code_puppy-0.0.82.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.80.dist-info → code_puppy-0.0.82.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.80.dist-info → code_puppy-0.0.82.dist-info}/licenses/LICENSE +0 -0
code_puppy/__init__.py
CHANGED
code_puppy/agent.py
CHANGED
code_puppy/message_history_processor.py
CHANGED

@@ -1,78 +1,169 @@
+import json
 import queue
 from typing import List
+import os
+from pathlib import Path
 
-
+import pydantic
+import tiktoken
+from pydantic_ai.messages import ModelMessage, ToolCallPart, ToolReturnPart, UserPromptPart, TextPart, ModelRequest, ModelResponse
 
 from code_puppy.config import get_message_history_limit
 from code_puppy.tools.common import console
+from code_puppy.model_factory import ModelFactory
+from code_puppy.config import get_model_name
 
+# Import summarization agent
+try:
+    from code_puppy.summarization_agent import get_summarization_agent as _get_summarization_agent
+    SUMMARIZATION_AVAILABLE = True
+
+    # Make the function available in this module's namespace for mocking
+    def get_summarization_agent():
+        return _get_summarization_agent()
+
+except ImportError:
+    SUMMARIZATION_AVAILABLE = False
+    console.print("[yellow]Warning: Summarization agent not available. Message history will be truncated instead of summarized.[/yellow]")
+    def get_summarization_agent():
+        return None
 
-
+
+def get_tokenizer_for_model(model_name: str):
     """
-
+    Always use cl100k_base tokenizer regardless of model type.
+    This is a simple approach that works reasonably well for most models.
+    """
+    return tiktoken.get_encoding("cl100k_base")
 
-    This implementation:
-    - Uses the configurable message_history_limit from puppy.cfg (defaults to 40)
-    - Preserves system messages at the beginning
-    - Maintains tool call/response pairs together
-    - Follows PydanticAI best practices for message ordering
 
+def stringify_message_part(part) -> str:
+    """
+    Convert a message part to a string representation for token estimation or other uses.
+
     Args:
-… (2 removed lines not rendered by the diff viewer)
+        part: A message part that may contain content or be a tool call
+
     Returns:
-
+        String representation of the message part
     """
-… (2 removed lines not rendered by the diff viewer)
+    result = ""
+    if hasattr(part, "part_kind"):
+        result += part.part_kind + ": "
+    else:
+        result += str(type(part)) + ": "
 
-    #
-… (4 removed lines not rendered by the diff viewer)
+    # Handle content
+    if hasattr(part, 'content') and part.content:
+        # Handle different content types
+        if isinstance(part.content, str):
+            result = part.content
+        elif isinstance(part.content, pydantic.BaseModel):
+            result = json.dumps(part.content.model_dump())
+        elif isinstance(part.content, dict):
+            result = json.dumps(part.content)
+        else:
+            result = str(part.content)
+
+    # Handle tool calls which may have additional token costs
+    # If part also has content, we'll process tool calls separately
+    if hasattr(part, 'tool_name') and part.tool_name:
+        # Estimate tokens for tool name and parameters
+        tool_text = part.tool_name
+        if hasattr(part, "args"):
+            tool_text += f" {str(part.args)}"
+        result += tool_text
+
+    return result
 
-… (29 removed lines not rendered by the diff viewer)
+
+def estimate_tokens_for_message(message: ModelMessage) -> int:
+    """
+    Estimate the number of tokens in a message using tiktoken with cl100k_base encoding.
+    This is more accurate than character-based estimation.
+    """
+    tokenizer = get_tokenizer_for_model(get_model_name())
+    total_tokens = 0
+
+    for part in message.parts:
+        part_str = stringify_message_part(part)
+        if part_str:
+            tokens = tokenizer.encode(part_str)
+            total_tokens += len(tokens)
+
+    return max(1, total_tokens)
+
+
+def summarize_messages(messages: List[ModelMessage]) -> ModelMessage:
+
+    # Get the summarization agent
+    summarization_agent = get_summarization_agent()
+    message_strings = []
+
+    for message in messages:
+        for part in message.parts:
+            message_strings.append(stringify_message_part(part))
+
+
+    summary_string = "\n".join(message_strings)
+    instructions = (
+        "Above I've given you a log of Agentic AI steps that have been taken"
+        " as well as user queries, etc. Summarize the contents of these steps."
+        " The high level details should remain but the bulk of the content from tool-call"
+        " responses should be compacted and summarized. For example if you see a tool-call"
+        " reading a file, and the file contents are large, then in your summary you might just"
+        " write: * used read_file on space_invaders.cpp - contents removed."
+        "\n Make sure your result is a bulleted list of all steps and interactions."
     )
-… (14 removed lines not rendered by the diff viewer)
+    try:
+        # Run the summarization agent
+        result = summarization_agent.run_sync(f"{summary_string}\n{instructions}")
+
+        # Create a new message with the summarized content
+        summarized_parts = [TextPart(result.output)]
+        summarized_message = ModelResponse(parts=summarized_parts)
+        return summarized_message
+    except Exception as e:
+        console.print(f"Summarization failed during compaction: {e}")
+        # Return original message if summarization fails
+        return None
+
+
+def get_model_context_length() -> int:
+    """
+    Get the context length for the currently configured model from models.json
+    """
+    # Load model configuration
+    models_path = os.environ.get("MODELS_JSON_PATH")
+    if not models_path:
+        models_path = Path(__file__).parent / "models.json"
+    else:
+        models_path = Path(models_path)
+
+    model_configs = ModelFactory.load_config(str(models_path))
+    model_name = get_model_name()
+
+    # Get context length from model config
+    model_config = model_configs.get(model_name, {})
+    context_length = model_config.get("context_length", 128000)  # Default value
+
+    # Reserve 10% of context for response
+    return int(context_length)
+
+
+def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
+
+    total_current_tokens = sum(estimate_tokens_for_message(msg) for msg in messages)
+
+    model_max = get_model_context_length()
+
+    proportion_used = total_current_tokens / model_max
+    console.print(f"[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used}")
+
+    if proportion_used > 0.9:
+        summary = summarize_messages(messages)
+        result_messages = [messages[0], summary]
+        final_token_count = sum(estimate_tokens_for_message(msg) for msg in result_messages)
+        console.print(f"Final token count after processing: {final_token_count}")
+        return result_messages
+    return messages
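The upshot of this rewrite: history is no longer trimmed to a fixed message count. Every message is token-counted with tiktoken, and once usage crosses 90% of the model's context window the whole history collapses to the first message plus one summarized ModelResponse. A minimal sketch of that threshold rule (the 128,000-token window and the token counts below are invented illustration values, not taken from the package):

    CONTEXT_LENGTH = 128_000      # e.g. a "context_length" value from models.json
    THRESHOLD = 0.9               # hard-coded in message_history_processor

    def needs_compaction(total_tokens: int) -> bool:
        # Mirrors the `proportion_used > 0.9` check in the hunk above.
        return total_tokens / CONTEXT_LENGTH > THRESHOLD

    print(needs_compaction(100_000))  # False -> history returned unchanged (0.78 used)
    print(needs_compaction(120_000))  # True  -> collapses to [first_message, summary]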
code_puppy/model_factory.py
CHANGED

@@ -1,5 +1,6 @@
 import json
 import os
+import random
 from typing import Any, Dict
 
 import httpx

@@ -13,6 +14,8 @@ from pydantic_ai.providers.google_gla import GoogleGLAProvider
 from pydantic_ai.providers.openai import OpenAIProvider
 from pydantic_ai.providers.openrouter import OpenRouterProvider
 
+from code_puppy.tools.common import console
+
 # Environment variables used in this module:
 # - GEMINI_API_KEY: API key for Google's Gemini models. Required when using Gemini models.
 # - OPENAI_API_KEY: API key for OpenAI models. Required when using OpenAI models or custom_openai endpoints.

@@ -23,6 +26,53 @@ from pydantic_ai.providers.openrouter import OpenRouterProvider
 # Example: "X-Api-Key": "$OPENAI_API_KEY" will use the value from os.environ.get("OPENAI_API_KEY")
 
 
+def build_proxy_dict(proxy):
+    proxy_tokens = proxy.split(":")
+    structure = "{}:{}@{}:{}".format(
+        proxy_tokens[2], proxy_tokens[3], proxy_tokens[0], proxy_tokens[1]
+    )
+    proxies = {
+        "http": "http://{}/".format(structure),
+        "https": "http://{}".format(structure),
+    }
+    return proxies
+
+
+def build_httpx_proxy(proxy):
+    """Build an httpx.Proxy object from a proxy string in format ip:port:username:password"""
+    proxy_tokens = proxy.split(":")
+    if len(proxy_tokens) != 4:
+        raise ValueError(f"Invalid proxy format: {proxy}. Expected format: ip:port:username:password")
+
+    ip, port, username, password = proxy_tokens
+    proxy_url = f"http://{ip}:{port}"
+    proxy_auth = (username, password)
+
+    # Log the proxy being used
+    console.log(f"Using proxy: {proxy_url} with username: {username}")
+
+    return httpx.Proxy(url=proxy_url, auth=proxy_auth)
+
+
+def get_random_proxy_from_file(file_path):
+    """Reads proxy file and returns a random proxy formatted for httpx.AsyncClient"""
+    if not os.path.exists(file_path):
+        raise ValueError(f"Proxy file '{file_path}' not found.")
+
+    with open(file_path, "r") as f:
+        proxies = [line.strip() for line in f.readlines() if line.strip()]
+
+    if not proxies:
+        raise ValueError(f"Proxy file '{file_path}' is empty or contains only whitespace.")
+
+    selected_proxy = random.choice(proxies)
+    try:
+        return build_httpx_proxy(selected_proxy)
+    except ValueError as e:
+        console.log(f"Warning: Malformed proxy '{selected_proxy}' found in file '{file_path}', ignoring and continuing without proxy.")
+        return None
+
+
 def get_custom_config(model_config):
     custom_config = model_config.get("custom_endpoint", {})
     if not custom_config:

@@ -97,7 +147,18 @@ class ModelFactory:
 
     elif model_type == "custom_anthropic":
         url, headers, ca_certs_path, api_key = get_custom_config(model_config)
-
+
+        # Check for proxy configuration
+        proxy_file_path = os.environ.get("CODE_PUPPY_PROXIES")
+        proxy = None
+        if proxy_file_path:
+            proxy = get_random_proxy_from_file(proxy_file_path)
+
+        # Only pass proxy to client if it's valid
+        client_args = {"headers": headers, "verify": ca_certs_path}
+        if proxy is not None:
+            client_args["proxy"] = proxy
+        client = httpx.AsyncClient(**client_args)
         anthropic_client = AsyncAnthropic(
             base_url=url,
             http_client=client,

@@ -162,7 +223,18 @@ class ModelFactory:
 
     elif model_type == "custom_openai":
         url, headers, ca_certs_path, api_key = get_custom_config(model_config)
-
+
+        # Check for proxy configuration
+        proxy_file_path = os.environ.get("CODE_PUPPY_PROXIES")
+        proxy = None
+        if proxy_file_path:
+            proxy = get_random_proxy_from_file(proxy_file_path)
+
+        # Only pass proxy to client if it's valid
+        client_args = {"headers": headers, "verify": ca_certs_path}
+        if proxy is not None:
+            client_args["proxy"] = proxy
+        client = httpx.AsyncClient(**client_args)
         provider_args = dict(
             base_url=url,
             http_client=client,
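For reference, the proxy plumbing above expects CODE_PUPPY_PROXIES to point at a plain-text file with one ip:port:username:password entry per line, from which one proxy is chosen at random per client. A sketch of the expected setup (the file path and credentials below are invented):

    import os

    # Hypothetical proxies file in the ip:port:username:password format
    # that build_httpx_proxy() parses; one entry per line.
    with open("/tmp/proxies.txt", "w") as f:
        f.write("203.0.113.7:8080:alice:s3cret\n")
        f.write("203.0.113.9:3128:bob:hunter2\n")

    os.environ["CODE_PUPPY_PROXIES"] = "/tmp/proxies.txt"
    # get_random_proxy_from_file() then picks one line at random and hands an
    # httpx.Proxy to httpx.AsyncClient(proxy=...) for the custom_openai and
    # custom_anthropic model types.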
code_puppy/models.json
CHANGED

@@ -1,45 +1,59 @@
 {
-  "gemini-2.5-flash-preview-05-20": {
-    "type": "gemini",
-    "name": "gemini-2.5-flash-preview-05-20"
-  },
-  "gpt-4.1": {
-    "type": "openai",
-    "name": "gpt-4.1"
-  },
-  "gpt-4.1-mini": {
-    "type": "openai",
-    "name": "gpt-4.1-mini"
-  },
   "gpt-5": {
     "type": "openai",
-    "name": "gpt-5"
+    "name": "gpt-5",
+    "context_length": 400000
   },
-  "
-    "type": "
-    "name": "
+  "Cerebras-Qwen3-Coder-480b": {
+    "type": "custom_openai",
+    "name": "qwen-3-coder-480b",
+    "custom_endpoint": {
+      "url": "https://api.cerebras.ai/v1",
+      "api_key": "$CEREBRAS_API_KEY"
+    },
+    "context_length": 131072
   },
-  "
-    "type": "
-    "name": "
+  "Cerebras-Qwen3-235b-a22b-instruct-2507": {
+    "type": "custom_openai",
+    "name": "qwen-3-235b-a22b-instruct-2507",
+    "custom_endpoint": {
+      "url": "https://api.cerebras.ai/v1",
+      "api_key": "$CEREBRAS_API_KEY"
+    },
+    "context_length": 64000
   },
-  "gpt-
+  "Cerebras-gpt-oss-120b": {
     "type": "custom_openai",
-    "name": "gpt-
+    "name": "gpt-oss-120b",
     "custom_endpoint": {
-      "url": "https://
-      "
-
-
-      "ca_certs_path": "/path/to/cert.pem"
-    }
+      "url": "https://api.cerebras.ai/v1",
+      "api_key": "$CEREBRAS_API_KEY"
+    },
+    "context_length": 131072
   },
-  "
+  "Cerebras-Qwen-3-32b": {
     "type": "custom_openai",
-    "name": "
+    "name": "qwen-3-32b",
     "custom_endpoint": {
-      "url": "
-
+      "url": "https://api.cerebras.ai/v1",
+      "api_key": "$CEREBRAS_API_KEY"
+    },
+    "context_length": 65536
+  },
+  "o3": {
+    "type": "openai",
+    "name": "o3",
+    "context_length": 200000
+  },
+  "gemini-2.5-flash-preview-05-20": {
+    "type": "gemini",
+    "name": "gemini-2.5-flash-preview-05-20",
+    "context_length": 1048576
+  },
+  "gpt-4.1": {
+    "type": "openai",
+    "name": "gpt-4.1",
+    "context_length": 1000000
   },
   "Qwen/Qwen3-235B-A22B-fp8-tput": {
     "type": "custom_openai",

@@ -47,65 +61,51 @@
     "custom_endpoint": {
       "url": "https://api.together.xyz/v1",
       "api_key": "$TOGETHER_API_KEY"
-    }
-
-  "grok-3-mini-fast": {
-    "type": "custom_openai",
-    "name": "grok-3-mini-fast",
-    "custom_endpoint": {
-      "url": "https://api.x.ai/v1",
-      "api_key": "$XAI_API_KEY"
-    }
+    },
+    "context_length": 64000
   },
   "openrouter": {
     "type": "openrouter",
     "name": "meta-llama/llama-4-maverick:free",
-    "api_key": "$OPENROUTER_API_KEY"
+    "api_key": "$OPENROUTER_API_KEY",
+    "context_length": 131072
   },
   "azure-gpt-4.1": {
     "type": "azure_openai",
     "name": "gpt-4.1",
     "api_version": "2024-12-01-preview",
     "api_key": "$AZURE_OPENAI_API_KEY",
-    "azure_endpoint": "$AZURE_OPENAI_ENDPOINT"
+    "azure_endpoint": "$AZURE_OPENAI_ENDPOINT",
+    "context_length": 128000
   },
-
-    "type": "
-    "name": "
-    "
-    "api_key": "$AZURE_OPENAI_API_KEY",
-    "azure_endpoint": "$AZURE_OPENAI_ENDPOINT"
-  },
-  "Cerebras-Qwen3-Coder-480b": {
-    "type": "custom_openai",
-    "name": "qwen-3-coder-480b",
-    "custom_endpoint": {
-      "url": "https://api.cerebras.ai/v1",
-      "api_key": "$CEREBRAS_API_KEY"
-    }
+  "gpt-4.1-mini": {
+    "type": "openai",
+    "name": "gpt-4.1-mini",
+    "context_length": 128000
   },
-  "
-    "type": "
-    "name": "
-    "
-      "url": "https://api.cerebras.ai/v1",
-      "api_key": "$CEREBRAS_API_KEY"
-    }
+  "gpt-4.1-nano": {
+    "type": "openai",
+    "name": "gpt-4.1-nano",
+    "context_length": 128000
   },
-  "
+  "gpt-4.1-custom": {
     "type": "custom_openai",
-    "name": "gpt-
+    "name": "gpt-4.1-custom",
     "custom_endpoint": {
-      "url": "https://
-      "
-
+      "url": "https://my.cute.endpoint:8080",
+      "headers": {
+        "X-Api-Key": "$OPENAI_API_KEY"
+      },
+      "ca_certs_path": "/path/to/cert.pem"
+    },
+    "context_length": 128000
   },
-  "
+  "ollama-llama3.3": {
     "type": "custom_openai",
-    "name": "
+    "name": "llama3.3",
     "custom_endpoint": {
-      "url": "
-
-
+      "url": "http://localhost:11434/v1"
+    },
+    "context_length": 8192
   }
 }
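Since "context_length" now drives the compaction logic, it is worth noting how a custom entry plugs in: get_model_context_length() looks the key up in whichever models.json MODELS_JSON_PATH points to, and falls back to 128000 when a model omits it. A sketch with an invented override file and model name, reusing the schema shown above:

    import json
    import os

    # Hypothetical override file; "my-local-model" is a made-up entry.
    custom_models = {
        "my-local-model": {
            "type": "custom_openai",
            "name": "llama3.3",
            "custom_endpoint": {"url": "http://localhost:11434/v1"},
            "context_length": 8192,
        }
    }
    with open("/tmp/models.json", "w") as f:
        json.dump(custom_models, f, indent=2)

    # code_puppy reads this path instead of the bundled models.json.
    os.environ["MODELS_JSON_PATH"] = "/tmp/models.json"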
code_puppy/state_management.py
CHANGED

@@ -1,6 +1,7 @@
 from typing import Any, List
 
 from code_puppy.tools.common import console
+from code_puppy.message_history_processor import message_history_processor
 
 _message_history: List[Any] = []
 

@@ -35,8 +36,19 @@ def hash_message(message):
 
 
 def message_history_accumulator(messages: List[Any]):
+    global _message_history
+
     message_history_hashes = set([hash_message(m) for m in _message_history])
     for msg in messages:
         if hash_message(msg) not in message_history_hashes:
             _message_history.append(msg)
-
+
+    # Apply message history trimming using the main processor
+    # This ensures we maintain global state while still managing context limits
+    trimmed_messages = message_history_processor(_message_history)
+
+    # Update our global state with the trimmed version
+    # This preserves the state but keeps us within token limits
+    _message_history = trimmed_messages
+
+    return _message_history
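message_history_accumulator now owns both dedup and trimming. A self-contained sketch of the contract (hash_message here is a simplified stand-in for the real helper, and the real version passes the whole history through message_history_processor before returning):

    def hash_message(message) -> int:
        # Simplified stand-in for code_puppy's hash_message helper.
        return hash(repr(message))

    _history = ["hello puppy"]            # pre-existing global history

    def accumulate(messages):
        seen = {hash_message(m) for m in _history}
        for msg in messages:
            if hash_message(msg) not in seen:
                _history.append(msg)
        return _history                   # real code returns the trimmed list

    print(accumulate(["hello puppy", "list files"]))
    # ['hello puppy', 'list files'] -- the duplicate is skipped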
code_puppy/summarization_agent.py
ADDED

@@ -0,0 +1,72 @@
+import os
+from pathlib import Path
+
+import pydantic
+from pydantic_ai import Agent
+from pydantic_ai.mcp import MCPServerSSE
+
+from code_puppy.model_factory import ModelFactory
+from code_puppy.tools.common import console
+
+# Environment variables used in this module:
+# - MODELS_JSON_PATH: Optional path to a custom models.json configuration file.
+#   If not set, uses the default file in the package directory.
+# - MODEL_NAME: The model to use for code generation. Defaults to "gpt-4o".
+#   Must match a key in the models.json configuration.
+
+MODELS_JSON_PATH = os.environ.get("MODELS_JSON_PATH", None)
+
+_LAST_MODEL_NAME = None
+_summarization_agent = None
+
+
+def reload_summarization_agent():
+    """Create a specialized agent for summarizing messages when context limit is reached."""
+    global _summarization_agent, _LAST_MODEL_NAME
+    from code_puppy.config import get_model_name
+
+    model_name = get_model_name()
+    console.print(f"[bold cyan]Loading Summarization Model: {model_name}[/bold cyan]")
+    models_path = (
+        Path(MODELS_JSON_PATH)
+        if MODELS_JSON_PATH
+        else Path(__file__).parent / "models.json"
+    )
+    model = ModelFactory.get_model(model_name, ModelFactory.load_config(models_path))
+
+    # Specialized instructions for summarization
+    instructions = """You are a message summarization expert. Your task is to summarize conversation messages
+while preserving important context and information. The summaries should be concise but capture the essential
+content and intent of the original messages. This is to help manage token usage in a conversation history
+while maintaining context for the AI to continue the conversation effectively.
+
+When summarizing:
+1. Keep summary brief but informative
+2. Preserve key information and decisions
+3. Keep any important technical details
+4. Don't summarize the system message
+5. Make sure all tool calls and responses are summarized, as they are vital"""
+
+    agent = Agent(
+        model=model,
+        instructions=instructions,
+        output_type=str,
+        retries=1  # Fewer retries for summarization
+    )
+    _summarization_agent = agent
+    _LAST_MODEL_NAME = model_name
+    return _summarization_agent
+
+
+def get_summarization_agent(force_reload=False):
+    """
+    Retrieve the summarization agent with the currently set MODEL_NAME.
+    Forces a reload if the model has changed, or if force_reload is passed.
+    """
+    global _summarization_agent, _LAST_MODEL_NAME
+    from code_puppy.config import get_model_name
+
+    model_name = get_model_name()
+    if _summarization_agent is None or _LAST_MODEL_NAME != model_name or force_reload:
+        return reload_summarization_agent()
+    return _summarization_agent
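How the rest of the package consumes this module, pieced together from the message_history_processor hunk earlier in this diff (the prompt text here is abbreviated, not the literal string the package sends):

    from code_puppy.summarization_agent import get_summarization_agent

    agent = get_summarization_agent()   # cached; reloads when the configured model changes
    result = agent.run_sync("...conversation log...\n...summarization instructions...")
    print(result.output)                # plain string summary (output_type=str)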
code_puppy/tools/command_runner.py
CHANGED

@@ -164,21 +164,19 @@ def run_shell_command(
 
 class ReasoningOutput(BaseModel):
     success: bool = True
-    reasoning: str = ""
-    next_steps: str = ""
 
 
 def share_your_reasoning(
-    context: RunContext, reasoning: str, next_steps: str = None
+    context: RunContext, reasoning: str, next_steps: str | None = None
 ) -> ReasoningOutput:
     console.print("\n[bold white on purple] AGENT REASONING [/bold white on purple]")
     console.print("[bold cyan]Current reasoning:[/bold cyan]")
     console.print(Markdown(reasoning))
-    if next_steps and next_steps.strip():
+    if next_steps is not None and next_steps.strip():
         console.print("\n[bold cyan]Planned next steps:[/bold cyan]")
         console.print(Markdown(next_steps))
     console.print("[dim]" + "-" * 60 + "[/dim]\n")
-    return ReasoningOutput(**{"success": True
+    return ReasoningOutput(**{"success": True})
 
 
 def register_command_runner_tools(agent):

@@ -190,6 +188,6 @@ def register_command_runner_tools(agent):
 
     @agent.tool
     def agent_share_your_reasoning(
-        context: RunContext, reasoning: str, next_steps: str = None
+        context: RunContext, reasoning: str, next_steps: str | None = None
     ) -> ReasoningOutput:
         return share_your_reasoning(context, reasoning, next_steps)
|
@@ -58,12 +58,21 @@ def _delete_snippet_from_file(
|
|
|
58
58
|
diff_text = ""
|
|
59
59
|
try:
|
|
60
60
|
if not os.path.exists(file_path) or not os.path.isfile(file_path):
|
|
61
|
-
return {
|
|
61
|
+
return {
|
|
62
|
+
"success": False,
|
|
63
|
+
"path": file_path,
|
|
64
|
+
"message": f"File '{file_path}' does not exist.",
|
|
65
|
+
"changed": False,
|
|
66
|
+
"diff": diff_text,
|
|
67
|
+
}
|
|
62
68
|
with open(file_path, "r", encoding="utf-8") as f:
|
|
63
69
|
original = f.read()
|
|
64
70
|
if snippet not in original:
|
|
65
71
|
return {
|
|
66
|
-
"
|
|
72
|
+
"success": False,
|
|
73
|
+
"path": file_path,
|
|
74
|
+
"message": f"Snippet not found in file '{file_path}'.",
|
|
75
|
+
"changed": False,
|
|
67
76
|
"diff": diff_text,
|
|
68
77
|
}
|
|
69
78
|
modified = original.replace(snippet, "")
|
|
@@ -317,7 +326,13 @@ def _delete_file(context: RunContext, file_path: str = "") -> Dict[str, Any]:
|
|
|
317
326
|
file_path = os.path.abspath(file_path)
|
|
318
327
|
try:
|
|
319
328
|
if not os.path.exists(file_path) or not os.path.isfile(file_path):
|
|
320
|
-
res = {
|
|
329
|
+
res = {
|
|
330
|
+
"success": False,
|
|
331
|
+
"path": file_path,
|
|
332
|
+
"message": f"File '{file_path}' does not exist.",
|
|
333
|
+
"changed": False,
|
|
334
|
+
"diff": "",
|
|
335
|
+
}
|
|
321
336
|
else:
|
|
322
337
|
with open(file_path, "r", encoding="utf-8") as f:
|
|
323
338
|
original = f.read()
|
|
@@ -340,7 +355,13 @@ def _delete_file(context: RunContext, file_path: str = "") -> Dict[str, Any]:
|
|
|
340
355
|
}
|
|
341
356
|
except Exception as exc:
|
|
342
357
|
_log_error("Unhandled exception in delete_file", exc)
|
|
343
|
-
res = {
|
|
358
|
+
res = {
|
|
359
|
+
"success": False,
|
|
360
|
+
"path": file_path,
|
|
361
|
+
"message": str(exc),
|
|
362
|
+
"changed": False,
|
|
363
|
+
"diff": "",
|
|
364
|
+
}
|
|
344
365
|
_print_diff(res.get("diff", ""))
|
|
345
366
|
return res
|
|
346
367
|
|
|
code_puppy/tools/file_operations.py
CHANGED

@@ -41,11 +41,11 @@ def _list_files(
             f"[bold red]Error:[/bold red] Directory '{directory}' does not exist"
         )
         console.print("[dim]" + "-" * 60 + "[/dim]\n")
-        return ListFileOutput(files=[ListedFile(
+        return ListFileOutput(files=[ListedFile(path=None, type=None, full_path=None, depth=None)])
     if not os.path.isdir(directory):
         console.print(f"[bold red]Error:[/bold red] '{directory}' is not a directory")
         console.print("[dim]" + "-" * 60 + "[/dim]\n")
-        return ListFileOutput(files=[ListedFile(
+        return ListFileOutput(files=[ListedFile(path=None, type=None, full_path=None, depth=None)])
     folder_structure = {}
     file_list = []
     for root, dirs, files in os.walk(directory):

@@ -266,22 +266,26 @@ def _grep(
         f"[green]Found {len(matches)} match(es) for '{search_string}' in {directory}[/green]"
     )
 
-    return GrepOutput(matches=
+    return GrepOutput(matches=matches)
+
+
+def list_files(
+    context: RunContext, directory: str = ".", recursive: bool = True
+) -> ListFileOutput:
+    return _list_files(context, directory, recursive)
+
+
+def read_file(context: RunContext, file_path: str = "") -> ReadFileOutput:
+    return _read_file(context, file_path)
+
+
+def grep(
+    context: RunContext, search_string: str = "", directory: str = "."
+) -> GrepOutput:
+    return _grep(context, search_string, directory)
 
 
 def register_file_operations_tools(agent):
-… (3 removed lines not rendered by the diff viewer)
-    ) -> ListFileOutput:
-        return _list_files(context, directory, recursive)
-
-    @agent.tool
-    def read_file(context: RunContext, file_path: str = "") -> ReadFileOutput:
-        return _read_file(context, file_path)
-
-    @agent.tool
-    def grep(
-        context: RunContext, search_string: str = "", directory: str = "."
-    ) -> GrepOutput:
-        return _grep(context, search_string, directory)
+    agent.tool(list_files)
+    agent.tool(read_file)
+    agent.tool(grep)
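A side note on the registration refactor above: @agent.tool is ordinary decorator sugar, so calling agent.tool(fn) on a module-level function registers it identically, which is what the new register_file_operations_tools does. A sketch of the mechanics with a hypothetical stand-in for pydantic_ai.Agent:

    class DummyAgent:
        # Hypothetical stand-in for pydantic_ai.Agent, just to show the mechanics.
        def __init__(self):
            self.tools = {}
        def tool(self, fn):
            self.tools[fn.__name__] = fn
            return fn

    agent = DummyAgent()

    def grep(search_string: str = "", directory: str = "."):
        return f"searching for {search_string!r} in {directory}"

    agent.tool(grep)                      # direct call == bare @agent.tool decorator
    print(agent.tools["grep"]("TODO"))    # registered and callable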
|
@@ -1,45 +1,59 @@
|
|
|
1
1
|
{
|
|
2
|
-
"gemini-2.5-flash-preview-05-20": {
|
|
3
|
-
"type": "gemini",
|
|
4
|
-
"name": "gemini-2.5-flash-preview-05-20"
|
|
5
|
-
},
|
|
6
|
-
"gpt-4.1": {
|
|
7
|
-
"type": "openai",
|
|
8
|
-
"name": "gpt-4.1"
|
|
9
|
-
},
|
|
10
|
-
"gpt-4.1-mini": {
|
|
11
|
-
"type": "openai",
|
|
12
|
-
"name": "gpt-4.1-mini"
|
|
13
|
-
},
|
|
14
2
|
"gpt-5": {
|
|
15
3
|
"type": "openai",
|
|
16
|
-
"name": "gpt-5"
|
|
4
|
+
"name": "gpt-5",
|
|
5
|
+
"context_length": 400000
|
|
17
6
|
},
|
|
18
|
-
"
|
|
19
|
-
"type": "
|
|
20
|
-
"name": "
|
|
7
|
+
"Cerebras-Qwen3-Coder-480b": {
|
|
8
|
+
"type": "custom_openai",
|
|
9
|
+
"name": "qwen-3-coder-480b",
|
|
10
|
+
"custom_endpoint": {
|
|
11
|
+
"url": "https://api.cerebras.ai/v1",
|
|
12
|
+
"api_key": "$CEREBRAS_API_KEY"
|
|
13
|
+
},
|
|
14
|
+
"context_length": 131072
|
|
21
15
|
},
|
|
22
|
-
"
|
|
23
|
-
"type": "
|
|
24
|
-
"name": "
|
|
16
|
+
"Cerebras-Qwen3-235b-a22b-instruct-2507": {
|
|
17
|
+
"type": "custom_openai",
|
|
18
|
+
"name": "qwen-3-235b-a22b-instruct-2507",
|
|
19
|
+
"custom_endpoint": {
|
|
20
|
+
"url": "https://api.cerebras.ai/v1",
|
|
21
|
+
"api_key": "$CEREBRAS_API_KEY"
|
|
22
|
+
},
|
|
23
|
+
"context_length": 64000
|
|
25
24
|
},
|
|
26
|
-
"gpt-
|
|
25
|
+
"Cerebras-gpt-oss-120b": {
|
|
27
26
|
"type": "custom_openai",
|
|
28
|
-
"name": "gpt-
|
|
27
|
+
"name": "gpt-oss-120b",
|
|
29
28
|
"custom_endpoint": {
|
|
30
|
-
"url": "https://
|
|
31
|
-
"
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
"ca_certs_path": "/path/to/cert.pem"
|
|
35
|
-
}
|
|
29
|
+
"url": "https://api.cerebras.ai/v1",
|
|
30
|
+
"api_key": "$CEREBRAS_API_KEY"
|
|
31
|
+
},
|
|
32
|
+
"context_length": 131072
|
|
36
33
|
},
|
|
37
|
-
"
|
|
34
|
+
"Cerebras-Qwen-3-32b": {
|
|
38
35
|
"type": "custom_openai",
|
|
39
|
-
"name": "
|
|
36
|
+
"name": "qwen-3-32b",
|
|
40
37
|
"custom_endpoint": {
|
|
41
|
-
"url": "
|
|
42
|
-
|
|
38
|
+
"url": "https://api.cerebras.ai/v1",
|
|
39
|
+
"api_key": "$CEREBRAS_API_KEY"
|
|
40
|
+
},
|
|
41
|
+
"context_length": 65536
|
|
42
|
+
},
|
|
43
|
+
"o3": {
|
|
44
|
+
"type": "openai",
|
|
45
|
+
"name": "o3",
|
|
46
|
+
"context_length": 200000
|
|
47
|
+
},
|
|
48
|
+
"gemini-2.5-flash-preview-05-20": {
|
|
49
|
+
"type": "gemini",
|
|
50
|
+
"name": "gemini-2.5-flash-preview-05-20",
|
|
51
|
+
"context_length": 1048576
|
|
52
|
+
},
|
|
53
|
+
"gpt-4.1": {
|
|
54
|
+
"type": "openai",
|
|
55
|
+
"name": "gpt-4.1",
|
|
56
|
+
"context_length": 1000000
|
|
43
57
|
},
|
|
44
58
|
"Qwen/Qwen3-235B-A22B-fp8-tput": {
|
|
45
59
|
"type": "custom_openai",
|
|
@@ -47,65 +61,51 @@
|
|
|
47
61
|
"custom_endpoint": {
|
|
48
62
|
"url": "https://api.together.xyz/v1",
|
|
49
63
|
"api_key": "$TOGETHER_API_KEY"
|
|
50
|
-
}
|
|
51
|
-
|
|
52
|
-
"grok-3-mini-fast": {
|
|
53
|
-
"type": "custom_openai",
|
|
54
|
-
"name": "grok-3-mini-fast",
|
|
55
|
-
"custom_endpoint": {
|
|
56
|
-
"url": "https://api.x.ai/v1",
|
|
57
|
-
"api_key": "$XAI_API_KEY"
|
|
58
|
-
}
|
|
64
|
+
},
|
|
65
|
+
"context_length": 64000
|
|
59
66
|
},
|
|
60
67
|
"openrouter": {
|
|
61
68
|
"type": "openrouter",
|
|
62
69
|
"name": "meta-llama/llama-4-maverick:free",
|
|
63
|
-
"api_key": "$OPENROUTER_API_KEY"
|
|
70
|
+
"api_key": "$OPENROUTER_API_KEY",
|
|
71
|
+
"context_length": 131072
|
|
64
72
|
},
|
|
65
73
|
"azure-gpt-4.1": {
|
|
66
74
|
"type": "azure_openai",
|
|
67
75
|
"name": "gpt-4.1",
|
|
68
76
|
"api_version": "2024-12-01-preview",
|
|
69
77
|
"api_key": "$AZURE_OPENAI_API_KEY",
|
|
70
|
-
"azure_endpoint": "$AZURE_OPENAI_ENDPOINT"
|
|
78
|
+
"azure_endpoint": "$AZURE_OPENAI_ENDPOINT",
|
|
79
|
+
"context_length": 128000
|
|
71
80
|
},
|
|
72
|
-
|
|
73
|
-
"type": "
|
|
74
|
-
"name": "
|
|
75
|
-
"
|
|
76
|
-
"api_key": "$AZURE_OPENAI_API_KEY",
|
|
77
|
-
"azure_endpoint": "$AZURE_OPENAI_ENDPOINT"
|
|
78
|
-
},
|
|
79
|
-
"Cerebras-Qwen3-Coder-480b": {
|
|
80
|
-
"type": "custom_openai",
|
|
81
|
-
"name": "qwen-3-coder-480b",
|
|
82
|
-
"custom_endpoint": {
|
|
83
|
-
"url": "https://api.cerebras.ai/v1",
|
|
84
|
-
"api_key": "$CEREBRAS_API_KEY"
|
|
85
|
-
}
|
|
81
|
+
"gpt-4.1-mini": {
|
|
82
|
+
"type": "openai",
|
|
83
|
+
"name": "gpt-4.1-mini",
|
|
84
|
+
"context_length": 128000
|
|
86
85
|
},
|
|
87
|
-
"
|
|
88
|
-
"type": "
|
|
89
|
-
"name": "
|
|
90
|
-
"
|
|
91
|
-
"url": "https://api.cerebras.ai/v1",
|
|
92
|
-
"api_key": "$CEREBRAS_API_KEY"
|
|
93
|
-
}
|
|
86
|
+
"gpt-4.1-nano": {
|
|
87
|
+
"type": "openai",
|
|
88
|
+
"name": "gpt-4.1-nano",
|
|
89
|
+
"context_length": 128000
|
|
94
90
|
},
|
|
95
|
-
"
|
|
91
|
+
"gpt-4.1-custom": {
|
|
96
92
|
"type": "custom_openai",
|
|
97
|
-
"name": "gpt-
|
|
93
|
+
"name": "gpt-4.1-custom",
|
|
98
94
|
"custom_endpoint": {
|
|
99
|
-
"url": "https://
|
|
100
|
-
"
|
|
101
|
-
|
|
95
|
+
"url": "https://my.cute.endpoint:8080",
|
|
96
|
+
"headers": {
|
|
97
|
+
"X-Api-Key": "$OPENAI_API_KEY"
|
|
98
|
+
},
|
|
99
|
+
"ca_certs_path": "/path/to/cert.pem"
|
|
100
|
+
},
|
|
101
|
+
"context_length": 128000
|
|
102
102
|
},
|
|
103
|
-
"
|
|
103
|
+
"ollama-llama3.3": {
|
|
104
104
|
"type": "custom_openai",
|
|
105
|
-
"name": "
|
|
105
|
+
"name": "llama3.3",
|
|
106
106
|
"custom_endpoint": {
|
|
107
|
-
"url": "
|
|
108
|
-
|
|
109
|
-
|
|
107
|
+
"url": "http://localhost:11434/v1"
|
|
108
|
+
},
|
|
109
|
+
"context_length": 8192
|
|
110
110
|
}
|
|
111
111
|
}
|
|
{code_puppy-0.0.80.dist-info → code_puppy-0.0.82.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: code-puppy
-Version: 0.0.
+Version: 0.0.82
 Summary: Code generation agent
 Author: Michael Pfaffenberger
 License: MIT

@@ -20,13 +20,14 @@ Requires-Dist: json-repair>=0.46.2
 Requires-Dist: logfire>=0.7.1
 Requires-Dist: pathspec>=0.11.0
 Requires-Dist: prompt-toolkit>=3.0.38
-Requires-Dist: pydantic-ai>=0.
+Requires-Dist: pydantic-ai>=0.7.2
 Requires-Dist: pydantic>=2.4.0
 Requires-Dist: pytest-cov>=6.1.1
 Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: rapidfuzz>=3.13.0
 Requires-Dist: rich>=13.4.2
 Requires-Dist: ruff>=0.11.11
+Requires-Dist: tiktoken>=0.11.0
 Requires-Dist: tree-sitter-language-pack>=0.8.0
 Requires-Dist: tree-sitter-typescript>=0.23.2
 Description-Content-Type: text/markdown

@@ -51,6 +52,11 @@ Description-Content-Type: text/markdown
 
 Code Puppy is an AI-powered code generation agent, designed to understand programming tasks, generate high-quality code, and explain its reasoning similar to tools like Windsurf and Cursor.
 
+## Quick start
+
+`uvx code-puppy -i`
+
+
 ## Features
 
 - **Multi-language support**: Capable of generating code in various programming languages.

@@ -131,22 +137,10 @@ code-puppy "write me a C++ hello world program in /tmp/main.cpp then compile it
 
 This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
 
-##
-
-
-### Example of a Puppy Rule
-For instance, if you want to ensure that your application follows a specific design guideline, like using a dark mode theme with teal accents, you can define a puppy rule like this:
+## Agent Rules
+We support AGENT.md files for defining coding standards and styles that your code should comply with. These rules can cover various aspects such as formatting, naming conventions, and even design guidelines.
 
-
-# Puppy Rule: Dark Mode with Teal Accents
-
-- theme: dark
-- accent-color: teal
-- background-color: #121212
-- text-color: #e0e0e0
-
-Ensure that all components follow these color schemes to promote consistency in design.
-```
+For examples and more information about agent rules, visit [https://agent.md](https://agent.md)
 
 ## Using MCP Servers for External Tools
 
{code_puppy-0.0.80.dist-info → code_puppy-0.0.82.dist-info}/RECORD
CHANGED

@@ -1,13 +1,14 @@
-code_puppy/__init__.py,sha256
-code_puppy/agent.py,sha256=
+code_puppy/__init__.py,sha256=oDE4GhaqOHsYi9XCGp6A2-PqhDqxJiYP_XmxmoKWoPU,168
+code_puppy/agent.py,sha256=E-wXWYESbGvcEIqqZQm0gXeWec2eVlsvMG8RmtntFx4,3931
 code_puppy/agent_prompts.py,sha256=13YIpTZa3R3lg60-fdkll7t7hgSBtQL0M53wcE1gzyQ,6834
 code_puppy/config.py,sha256=r5nw5ChOP8xd_K5yo8U5OtO2gy2bFhARiyNtDp1JrwQ,5013
 code_puppy/main.py,sha256=uKMG0WNrFjEbsiEb_OwL_fNJbqMyTgztGjPKIOoYdSs,10444
-code_puppy/message_history_processor.py,sha256=
-code_puppy/model_factory.py,sha256=
-code_puppy/models.json,sha256=
+code_puppy/message_history_processor.py,sha256=MNsr3irynhqS02n7nRtp4WKMVPJsXmjpgjMwvkjqtdA,6152
+code_puppy/model_factory.py,sha256=3j7AcJfZAHbx_plL9oOxjGJO0MMTRaQFThCErg8VpH8,10909
+code_puppy/models.json,sha256=jr0-LW87aJS79GosVwoZdHeeq5eflPzgdPoMbcqpVA8,2728
 code_puppy/session_memory.py,sha256=4sgAAjbXdLSi8hETpd56tgtrG6hqMUuZWDlJOu6BQjA,2735
-code_puppy/state_management.py,sha256=
+code_puppy/state_management.py,sha256=1QycApDBbXjayxXsYRecJib8TQ-MYMTeYvN5P_1Ipdg,1747
+code_puppy/summarization_agent.py,sha256=N1UZg_R3wJFb7ZdVexDqx7L_8yxQ5m5nMOwGsLNfvKM,2744
 code_puppy/version_checker.py,sha256=aRGulzuY4C4CdFvU1rITduyL-1xTFsn4GiD1uSfOl_Y,396
 code_puppy/command_line/__init__.py,sha256=y7WeRemfYppk8KVbCGeAIiTuiOszIURCDjOMZv_YRmU,45
 code_puppy/command_line/file_path_completion.py,sha256=gw8NpIxa6GOpczUJRyh7VNZwoXKKn-yvCqit7h2y6Gg,2931

@@ -17,14 +18,14 @@ code_puppy/command_line/motd.py,sha256=1qEPpEQb14XfEXj-_pmx7ad8VtzvP7JSmvvW_JWW-
 code_puppy/command_line/prompt_toolkit_completion.py,sha256=_gP0FIOgHDNHTTWLNL0XNzr6sO0ISe7Mec1uQNo9kcM,8337
 code_puppy/command_line/utils.py,sha256=7eyxDHjPjPB9wGDJQQcXV_zOsGdYsFgI0SGCetVmTqE,1251
 code_puppy/tools/__init__.py,sha256=ozIGpLM7pKSjH4UeojkTodhfVYZeNzMsLtK_oyw41HA,456
-code_puppy/tools/command_runner.py,sha256=
+code_puppy/tools/command_runner.py,sha256=NFCL35x44McMzSUNHQyg5q4Zx7wkvqD-nH4_YAU8N2s,7229
 code_puppy/tools/common.py,sha256=M53zhiXZAmPdvi1Y_bzCxgvEmifOvRRJvYPARYRZqHw,2253
-code_puppy/tools/file_modifications.py,sha256=
-code_puppy/tools/file_operations.py,sha256=
+code_puppy/tools/file_modifications.py,sha256=nGI8gRD6Vtkg8EzBkErsv3khE3VI-_M1z_PdQLvjfLo,13847
+code_puppy/tools/file_operations.py,sha256=eftkN-MxsRGQc8c1iIoNmN5r-Ppld5YJRT7a89kxpkM,11207
 code_puppy/tools/ts_code_map.py,sha256=o-u8p5vsYwitfDtVEoPS-7MwWn2xHzwtIQLo1_WMhQs,17647
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
+code_puppy-0.0.82.data/data/code_puppy/models.json,sha256=jr0-LW87aJS79GosVwoZdHeeq5eflPzgdPoMbcqpVA8,2728
+code_puppy-0.0.82.dist-info/METADATA,sha256=soWr7TjXrbPa_kIwO-hgHgAJe6Ah4nDWYQ-bB7Y9hpE,6351
+code_puppy-0.0.82.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+code_puppy-0.0.82.dist-info/entry_points.txt,sha256=d8YkBvIUxF-dHNJAj-x4fPEqizbY5d_TwvYpc01U5kw,58
+code_puppy-0.0.82.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
+code_puppy-0.0.82.dist-info/RECORD,,
{code_puppy-0.0.80.dist-info → code_puppy-0.0.82.dist-info}/WHEEL
File without changes

{code_puppy-0.0.80.dist-info → code_puppy-0.0.82.dist-info}/entry_points.txt
File without changes

{code_puppy-0.0.80.dist-info → code_puppy-0.0.82.dist-info}/licenses/LICENSE
File without changes