code-puppy 0.0.165__py3-none-any.whl → 0.0.167__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/http_utils.py +110 -7
- code_puppy/mcp/managed_server.py +2 -4
- code_puppy/mcp/server_registry_catalog.py +5 -6
- code_puppy/message_history_processor.py +88 -123
- code_puppy/tools/command_runner.py +45 -10
- code_puppy/tools/file_operations.py +42 -7
- code_puppy/tui/components/custom_widgets.py +7 -2
- {code_puppy-0.0.165.dist-info → code_puppy-0.0.167.dist-info}/METADATA +2 -1
- {code_puppy-0.0.165.dist-info → code_puppy-0.0.167.dist-info}/RECORD +13 -13
- {code_puppy-0.0.165.data → code_puppy-0.0.167.data}/data/code_puppy/models.json +0 -0
- {code_puppy-0.0.165.dist-info → code_puppy-0.0.167.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.165.dist-info → code_puppy-0.0.167.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.165.dist-info → code_puppy-0.0.167.dist-info}/licenses/LICENSE +0 -0
code_puppy/http_utils.py
CHANGED
@@ -10,12 +10,34 @@ from typing import Dict, Optional, Union
 
 import httpx
 import requests
+from tenacity import retry_if_exception_type, stop_after_attempt, wait_exponential
+
+try:
+    from pydantic_ai.retries import (
+        AsyncTenacityTransport,
+        RetryConfig,
+        TenacityTransport,
+        wait_retry_after,
+    )
+except ImportError:
+    # Fallback if pydantic_ai.retries is not available
+    AsyncTenacityTransport = None
+    RetryConfig = None
+    TenacityTransport = None
+    wait_retry_after = None
 
 try:
     from .reopenable_async_client import ReopenableAsyncClient
 except ImportError:
     ReopenableAsyncClient = None
 
+try:
+    from .messaging import emit_info
+except ImportError:
+    # Fallback if messaging system is not available
+    def emit_info(content: str, **metadata):
+        pass  # No-op if messaging system is not available
+
 
 def get_cert_bundle_path() -> str:
     # First check if SSL_CERT_FILE environment variable is set
@@ -28,22 +50,72 @@ def create_client(
     timeout: int = 180,
     verify: Union[bool, str] = None,
     headers: Optional[Dict[str, str]] = None,
+    retry_status_codes: tuple = (429, 502, 503, 504),
 ) -> httpx.Client:
     if verify is None:
         verify = get_cert_bundle_path()
 
-
+    # If retry components are available, create a client with retry transport
+    if TenacityTransport and RetryConfig and wait_retry_after:
+        def should_retry_status(response):
+            """Raise exceptions for retryable HTTP status codes."""
+            if response.status_code in retry_status_codes:
+                emit_info(f"HTTP retry: Retrying request due to status code {response.status_code}")
+                response.raise_for_status()
+
+        transport = TenacityTransport(
+            config=RetryConfig(
+                retry=lambda e: isinstance(e, httpx.HTTPStatusError) and e.response.status_code in retry_status_codes,
+                wait=wait_retry_after(
+                    fallback_strategy=wait_exponential(multiplier=1, max=60),
+                    max_wait=300
+                ),
+                stop=stop_after_attempt(5),
+                reraise=True
+            ),
+            validate_response=should_retry_status
+        )
+
+        return httpx.Client(transport=transport, verify=verify, headers=headers or {}, timeout=timeout)
+    else:
+        # Fallback to regular client if retry components are not available
+        return httpx.Client(verify=verify, headers=headers or {}, timeout=timeout)
 
 
 def create_async_client(
     timeout: int = 180,
     verify: Union[bool, str] = None,
     headers: Optional[Dict[str, str]] = None,
+    retry_status_codes: tuple = (429, 502, 503, 504),
 ) -> httpx.AsyncClient:
     if verify is None:
         verify = get_cert_bundle_path()
 
-
+    # If retry components are available, create a client with retry transport
+    if AsyncTenacityTransport and RetryConfig and wait_retry_after:
+        def should_retry_status(response):
+            """Raise exceptions for retryable HTTP status codes."""
+            if response.status_code in retry_status_codes:
+                emit_info(f"HTTP retry: Retrying request due to status code {response.status_code}")
+                response.raise_for_status()
+
+        transport = AsyncTenacityTransport(
+            config=RetryConfig(
+                retry=lambda e: isinstance(e, httpx.HTTPStatusError) and e.response.status_code in retry_status_codes,
+                wait=wait_retry_after(
+                    fallback_strategy=wait_exponential(multiplier=1, max=60),
+                    max_wait=300
+                ),
+                stop=stop_after_attempt(5),
+                reraise=True
+            ),
+            validate_response=should_retry_status
+        )
+
+        return httpx.AsyncClient(transport=transport, verify=verify, headers=headers or {}, timeout=timeout)
+    else:
+        # Fallback to regular client if retry components are not available
+        return httpx.AsyncClient(verify=verify, headers=headers or {}, timeout=timeout)
 
 
 def create_requests_session(
@@ -90,17 +162,48 @@ def create_reopenable_async_client(
     timeout: int = 180,
     verify: Union[bool, str] = None,
     headers: Optional[Dict[str, str]] = None,
+    retry_status_codes: tuple = (429, 502, 503, 504),
 ) -> Union[ReopenableAsyncClient, httpx.AsyncClient]:
     if verify is None:
         verify = get_cert_bundle_path()
 
-
-
-
+    # If retry components are available, create a client with retry transport
+    if AsyncTenacityTransport and RetryConfig and wait_retry_after:
+        def should_retry_status(response):
+            """Raise exceptions for retryable HTTP status codes."""
+            if response.status_code in retry_status_codes:
+                emit_info(f"HTTP retry: Retrying request due to status code {response.status_code}")
+                response.raise_for_status()
+
+        transport = AsyncTenacityTransport(
+            config=RetryConfig(
+                retry=lambda e: isinstance(e, httpx.HTTPStatusError) and e.response.status_code in retry_status_codes,
+                wait=wait_retry_after(
+                    fallback_strategy=wait_exponential(multiplier=1, max=60),
+                    max_wait=300
+                ),
+                stop=stop_after_attempt(5),
+                reraise=True
+            ),
+            validate_response=should_retry_status
         )
+
+        if ReopenableAsyncClient is not None:
+            return ReopenableAsyncClient(
+                transport=transport, verify=verify, headers=headers or {}, timeout=timeout
+            )
+        else:
+            # Fallback to regular AsyncClient if ReopenableAsyncClient is not available
+            return httpx.AsyncClient(transport=transport, verify=verify, headers=headers or {}, timeout=timeout)
     else:
-        # Fallback to regular
-
+        # Fallback to regular clients if retry components are not available
+        if ReopenableAsyncClient is not None:
+            return ReopenableAsyncClient(
+                verify=verify, headers=headers or {}, timeout=timeout
+            )
+        else:
+            # Fallback to regular AsyncClient if ReopenableAsyncClient is not available
+            return httpx.AsyncClient(verify=verify, headers=headers or {}, timeout=timeout)
 
 
 def is_cert_bundle_available() -> bool:
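The new clients install retries at the httpx transport layer through pydantic_ai's TenacityTransport/AsyncTenacityTransport. Below is a rough, standalone sketch of the same policy (retry 429/502/503/504 responses, up to five attempts, exponential backoff capped at 60 seconds) using plain tenacity and httpx; `fetch_with_retry` and `RetryableStatusError` are illustrative names, not part of code_puppy, and the real transport additionally honours Retry-After headers via wait_retry_after.

```python
import httpx
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential

RETRY_STATUS_CODES = (429, 502, 503, 504)


class RetryableStatusError(Exception):
    """Raised for responses whose status code should trigger a retry."""


@retry(
    retry=retry_if_exception_type(RetryableStatusError),
    wait=wait_exponential(multiplier=1, max=60),
    stop=stop_after_attempt(5),
    reraise=True,
)
def fetch_with_retry(url: str) -> httpx.Response:
    # Each attempt issues a fresh request; retryable statuses raise and are retried.
    response = httpx.get(url, timeout=180)
    if response.status_code in RETRY_STATUS_CODES:
        raise RetryableStatusError(f"retryable status {response.status_code}")
    return response
```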
code_puppy/mcp/managed_server.py
CHANGED
@@ -226,11 +226,9 @@ class ManagedMCPServer:
                 http_kwargs["timeout"] = config["timeout"]
             if "read_timeout" in config:
                 http_kwargs["read_timeout"] = config["read_timeout"]
-            if "
-                http_kwargs["
-            elif config.get("headers"):
+            if "headers" in config:
+                http_kwargs["headers"] = config.get("headers")
             # Create HTTP client if headers are provided but no client specified
-                http_kwargs["http_client"] = self._get_http_client()
 
             self._pydantic_server = MCPServerStreamableHTTP(
                 **http_kwargs, process_tool_call=process_tool_call
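For reference, a minimal sketch of the kwargs this block now assembles from a server config before constructing MCPServerStreamableHTTP; the config values are placeholders and the dict literal is illustrative, not code_puppy's actual schema.

```python
# Illustrative server config; any "headers" key is now passed straight through.
config = {
    "timeout": 30,
    "read_timeout": 60,
    "headers": {"Authorization": "Bearer <token>"},
}

http_kwargs = {}
if "timeout" in config:
    http_kwargs["timeout"] = config["timeout"]
if "read_timeout" in config:
    http_kwargs["read_timeout"] = config["read_timeout"]
if "headers" in config:
    http_kwargs["headers"] = config.get("headers")
# http_kwargs is then unpacked into MCPServerStreamableHTTP(**http_kwargs, ...).
```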
code_puppy/mcp/server_registry_catalog.py
CHANGED

@@ -791,18 +791,17 @@ MCP_SERVER_REGISTRY: List[MCPServerTemplate] = [
         description="Search and retrieve documentation from multiple sources with AI-powered context understanding",
         category="Documentation",
         tags=["documentation", "search", "context", "ai", "knowledge", "docs", "cloud"],
-        type="
+        type="http",
         config={
-            "
-
-
+            "url": "https://mcp.context7.com/mcp",
+            "headers": {
+                "Authorization": "Bearer $CONTEXT7_API_KEY"
+            }
         },
         verified=True,
         popular=True,
         requires=MCPServerRequirements(
             environment_vars=["CONTEXT7_API_KEY"],
-            required_tools=["node", "npx"],
-            package_dependencies=["@upstash/context7-mcp"],
         ),
         example_usage="Cloud-based service - no local setup required",
     ),
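The Context7 entry now targets the hosted streamable-HTTP endpoint instead of a local npx install. Below is a minimal sketch of what the resolved configuration might look like, assuming the $CONTEXT7_API_KEY placeholder is expanded from the environment (the expansion itself happens elsewhere in code_puppy).

```python
import os

# Illustrative resolved config for the cloud-hosted Context7 MCP server.
context7_config = {
    "url": "https://mcp.context7.com/mcp",
    "headers": {
        "Authorization": f"Bearer {os.environ.get('CONTEXT7_API_KEY', '')}",
    },
}
```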
code_puppy/message_history_processor.py
CHANGED

@@ -1,15 +1,15 @@
 import json
 import queue
-from typing import Any,
+from typing import Any, List, Set, Tuple
 
 import pydantic
 from pydantic_ai.messages import ModelMessage, ModelRequest, TextPart, ToolCallPart
 
 from code_puppy.config import (
-    get_compaction_strategy,
-    get_compaction_threshold,
     get_model_name,
     get_protected_token_count,
+    get_compaction_threshold,
+    get_compaction_strategy,
 )
 from code_puppy.messaging import emit_error, emit_info, emit_warning
 from code_puppy.model_factory import ModelFactory

@@ -82,9 +82,7 @@ def estimate_tokens_for_message(message: ModelMessage) -> int:
 
 
 def filter_huge_messages(messages: List[ModelMessage]) -> List[ModelMessage]:
-
-    deduplicated = deduplicate_tool_returns(messages)
-    filtered = [m for m in deduplicated if estimate_tokens_for_message(m) < 50000]
+    filtered = [m for m in messages if estimate_tokens_for_message(m) < 50000]
     pruned = prune_interrupted_tool_calls(filtered)
     return pruned
 
@@ -150,6 +148,81 @@ def split_messages_for_protected_summarization(
     return messages_to_summarize, protected_messages
 
 
+def deduplicate_tool_returns(messages: List[ModelMessage]) -> List[ModelMessage]:
+    """
+    Remove duplicate tool returns while preserving the first occurrence for each tool_call_id.
+
+    This function identifies tool-return parts that share the same tool_call_id and
+    removes duplicates, keeping only the first return for each id. This prevents
+    conversation corruption from duplicate tool_result blocks.
+    """
+    if not messages:
+        return messages
+
+    seen_tool_returns: Set[str] = set()
+    deduplicated: List[ModelMessage] = []
+    removed_count = 0
+
+    for msg in messages:
+        # Check if this message has any parts we need to filter
+        if not hasattr(msg, "parts") or not msg.parts:
+            deduplicated.append(msg)
+            continue
+
+        # Filter parts within this message
+        filtered_parts = []
+        msg_had_duplicates = False
+
+        for part in msg.parts:
+            tool_call_id = getattr(part, "tool_call_id", None)
+            part_kind = getattr(part, "part_kind", None)
+
+            # Check if this is a tool-return part
+            if tool_call_id and part_kind in {
+                "tool-return",
+                "tool-result",
+                "tool_result",
+            }:
+                if tool_call_id in seen_tool_returns:
+                    # This is a duplicate return, skip it
+                    msg_had_duplicates = True
+                    removed_count += 1
+                    continue
+                else:
+                    # First occurrence of this return, keep it
+                    seen_tool_returns.add(tool_call_id)
+                    filtered_parts.append(part)
+            else:
+                # Not a tool return, always keep
+                filtered_parts.append(part)
+
+        # If we filtered out parts, create a new message with filtered parts
+        if msg_had_duplicates and filtered_parts:
+            # Create a new message with the same attributes but filtered parts
+            new_msg = type(msg)(parts=filtered_parts)
+            # Copy over other attributes if they exist
+            for attr_name in dir(msg):
+                if (
+                    not attr_name.startswith("_")
+                    and attr_name != "parts"
+                    and hasattr(msg, attr_name)
+                ):
+                    try:
+                        setattr(new_msg, attr_name, getattr(msg, attr_name))
+                    except (AttributeError, TypeError):
+                        # Skip attributes that can't be set
+                        pass
+            deduplicated.append(new_msg)
+        elif filtered_parts:  # No duplicates but has parts
+            deduplicated.append(msg)
+        # If no parts remain after filtering, drop the entire message
+
+    if removed_count > 0:
+        emit_warning(f"Removed {removed_count} duplicate tool-return part(s)")
+
+    return deduplicated
+
+
 def summarize_messages(
     messages: List[ModelMessage], with_protection=True
 ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
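To make the relocated helper's behaviour concrete, here is a small self-contained sketch of the same first-occurrence-wins deduplication keyed on tool_call_id; plain dicts stand in for message parts, so this is an illustration rather than the real ModelMessage handling.

```python
# Minimal sketch: keep only the first tool-return seen for each tool_call_id.
parts = [
    {"part_kind": "tool-return", "tool_call_id": "a", "content": "first"},
    {"part_kind": "tool-return", "tool_call_id": "a", "content": "duplicate"},
    {"part_kind": "tool-return", "tool_call_id": "b", "content": "kept"},
]

seen: set[str] = set()
deduplicated = []
for part in parts:
    if part["part_kind"] == "tool-return" and part["tool_call_id"] in seen:
        continue  # drop duplicate returns for the same call id
    seen.add(part["tool_call_id"])
    deduplicated.append(part)

assert [p["content"] for p in deduplicated] == ["first", "kept"]
```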
@@ -236,100 +309,21 @@ def get_model_context_length() -> int:
     return int(context_length)
 
 
-def deduplicate_tool_returns(messages: List[ModelMessage]) -> List[ModelMessage]:
-    """
-    Remove duplicate tool returns while preserving the first occurrence for each tool_call_id.
-
-    This function identifies tool-return parts that share the same tool_call_id and
-    removes duplicates, keeping only the first return for each id. This prevents
-    conversation corruption from duplicate tool_result blocks.
-    """
-    if not messages:
-        return messages
-
-    seen_tool_returns: Set[str] = set()
-    deduplicated: List[ModelMessage] = []
-    removed_count = 0
-
-    for msg in messages:
-        # Check if this message has any parts we need to filter
-        if not hasattr(msg, "parts") or not msg.parts:
-            deduplicated.append(msg)
-            continue
-
-        # Filter parts within this message
-        filtered_parts = []
-        msg_had_duplicates = False
-
-        for part in msg.parts:
-            tool_call_id = getattr(part, "tool_call_id", None)
-            part_kind = getattr(part, "part_kind", None)
-
-            # Check if this is a tool-return part
-            if tool_call_id and part_kind in {
-                "tool-return",
-                "tool-result",
-                "tool_result",
-            }:
-                if tool_call_id in seen_tool_returns:
-                    # This is a duplicate return, skip it
-                    msg_had_duplicates = True
-                    removed_count += 1
-                    continue
-                else:
-                    # First occurrence of this return, keep it
-                    seen_tool_returns.add(tool_call_id)
-                    filtered_parts.append(part)
-            else:
-                # Not a tool return, always keep
-                filtered_parts.append(part)
-
-        # If we filtered out parts, create a new message with filtered parts
-        if msg_had_duplicates and filtered_parts:
-            # Create a new message with the same attributes but filtered parts
-            new_msg = type(msg)(parts=filtered_parts)
-            # Copy over other attributes if they exist
-            for attr_name in dir(msg):
-                if (
-                    not attr_name.startswith("_")
-                    and attr_name != "parts"
-                    and hasattr(msg, attr_name)
-                ):
-                    try:
-                        setattr(new_msg, attr_name, getattr(msg, attr_name))
-                    except (AttributeError, TypeError):
-                        # Skip attributes that can't be set
-                        pass
-            deduplicated.append(new_msg)
-        elif filtered_parts:  # No duplicates but has parts
-            deduplicated.append(msg)
-        # If no parts remain after filtering, drop the entire message
-
-    if removed_count > 0:
-        emit_warning(f"Removed {removed_count} duplicate tool-return part(s)")
-
-    return deduplicated
-
-
 def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMessage]:
     """
     Remove any messages that participate in mismatched tool call sequences.
 
     A mismatched tool call id is one that appears in a ToolCall (model/tool request)
-    without a corresponding tool return, or vice versa. We
-
-    messages that contain parts referencing mismatched tool_call_ids.
+    without a corresponding tool return, or vice versa. We preserve original order
+    and only drop messages that contain parts referencing mismatched tool_call_ids.
     """
     if not messages:
         return messages
 
-
-
+    tool_call_ids: Set[str] = set()
+    tool_return_ids: Set[str] = set()
 
-
-    tool_return_counts: Dict[str, int] = {}
-
-    # First pass: count occurrences of each tool_call_id for calls vs returns
+    # First pass: collect ids for calls vs returns
     for msg in messages:
         for part in getattr(msg, "parts", []) or []:
             tool_call_id = getattr(part, "tool_call_id", None)
@@ -338,25 +332,11 @@ def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMess
             # Heuristic: if it's an explicit ToolCallPart or has a tool_name/args,
             # consider it a call; otherwise it's a return/result.
             if part.part_kind == "tool-call":
-
-                    tool_call_counts.get(tool_call_id, 0) + 1
-                )
+                tool_call_ids.add(tool_call_id)
             else:
-
-                    tool_return_counts.get(tool_call_id, 0) + 1
-                )
-
-    # Find mismatched tool_call_ids (not exactly 1:1 ratio)
-    all_tool_ids = set(tool_call_counts.keys()) | set(tool_return_counts.keys())
-    mismatched: Set[str] = set()
-
-    for tool_id in all_tool_ids:
-        call_count = tool_call_counts.get(tool_id, 0)
-        return_count = tool_return_counts.get(tool_id, 0)
-        # Enforce strict 1:1 ratio - both must be exactly 1
-        if call_count != 1 or return_count != 1:
-            mismatched.add(tool_id)
+                tool_return_ids.add(tool_call_id)
 
+    mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
     if not mismatched:
         return messages
 
@@ -382,10 +362,7 @@ def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMess
 
 
 def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
-    # First,
-    messages = deduplicate_tool_returns(messages)
-
-    # Then, prune any interrupted/mismatched tool-call conversations
+    # First, prune any interrupted/mismatched tool-call conversations
     total_current_tokens = sum(estimate_tokens_for_message(msg) for msg in messages)
 
     model_max = get_model_context_length()
@@ -477,8 +454,6 @@ def truncation(
     messages: List[ModelMessage], protected_tokens: int
 ) -> List[ModelMessage]:
     emit_info("Truncating message history to manage token usage")
-    # First deduplicate tool returns to clean up any duplicates
-    messages = deduplicate_tool_returns(messages)
     result = [messages[0]]  # Always keep the first message (system prompt)
     num_tokens = 0
     stack = queue.LifoQueue()
@@ -501,10 +476,6 @@ def truncation(
 
 def message_history_accumulator(messages: List[Any]):
     _message_history = get_message_history()
-
-    # Deduplicate tool returns in current history before processing new messages
-    _message_history = deduplicate_tool_returns(_message_history)
-
     message_history_hashes = set([hash_message(m) for m in _message_history])
     for msg in messages:
         if (
@@ -513,12 +484,6 @@ def message_history_accumulator(messages: List[Any]):
         ):
             _message_history.append(msg)
 
-    # Deduplicate tool returns again after adding new messages to ensure no duplicates
-    _message_history = deduplicate_tool_returns(_message_history)
-
-    # Update the message history with deduplicated messages
-    set_message_history(_message_history)
-
     # Apply message history trimming using the main processor
     # This ensures we maintain global state while still managing context limits
     message_history_processor(_message_history)
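The rewritten prune_interrupted_tool_calls detects mismatches with a set symmetric difference rather than per-id counting. A quick sketch of that check with illustrative ids:

```python
# Ids seen in ToolCall parts vs. ids seen in tool-return parts.
tool_call_ids = {"call_1", "call_2", "call_3"}
tool_return_ids = {"call_1", "call_3", "call_9"}

# Any id present on only one side marks an interrupted/mismatched exchange;
# messages containing parts that reference such an id are then dropped.
mismatched = tool_call_ids.symmetric_difference(tool_return_ids)
assert mismatched == {"call_2", "call_9"}
```

Unlike the removed count-based version, the set comparison checks only presence on each side, not a strict 1:1 count.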
code_puppy/tools/command_runner.py
CHANGED

@@ -22,6 +22,17 @@ from code_puppy.messaging import (
 from code_puppy.state_management import is_tui_mode
 from code_puppy.tools.common import generate_group_id
 
+# Maximum line length for shell command output to prevent massive token usage
+# This helps avoid exceeding model context limits when commands produce very long lines
+MAX_LINE_LENGTH = 256
+
+
+def _truncate_line(line: str) -> str:
+    """Truncate a line to MAX_LINE_LENGTH if it exceeds the limit."""
+    if len(line) > MAX_LINE_LENGTH:
+        return line[:MAX_LINE_LENGTH] + "... [truncated]"
+    return line
+
 _AWAITING_USER_INPUT = False
 
 _CONFIRMATION_LOCK = threading.Lock()

@@ -188,6 +199,8 @@ def run_shell_command_streaming(
             for line in iter(process.stdout.readline, ""):
                 if line:
                     line = line.rstrip("\n\r")
+                    # Limit line length to prevent massive token usage
+                    line = _truncate_line(line)
                     stdout_lines.append(line)
                     emit_system_message(line, message_group=group_id)
                     last_output_time[0] = time.time()

@@ -199,6 +212,8 @@ def run_shell_command_streaming(
             for line in iter(process.stderr.readline, ""):
                 if line:
                     line = line.rstrip("\n\r")
+                    # Limit line length to prevent massive token usage
+                    line = _truncate_line(line)
                     stderr_lines.append(line)
                     emit_system_message(line, message_group=group_id)
                     last_output_time[0] = time.time()
@@ -252,8 +267,8 @@ def run_shell_command_streaming(
                 **{
                     "success": False,
                     "command": command,
-                    "stdout": "\n".join(stdout_lines[-
-                    "stderr": "\n".join(stderr_lines[-
+                    "stdout": "\n".join(stdout_lines[-256:]),
+                    "stderr": "\n".join(stderr_lines[-256:]),
                     "exit_code": -9,
                     "execution_time": execution_time,
                     "timeout": True,

@@ -315,23 +330,31 @@ def run_shell_command_streaming(
         )
         emit_info(f"Took {execution_time:.2f}s", message_group=group_id)
         time.sleep(1)
+        # Apply line length limits to stdout/stderr before returning
+        truncated_stdout = [_truncate_line(line) for line in stdout_lines[-256:]]
+        truncated_stderr = [_truncate_line(line) for line in stderr_lines[-256:]]
+
         return ShellCommandOutput(
             success=False,
             command=command,
             error="""The process didn't exit cleanly! If the user_interrupted flag is true,
             please stop all execution and ask the user for clarification!""",
-            stdout="\n".join(
-            stderr="\n".join(
+            stdout="\n".join(truncated_stdout),
+            stderr="\n".join(truncated_stderr),
             exit_code=exit_code,
             execution_time=execution_time,
             timeout=False,
             user_interrupted=process.pid in _USER_KILLED_PROCESSES,
         )
+    # Apply line length limits to stdout/stderr before returning
+    truncated_stdout = [_truncate_line(line) for line in stdout_lines[-256:]]
+    truncated_stderr = [_truncate_line(line) for line in stderr_lines[-256:]]
+
     return ShellCommandOutput(
         success=exit_code == 0,
         command=command,
-        stdout="\n".join(
-        stderr="\n".join(
+        stdout="\n".join(truncated_stdout),
+        stderr="\n".join(truncated_stderr),
         exit_code=exit_code,
         execution_time=execution_time,
         timeout=False,
@@ -453,12 +476,24 @@ def run_shell_command(
             stdout = None
         if "stderr" not in locals():
             stderr = None
+
+        # Apply line length limits to stdout/stderr if they exist
+        truncated_stdout = None
+        if stdout:
+            stdout_lines = stdout.split("\n")
+            truncated_stdout = "\n".join([_truncate_line(line) for line in stdout_lines[-256:]])
+
+        truncated_stderr = None
+        if stderr:
+            stderr_lines = stderr.split("\n")
+            truncated_stderr = "\n".join([_truncate_line(line) for line in stderr_lines[-256:]])
+
         return ShellCommandOutput(
             success=False,
             command=command,
            error=f"Error executing command {str(e)}",
-            stdout=
-            stderr=
+            stdout=truncated_stdout,
+            stderr=truncated_stderr,
             exit_code=-1,
             timeout=False,
         )

@@ -520,8 +555,8 @@ def register_agent_run_shell_command(agent):
        - success (bool): True if command executed successfully (exit code 0)
        - command (str | None): The executed command string
        - error (str | None): Error message if execution failed
-       - stdout (str | None): Standard output from the command (last
-       - stderr (str | None): Standard error from the command (last
+       - stdout (str | None): Standard output from the command (last 256 lines)
+       - stderr (str | None): Standard error from the command (last 256 lines)
        - exit_code (int | None): Process exit code
        - execution_time (float | None): Total execution time in seconds
        - timeout (bool | None): True if command was terminated due to timeout
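A quick sketch of what the new output limits do, reusing the _truncate_line helper added above with the same 256-character and last-256-lines bounds:

```python
MAX_LINE_LENGTH = 256


def _truncate_line(line: str) -> str:
    """Truncate a line to MAX_LINE_LENGTH if it exceeds the limit."""
    if len(line) > MAX_LINE_LENGTH:
        return line[:MAX_LINE_LENGTH] + "... [truncated]"
    return line


# Only the last 256 lines are kept, and each line is capped at 256 characters.
stdout_lines = ["x" * 1000, "short line"]
truncated = [_truncate_line(line) for line in stdout_lines[-256:]]
assert len(truncated[0]) == MAX_LINE_LENGTH + len("... [truncated]")
assert truncated[1] == "short line"
```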
code_puppy/tools/file_operations.py
CHANGED

@@ -208,17 +208,22 @@ def _list_files(
         files = result.stdout.strip().split("\n") if result.stdout.strip() else []
 
         # Create ListedFile objects with metadata
-        for
-            if not
+        for full_path in files:
+            if not full_path:  # Skip empty lines
                 continue
 
-            full_path = os.path.join(directory, file_path)
-
             # Skip if file doesn't exist (though it should)
             if not os.path.exists(full_path):
                 continue
 
+            # Extract relative path from the full path
+            if full_path.startswith(directory):
+                file_path = full_path[len(directory):].lstrip(os.sep)
+            else:
+                file_path = full_path
+
             # For non-recursive mode, skip files in subdirectories
+            # Only check the relative path, not the full path
             if not recursive and os.sep in file_path:
                 continue
 

@@ -242,7 +247,7 @@ def _list_files(
             if entry_type == "file":
                 size = actual_size
 
-            # Calculate depth
+            # Calculate depth based on the relative path
             depth = file_path.count(os.sep)
 
             # Add directory entries if needed for files
@@ -281,6 +286,33 @@ def _list_files(
             except (FileNotFoundError, PermissionError, OSError):
                 # Skip files we can't access
                 continue
+
+        # In non-recursive mode, we also need to explicitly list directories in the target directory
+        # ripgrep's --files option only returns files, not directories
+        if not recursive:
+            try:
+                entries = os.listdir(directory)
+                for entry in entries:
+                    full_entry_path = os.path.join(directory, entry)
+                    # Skip if it doesn't exist or if it's a file (since files are already listed by ripgrep)
+                    if not os.path.exists(full_entry_path) or os.path.isfile(full_entry_path):
+                        continue
+
+                    # For non-recursive mode, only include directories that are directly in the target directory
+                    if os.path.isdir(full_entry_path):
+                        # Create a ListedFile for the directory
+                        results.append(
+                            ListedFile(
+                                path=entry,
+                                type="directory",
+                                size=0,
+                                full_path=full_entry_path,
+                                depth=0,
+                            )
+                        )
+            except (FileNotFoundError, PermissionError, OSError):
+                # Skip directories we can't access
+                pass
     except subprocess.TimeoutExpired:
         error_msg = (
             "[red bold]Error:[/red bold] List files command timed out after 30 seconds"

@@ -337,9 +369,12 @@ def _list_files(
     else:
         return "\U0001f4c4"
 
+    # Count items in results
     dir_count = sum(1 for item in results if item.type == "directory")
     file_count = sum(1 for item in results if item.type == "file")
     total_size = sum(item.size for item in results if item.type == "file")
+
+
 
     # Build the directory header section
     dir_name = os.path.basename(directory) or directory

@@ -393,8 +428,8 @@ def _list_files(
     final_divider = "[dim]" + "─" * 100 + "\n" + "[/dim]"
     output_lines.append(final_divider)
 
-    # Return
-    return ListFileOutput(content="\n".join(output_lines)
+    # Return the content string
+    return ListFileOutput(content="\n".join(output_lines))
 
 
 def _read_file(
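The listing loop now treats ripgrep output as absolute paths and derives the relative path itself. A small sketch of that derivation with illustrative POSIX-style paths:

```python
import os

directory = "/home/user/project"
full_path = "/home/user/project/src/main.py"

# Derive the path relative to the listed directory, as in the diff above
# (POSIX-style separators assumed for this illustration).
if full_path.startswith(directory):
    file_path = full_path[len(directory):].lstrip(os.sep)
else:
    file_path = full_path

assert file_path == "src/main.py"

# In non-recursive mode, anything with a separator left in the relative path is skipped.
recursive = False
skip = not recursive and os.sep in file_path
assert skip is True
```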
code_puppy/tui/components/custom_widgets.py
CHANGED

@@ -21,8 +21,13 @@ class CustomTextArea(TextArea):
 
     def on_key(self, event):
         """Handle key events before they reach the internal _on_key handler."""
-        #
-        if event.key == "
+        # Let the binding system handle alt+enter
+        if event.key == "alt+enter":
+            # Don't prevent default - let the binding system handle it
+            return
+
+        # Handle escape+enter manually
+        if event.key == "escape+enter":
             self.action_insert_newline()
             event.prevent_default()
             event.stop()
{code_puppy-0.0.165.dist-info → code_puppy-0.0.167.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: code-puppy
-Version: 0.0.165
+Version: 0.0.167
 Summary: Code generation agent
 Project-URL: repository, https://github.com/mpfaffenberger/code_puppy
 Project-URL: HomePage, https://github.com/mpfaffenberger/code_puppy

@@ -33,6 +33,7 @@ Requires-Dist: rapidfuzz>=3.13.0
 Requires-Dist: rich>=13.4.2
 Requires-Dist: ripgrep>=14.1.0
 Requires-Dist: ruff>=0.11.11
+Requires-Dist: tenacity>=8.2.0
 Requires-Dist: termcolor>=3.1.0
 Requires-Dist: textual-dev>=1.7.0
 Requires-Dist: textual>=5.0.0
{code_puppy-0.0.165.dist-info → code_puppy-0.0.167.dist-info}/RECORD
CHANGED

@@ -3,9 +3,9 @@ code_puppy/__main__.py,sha256=pDVssJOWP8A83iFkxMLY9YteHYat0EyWDQqMkKHpWp4,203
 code_puppy/agent.py,sha256=wCARhq7PcD38c4l83fuXm4Us_6c1TXVa0U7TnBnNPEY,8064
 code_puppy/callbacks.py,sha256=6wYB6K_fGSCkKKEFaYOYkJT45WaV5W_NhUIzcvVH_nU,5060
 code_puppy/config.py,sha256=J8XU0iOPtfzrkDD49b4TdrdHoQmW2kaP-25PPbGGdKU,16386
-code_puppy/http_utils.py,sha256=
+code_puppy/http_utils.py,sha256=JecBdukr9dr1xcjw-Zyx2iD65RDEd183r8e1NMU4HLo,8093
 code_puppy/main.py,sha256=tYLfhUjPTJ-4S1r-pr-jSbn6kIU1iYvt2Z8lxI7zDFY,22220
-code_puppy/message_history_processor.py,sha256=
+code_puppy/message_history_processor.py,sha256=zzeOSUC1Wpsry-z2MD6pQWk2wt1axGSOKaGR2v22_qQ,18825
 code_puppy/model_factory.py,sha256=z9vQbcGllgMwU0On8rPvzYxkygW2Uyd3NJmRzbKv-is,13759
 code_puppy/models.json,sha256=iXmLZGflnQcu2DRh4WUlgAhoXdvoxUc7KBhB8YxawXM,3088
 code_puppy/reopenable_async_client.py,sha256=4UJRaMp5np8cbef9F0zKQ7TPKOfyf5U-Kv-0zYUWDho,8274

@@ -59,11 +59,11 @@ code_puppy/mcp/config_wizard.py,sha256=SIsm8uhDInfRYy_2W8wPIZcmoxk24680ikhMT93Vu
 code_puppy/mcp/dashboard.py,sha256=y6t6trrBZU-mr8W1_29VN5DeZI8VYvOsKNz1EXxlvUg,9022
 code_puppy/mcp/error_isolation.py,sha256=mpPBiH17zTXPsOEAn9WmkbwQwnt4gmgiaWv87JBJbUo,12426
 code_puppy/mcp/health_monitor.py,sha256=n5R6EeYOYbUucUFe74qGWCU3g6Mep5UEQbLF0wbT0dU,19688
-code_puppy/mcp/managed_server.py,sha256=
+code_puppy/mcp/managed_server.py,sha256=EZwWsQGxzgnjL6TGJTm58Pj6fw7kkYpLuolYQY-GbXY,14256
 code_puppy/mcp/manager.py,sha256=Yx9zxukdXgdPDgeJiiQPYlPae0zQPofHWB-axuoMNc8,26426
 code_puppy/mcp/registry.py,sha256=IvbIL-pETQ7HS7iRgsoT5j7eY8TOJXqYczSiigT2ofU,15752
 code_puppy/mcp/retry_manager.py,sha256=evVxbtrsHNyo8UoI7zpO-NVDegibn82RLlgN8VKewA8,10665
-code_puppy/mcp/server_registry_catalog.py,sha256=
+code_puppy/mcp/server_registry_catalog.py,sha256=U0HJMgEeF8Ntq3ZWepA0Z9t8ZG82Toa9JCGBjiDAzh8,38760
 code_puppy/mcp/status_tracker.py,sha256=uekxrzkzIWrv3OfSVgblaPuoGFcAh_dBYwCcaHZ_CrM,12183
 code_puppy/mcp/system_tools.py,sha256=7_oR8k0c8YjtCcYF9g7A946oAGuKOf_i-92aJH7VmlQ,7331
 code_puppy/mcp/examples/retry_example.py,sha256=En3LiqECYmG00gWaa7K9L9vvGk1VJyYtKWcv3p3gzCc,7220

@@ -78,10 +78,10 @@ code_puppy/messaging/spinner/textual_spinner.py,sha256=Omx9A-FSPkxYDMYgBXgYMBQnK
 code_puppy/plugins/__init__.py,sha256=fksDqMUiXPJ5WNuMsYsVR8ulueQRCXPlvECEyicHPtQ,1312
 code_puppy/tools/__init__.py,sha256=YiiXRqxU1BEJ5t0Oe163lSqOneI9sKtwDW0swCPgBt4,2119
 code_puppy/tools/agent_tools.py,sha256=bHMrFIbYRhuubR41G_XdLsk3cUKWfIPl2O4bVzo2pE0,5591
-code_puppy/tools/command_runner.py,sha256=
+code_puppy/tools/command_runner.py,sha256=TeKbUY1BeHpUEYtBbmEXnYd4ESI-RGNXRlI-7I1UNHU,22427
 code_puppy/tools/common.py,sha256=pL-9xcRs3rxU7Fl9X9EUgbDp2-csh2LLJ5DHH_KAHKY,10596
 code_puppy/tools/file_modifications.py,sha256=EaDWcv6gi8wAvpgyeJdKSKPWg9fTpZoEkxQiLCE6rn4,23218
-code_puppy/tools/file_operations.py,sha256=
+code_puppy/tools/file_operations.py,sha256=dEnsGCbDF12ctegCm9Kiu-mgNCrvopf64ij_CQbikW4,32460
 code_puppy/tools/tools_content.py,sha256=bsBqW-ppd1XNAS_g50B3UHDQBWEALC1UneH6-afz1zo,2365
 code_puppy/tui/__init__.py,sha256=XesAxIn32zLPOmvpR2wIDxDAnnJr81a5pBJB4cZp1Xs,321
 code_puppy/tui/app.py,sha256=nPOzwlusjdWzBfu__EbC3Q0etkPrqRq-2g-mk4IcfG4,39378

@@ -90,7 +90,7 @@ code_puppy/tui/components/__init__.py,sha256=uj5pnk3s6SEN3SbFI0ZnzaA2KK1NNg8TfUj
 code_puppy/tui/components/chat_view.py,sha256=NfyNXuN2idPht1rKJB4YhHVXb1AIRNO5q_nLdt8Ocug,19913
 code_puppy/tui/components/command_history_modal.py,sha256=pUPEQvoCWa2iUnuMgNwO22y8eUbyw0HpcPH3wAosHvU,7097
 code_puppy/tui/components/copy_button.py,sha256=E4-OJYk5YNzDf-E81NyiVGKsTRPrUX-RnQ8qFuVnabw,4375
-code_puppy/tui/components/custom_widgets.py,sha256=
+code_puppy/tui/components/custom_widgets.py,sha256=y-ZodibwL_GNaWntmFMn7eeC93wJNss590D43rGMO6A,2122
 code_puppy/tui/components/human_input_modal.py,sha256=isj-zrSIcK5iy3L7HJNgDFWN1zhxY4f3zvp4krbs07E,5424
 code_puppy/tui/components/input_area.py,sha256=R4R32eXPZ2R8KFisIbldNGq60KMk7kCxWrdbeTgJUr8,4395
 code_puppy/tui/components/sidebar.py,sha256=nGtCiYzZalPmiFaJ4dwj2S4EJBu5wQZVzhoigYYY7U4,10369

@@ -104,9 +104,9 @@ code_puppy/tui/screens/help.py,sha256=eJuPaOOCp7ZSUlecearqsuX6caxWv7NQszUh0tZJjB
 code_puppy/tui/screens/mcp_install_wizard.py,sha256=xqwN5omltMkfxWZwXj3D2PbXbtrxUi1dT0XT77oxOKk,27685
 code_puppy/tui/screens/settings.py,sha256=GMpv-qa08rorAE9mj3AjmqjZFPhmeJ_GWd-DBHG6iAA,10671
 code_puppy/tui/screens/tools.py,sha256=3pr2Xkpa9Js6Yhf1A3_wQVRzFOui-KDB82LwrsdBtyk,1715
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
+code_puppy-0.0.167.data/data/code_puppy/models.json,sha256=iXmLZGflnQcu2DRh4WUlgAhoXdvoxUc7KBhB8YxawXM,3088
+code_puppy-0.0.167.dist-info/METADATA,sha256=kmno03NP1H0J8qL2UAKHSq200Bsv1XkwCk9GqfwsrXg,19598
+code_puppy-0.0.167.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+code_puppy-0.0.167.dist-info/entry_points.txt,sha256=d8YkBvIUxF-dHNJAj-x4fPEqizbY5d_TwvYpc01U5kw,58
+code_puppy-0.0.167.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
+code_puppy-0.0.167.dist-info/RECORD,,
{code_puppy-0.0.165.data → code_puppy-0.0.167.data}/data/code_puppy/models.json
File without changes

{code_puppy-0.0.165.dist-info → code_puppy-0.0.167.dist-info}/WHEEL
File without changes

{code_puppy-0.0.165.dist-info → code_puppy-0.0.167.dist-info}/entry_points.txt
File without changes

{code_puppy-0.0.165.dist-info → code_puppy-0.0.167.dist-info}/licenses/LICENSE
File without changes