code-puppy 0.0.345__py3-none-any.whl → 0.0.347__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/agents/base_agent.py +139 -278
- code_puppy/agents/event_stream_handler.py +257 -0
- code_puppy/cli_runner.py +39 -3
- code_puppy/command_line/config_commands.py +10 -0
- code_puppy/config.py +23 -0
- code_puppy/summarization_agent.py +11 -1
- code_puppy/tools/agent_tools.py +55 -11
- code_puppy/tools/browser/vqa_agent.py +7 -1
- {code_puppy-0.0.345.dist-info → code_puppy-0.0.347.dist-info}/METADATA +23 -1
- {code_puppy-0.0.345.dist-info → code_puppy-0.0.347.dist-info}/RECORD +15 -14
- {code_puppy-0.0.345.data → code_puppy-0.0.347.data}/data/code_puppy/models.json +0 -0
- {code_puppy-0.0.345.data → code_puppy-0.0.347.data}/data/code_puppy/models_dev_api.json +0 -0
- {code_puppy-0.0.345.dist-info → code_puppy-0.0.347.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.345.dist-info → code_puppy-0.0.347.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.345.dist-info → code_puppy-0.0.347.dist-info}/licenses/LICENSE +0 -0
code_puppy/agents/base_agent.py
CHANGED
@@ -7,7 +7,6 @@ import signal
 import threading
 import uuid
 from abc import ABC, abstractmethod
-from collections.abc import AsyncIterable
 from typing import (
     Any,
     Callable,
@@ -24,16 +23,17 @@ from typing import (
 import mcp
 import pydantic
 import pydantic_ai.models
+from dbos import DBOS, SetWorkflowID
 from pydantic_ai import Agent as PydanticAgent
 from pydantic_ai import (
     BinaryContent,
     DocumentUrl,
     ImageUrl,
-    PartEndEvent,
     RunContext,
     UsageLimitExceeded,
     UsageLimits,
 )
+from pydantic_ai.durable_exec.dbos import DBOSAgent
 from pydantic_ai.messages import (
     ModelMessage,
     ModelRequest,
@@ -46,6 +46,8 @@ from pydantic_ai.messages import (
 )
 from rich.text import Text
 
+from code_puppy.agents.event_stream_handler import event_stream_handler
+
 # Consolidated relative imports
 from code_puppy.config import (
     get_agent_pinned_model,
@@ -54,6 +56,7 @@ from code_puppy.config import (
     get_global_model_name,
     get_message_limit,
     get_protected_token_count,
+    get_use_dbos,
     get_value,
 )
 from code_puppy.error_logging import log_error
@@ -97,9 +100,6 @@ class BaseAgent(ABC):
         # Cache for MCP tool definitions (for token estimation)
         # This is populated after the first successful run when MCP tools are retrieved
         self._mcp_tool_definitions_cache: List[Dict[str, Any]] = []
-        # Shared console for streaming output - should be set by cli_runner
-        # to avoid conflicts between spinner's Live display and response streaming
-        self._console: Optional[Any] = None
 
     @property
     @abstractmethod
@@ -1209,25 +1209,59 @@ class BaseAgent(ABC):
 
         self._last_model_name = resolved_model_name
         # expose for run_with_mcp
+        # Wrap it with DBOS, but handle MCP servers separately to avoid serialization issues
         global _reload_count
         _reload_count += 1
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if get_use_dbos():
+            # Don't pass MCP servers to the agent constructor when using DBOS
+            # This prevents the "cannot pickle async_generator object" error
+            # MCP servers will be handled separately in run_with_mcp
+            agent_without_mcp = PydanticAgent(
+                model=model,
+                instructions=instructions,
+                output_type=str,
+                retries=3,
+                toolsets=[],  # Don't include MCP servers here
+                history_processors=[self.message_history_accumulator],
+                model_settings=model_settings,
+            )
+
+            # Register regular tools (non-MCP) on the new agent
+            agent_tools = self.get_available_tools()
+            register_tools_for_agent(agent_without_mcp, agent_tools)
+
+            # Wrap with DBOS - pass event_stream_handler at construction time
+            # so DBOSModel gets the handler for streaming output
+            dbos_agent = DBOSAgent(
+                agent_without_mcp,
+                name=f"{self.name}-{_reload_count}",
+                event_stream_handler=event_stream_handler,
+            )
+            self.pydantic_agent = dbos_agent
+            self._code_generation_agent = dbos_agent
 
-
-
-
+            # Store filtered MCP servers separately for runtime use
+            self._mcp_servers = filtered_mcp_servers
+        else:
+            # Normal path without DBOS - include filtered MCP servers in the agent
+            # Re-create agent with filtered MCP servers
+            p_agent = PydanticAgent(
+                model=model,
+                instructions=instructions,
+                output_type=str,
+                retries=3,
+                toolsets=filtered_mcp_servers,
+                history_processors=[self.message_history_accumulator],
+                model_settings=model_settings,
+            )
+            # Register regular tools on the agent
+            agent_tools = self.get_available_tools()
+            register_tools_for_agent(p_agent, agent_tools)
+
+            self.pydantic_agent = p_agent
+            self._code_generation_agent = p_agent
+            self._mcp_servers = filtered_mcp_servers
+        self._mcp_servers = mcp_servers
         return self._code_generation_agent
 
     def _create_agent_with_output_type(self, output_type: Type[Any]) -> PydanticAgent:
@@ -1241,7 +1275,7 @@ class BaseAgent(ABC):
             output_type: The Pydantic model or type for structured output.
 
         Returns:
-            A configured PydanticAgent with the custom output_type.
+            A configured PydanticAgent (or DBOSAgent wrapper) with the custom output_type.
         """
         from code_puppy.model_utils import prepare_prompt_for_model
         from code_puppy.tools import register_tools_for_agent
@@ -1268,19 +1302,41 @@
         global _reload_count
         _reload_count += 1
 
-
-
-
-
-
-
-
-
-
-
-
-
+        if get_use_dbos():
+            temp_agent = PydanticAgent(
+                model=model,
+                instructions=instructions,
+                output_type=output_type,
+                retries=3,
+                toolsets=[],
+                history_processors=[self.message_history_accumulator],
+                model_settings=model_settings,
+            )
+            agent_tools = self.get_available_tools()
+            register_tools_for_agent(temp_agent, agent_tools)
+            # Pass event_stream_handler at construction time for streaming output
+            dbos_agent = DBOSAgent(
+                temp_agent,
+                name=f"{self.name}-structured-{_reload_count}",
+                event_stream_handler=event_stream_handler,
+            )
+            return dbos_agent
+        else:
+            temp_agent = PydanticAgent(
+                model=model,
+                instructions=instructions,
+                output_type=output_type,
+                retries=3,
+                toolsets=mcp_servers,
+                history_processors=[self.message_history_accumulator],
+                model_settings=model_settings,
+            )
+            agent_tools = self.get_available_tools()
+            register_tools_for_agent(temp_agent, agent_tools)
+            return temp_agent
 
+    # It's okay to decorate it with DBOS.step even if not using DBOS; the decorator is a no-op in that case.
+    @DBOS.step()
     def message_history_accumulator(self, ctx: RunContext, messages: List[Any]):
         _message_history = self.get_message_history()
         message_history_hashes = set([self.hash_message(m) for m in _message_history])
@@ -1304,241 +1360,6 @@
         self.set_message_history(result_messages_filtered_empty_thinking)
         return self.get_message_history()
 
-    async def _event_stream_handler(
-        self, ctx: RunContext, events: AsyncIterable[Any]
-    ) -> None:
-        """Handle streaming events from the agent run.
-
-        This method processes streaming events and emits TextPart, ThinkingPart,
-        and ToolCallPart content with styled banners/tokens as they stream in.
-
-        Args:
-            ctx: The run context.
-            events: Async iterable of streaming events (PartStartEvent, PartDeltaEvent, etc.).
-        """
-        from pydantic_ai import PartDeltaEvent, PartStartEvent
-        from pydantic_ai.messages import (
-            TextPartDelta,
-            ThinkingPartDelta,
-            ToolCallPartDelta,
-        )
-        from rich.console import Console
-        from rich.markup import escape
-
-        from code_puppy.messaging.spinner import pause_all_spinners
-
-        # IMPORTANT: Use the shared console (set by cli_runner) to avoid conflicts
-        # with the spinner's Live display. Multiple Console instances with separate
-        # Live displays cause cursor positioning chaos and line duplication.
-        if self._console is not None:
-            console = self._console
-        else:
-            # Fallback if console not set (shouldn't happen in normal use)
-            console = Console()
-
-        # Track which part indices we're currently streaming (for Text/Thinking/Tool parts)
-        streaming_parts: set[int] = set()
-        thinking_parts: set[int] = (
-            set()
-        )  # Track which parts are thinking (for dim style)
-        text_parts: set[int] = set()  # Track which parts are text
-        tool_parts: set[int] = set()  # Track which parts are tool calls
-        banner_printed: set[int] = set()  # Track if banner was already printed
-        token_count: dict[int, int] = {}  # Track token count per text/tool part
-        did_stream_anything = False  # Track if we streamed any content
-
-        # Termflow streaming state for text parts
-        from termflow import Parser as TermflowParser
-        from termflow import Renderer as TermflowRenderer
-
-        termflow_parsers: dict[int, TermflowParser] = {}
-        termflow_renderers: dict[int, TermflowRenderer] = {}
-        termflow_line_buffers: dict[int, str] = {}  # Buffer incomplete lines
-
-        def _print_thinking_banner() -> None:
-            """Print the THINKING banner with spinner pause and line clear."""
-            nonlocal did_stream_anything
-            import time
-
-            from code_puppy.config import get_banner_color
-
-            pause_all_spinners()
-            time.sleep(0.1)  # Delay to let spinner fully clear
-            # Clear line and print newline before banner
-            console.print(" " * 50, end="\r")
-            console.print()  # Newline before banner
-            # Bold banner with configurable color and lightning bolt
-            thinking_color = get_banner_color("thinking")
-            console.print(
-                Text.from_markup(
-                    f"[bold white on {thinking_color}] THINKING [/bold white on {thinking_color}] [dim]⚡ "
-                ),
-                end="",
-            )
-            did_stream_anything = True
-
-        def _print_response_banner() -> None:
-            """Print the AGENT RESPONSE banner with spinner pause and line clear."""
-            nonlocal did_stream_anything
-            import time
-
-            from code_puppy.config import get_banner_color
-
-            pause_all_spinners()
-            time.sleep(0.1)  # Delay to let spinner fully clear
-            # Clear line and print newline before banner
-            console.print(" " * 50, end="\r")
-            console.print()  # Newline before banner
-            response_color = get_banner_color("agent_response")
-            console.print(
-                Text.from_markup(
-                    f"[bold white on {response_color}] AGENT RESPONSE [/bold white on {response_color}]"
-                )
-            )
-            did_stream_anything = True
-
-        async for event in events:
-            # PartStartEvent - register the part but defer banner until content arrives
-            if isinstance(event, PartStartEvent):
-                part = event.part
-                if isinstance(part, ThinkingPart):
-                    streaming_parts.add(event.index)
-                    thinking_parts.add(event.index)
-                    # If there's initial content, print banner + content now
-                    if part.content and part.content.strip():
-                        _print_thinking_banner()
-                        escaped = escape(part.content)
-                        console.print(f"[dim]{escaped}[/dim]", end="")
-                        banner_printed.add(event.index)
-                elif isinstance(part, TextPart):
-                    streaming_parts.add(event.index)
-                    text_parts.add(event.index)
-                    # Initialize termflow streaming for this text part
-                    termflow_parsers[event.index] = TermflowParser()
-                    termflow_renderers[event.index] = TermflowRenderer(
-                        output=console.file, width=console.width
-                    )
-                    termflow_line_buffers[event.index] = ""
-                    # Handle initial content if present
-                    if part.content and part.content.strip():
-                        _print_response_banner()
-                        banner_printed.add(event.index)
-                        termflow_line_buffers[event.index] = part.content
-                elif isinstance(part, ToolCallPart):
-                    streaming_parts.add(event.index)
-                    tool_parts.add(event.index)
-                    token_count[event.index] = 0  # Initialize token counter
-                    # Track tool name for display
-                    banner_printed.add(
-                        event.index
-                    )  # Use banner_printed to track if we've shown tool info
-
-            # PartDeltaEvent - stream the content as it arrives
-            elif isinstance(event, PartDeltaEvent):
-                if event.index in streaming_parts:
-                    delta = event.delta
-                    if isinstance(delta, (TextPartDelta, ThinkingPartDelta)):
-                        if delta.content_delta:
-                            # For text parts, stream markdown with termflow
-                            if event.index in text_parts:
-                                # Print banner on first content
-                                if event.index not in banner_printed:
-                                    _print_response_banner()
-                                    banner_printed.add(event.index)
-
-                                # Add content to line buffer
-                                termflow_line_buffers[event.index] += (
-                                    delta.content_delta
-                                )
-
-                                # Process complete lines
-                                parser = termflow_parsers[event.index]
-                                renderer = termflow_renderers[event.index]
-                                buffer = termflow_line_buffers[event.index]
-
-                                while "\n" in buffer:
-                                    line, buffer = buffer.split("\n", 1)
-                                    events_to_render = parser.parse_line(line)
-                                    renderer.render_all(events_to_render)
-
-                                termflow_line_buffers[event.index] = buffer
-                            else:
-                                # For thinking parts, stream immediately (dim)
-                                if event.index not in banner_printed:
-                                    _print_thinking_banner()
-                                    banner_printed.add(event.index)
-                                escaped = escape(delta.content_delta)
-                                console.print(f"[dim]{escaped}[/dim]", end="")
-                    elif isinstance(delta, ToolCallPartDelta):
-                        # For tool calls, count chunks received
-                        token_count[event.index] += 1
-                        # Get tool name if available
-                        tool_name = getattr(delta, "tool_name_delta", "")
-                        count = token_count[event.index]
-                        # Display with tool wrench icon and tool name
-                        if tool_name:
-                            console.print(
-                                f" 🔧 Calling {tool_name}... {count} chunks ",
-                                end="\r",
-                            )
-                        else:
-                            console.print(
-                                f" 🔧 Calling tool... {count} chunks ",
-                                end="\r",
-                            )
-
-            # PartEndEvent - finish the streaming with a newline
-            elif isinstance(event, PartEndEvent):
-                if event.index in streaming_parts:
-                    # For text parts, finalize termflow rendering
-                    if event.index in text_parts:
-                        # Render any remaining buffered content
-                        if event.index in termflow_parsers:
-                            parser = termflow_parsers[event.index]
-                            renderer = termflow_renderers[event.index]
-                            remaining = termflow_line_buffers.get(event.index, "")
-
-                            # Parse and render any remaining partial line
-                            if remaining.strip():
-                                events_to_render = parser.parse_line(remaining)
-                                renderer.render_all(events_to_render)
-
-                            # Finalize the parser to close any open blocks
-                            final_events = parser.finalize()
-                            renderer.render_all(final_events)
-
-                            # Clean up termflow state
-                            del termflow_parsers[event.index]
-                            del termflow_renderers[event.index]
-                            del termflow_line_buffers[event.index]
-                    # For tool parts, clear the chunk counter line
-                    elif event.index in tool_parts:
-                        # Clear the chunk counter line by printing spaces and returning
-                        console.print(" " * 50, end="\r")
-                    # For thinking parts, just print newline
-                    elif event.index in banner_printed:
-                        console.print()  # Final newline after streaming
-
-                    # Clean up token count
-                    token_count.pop(event.index, None)
-                    # Clean up all tracking sets
-                    streaming_parts.discard(event.index)
-                    thinking_parts.discard(event.index)
-                    text_parts.discard(event.index)
-                    tool_parts.discard(event.index)
-                    banner_printed.discard(event.index)
-
-                    # Resume spinner if next part is NOT text/thinking/tool (avoid race condition)
-                    # If next part is None or handled differently, it's safe to resume
-                    # Note: spinner itself handles blank line before appearing
-                    from code_puppy.messaging.spinner import resume_all_spinners
-
-                    next_kind = getattr(event, "next_part_kind", None)
-                    if next_kind not in ("text", "thinking", "tool-call"):
-                        resume_all_spinners()
-
-        # Spinner is resumed in PartEndEvent when appropriate (based on next_part_kind)
-
     def _spawn_ctrl_x_key_listener(
         self,
         stop_event: threading.Event,
@@ -1788,15 +1609,51 @@
 
             usage_limits = UsageLimits(request_limit=get_message_limit())
 
-            # MCP servers
-
-
-
-
-
-
-
+            # Handle MCP servers - add them temporarily when using DBOS
+            if (
+                get_use_dbos()
+                and hasattr(self, "_mcp_servers")
+                and self._mcp_servers
+            ):
+                # Temporarily add MCP servers to the DBOS agent using internal _toolsets
+                original_toolsets = pydantic_agent._toolsets
+                pydantic_agent._toolsets = original_toolsets + self._mcp_servers
+                pydantic_agent._toolsets = original_toolsets + self._mcp_servers
+
+                try:
+                    # Set the workflow ID for DBOS context so DBOS and Code Puppy ID match
+                    with SetWorkflowID(group_id):
+                        result_ = await pydantic_agent.run(
+                            prompt_payload,
+                            message_history=self.get_message_history(),
+                            usage_limits=usage_limits,
+                            event_stream_handler=event_stream_handler,
+                            **kwargs,
+                        )
+                        return result_
+                finally:
+                    # Always restore original toolsets
+                    pydantic_agent._toolsets = original_toolsets
+            elif get_use_dbos():
+                with SetWorkflowID(group_id):
+                    result_ = await pydantic_agent.run(
+                        prompt_payload,
+                        message_history=self.get_message_history(),
+                        usage_limits=usage_limits,
+                        event_stream_handler=event_stream_handler,
+                        **kwargs,
+                    )
+                    return result_
+            else:
+                # Non-DBOS path (MCP servers are already included)
+                result_ = await pydantic_agent.run(
+                    prompt_payload,
+                    message_history=self.get_message_history(),
+                    usage_limits=usage_limits,
+                    event_stream_handler=event_stream_handler,
+                    **kwargs,
+                )
+                return result_
         except* UsageLimitExceeded as ule:
             emit_info(f"Usage limit exceeded: {str(ule)}", group_id=group_id)
             emit_info(
@@ -1811,8 +1668,12 @@
             )
         except* asyncio.exceptions.CancelledError:
             emit_info("Cancelled")
+            if get_use_dbos():
+                await DBOS.cancel_workflow_async(group_id)
        except* InterruptedError as ie:
             emit_info(f"Interrupted: {str(ie)}")
+            if get_use_dbos():
+                await DBOS.cancel_workflow_async(group_id)
         except* Exception as other_error:
             # Filter out CancelledError and UsageLimitExceeded from the exception group - let it propagate
             remaining_exceptions = []
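
Note: the run_with_mcp hunk above splices the MCP toolsets into the DBOS-wrapped agent through pydantic-ai's private `_toolsets` attribute and restores them in a `finally` block. A minimal sketch of that swap-and-restore pattern, with hypothetical names (`agent`, `extra_toolsets`) and the caveat that `_toolsets` is internal API rather than a stable contract:

from contextlib import contextmanager

@contextmanager
def temporarily_attached(agent, extra_toolsets):
    # Swap in the extra toolsets for the duration of one run.
    original = agent._toolsets  # private pydantic-ai attribute (assumption: list-like)
    agent._toolsets = original + extra_toolsets
    try:
        yield agent
    finally:
        # Always restore, even if the run raises.
        agent._toolsets = original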
code_puppy/agents/event_stream_handler.py
ADDED
@@ -0,0 +1,257 @@
+"""Event stream handler for processing streaming events from agent runs."""
+
+from collections.abc import AsyncIterable
+from typing import Any, Optional
+
+from pydantic_ai import PartDeltaEvent, PartEndEvent, PartStartEvent, RunContext
+from pydantic_ai.messages import (
+    TextPart,
+    TextPartDelta,
+    ThinkingPart,
+    ThinkingPartDelta,
+    ToolCallPart,
+    ToolCallPartDelta,
+)
+from rich.console import Console
+from rich.markup import escape
+from rich.text import Text
+
+from code_puppy.config import get_banner_color
+from code_puppy.messaging.spinner import pause_all_spinners, resume_all_spinners
+
+# Module-level console for streaming output
+# Set via set_streaming_console() to share console with spinner
+_streaming_console: Optional[Console] = None
+
+
+def set_streaming_console(console: Optional[Console]) -> None:
+    """Set the console used for streaming output.
+
+    This should be called with the same console used by the spinner
+    to avoid Live display conflicts that cause line duplication.
+
+    Args:
+        console: The Rich console to use, or None to use a fallback.
+    """
+    global _streaming_console
+    _streaming_console = console
+
+
+def get_streaming_console() -> Console:
+    """Get the console for streaming output.
+
+    Returns the configured console or creates a fallback Console.
+    """
+    if _streaming_console is not None:
+        return _streaming_console
+    return Console()
+
+
+async def event_stream_handler(
+    ctx: RunContext,
+    events: AsyncIterable[Any],
+) -> None:
+    """Handle streaming events from the agent run.
+
+    This function processes streaming events and emits TextPart, ThinkingPart,
+    and ToolCallPart content with styled banners/tokens as they stream in.
+
+    Args:
+        ctx: The run context.
+        events: Async iterable of streaming events (PartStartEvent, PartDeltaEvent, etc.).
+    """
+    import time
+
+    from termflow import Parser as TermflowParser
+    from termflow import Renderer as TermflowRenderer
+
+    # Use the module-level console (set via set_streaming_console)
+    console = get_streaming_console()
+
+    # Track which part indices we're currently streaming (for Text/Thinking/Tool parts)
+    streaming_parts: set[int] = set()
+    thinking_parts: set[int] = set()  # Track which parts are thinking (for dim style)
+    text_parts: set[int] = set()  # Track which parts are text
+    tool_parts: set[int] = set()  # Track which parts are tool calls
+    banner_printed: set[int] = set()  # Track if banner was already printed
+    token_count: dict[int, int] = {}  # Track token count per text/tool part
+    did_stream_anything = False  # Track if we streamed any content
+
+    # Termflow streaming state for text parts
+    termflow_parsers: dict[int, TermflowParser] = {}
+    termflow_renderers: dict[int, TermflowRenderer] = {}
+    termflow_line_buffers: dict[int, str] = {}  # Buffer incomplete lines
+
+    def _print_thinking_banner() -> None:
+        """Print the THINKING banner with spinner pause and line clear."""
+        nonlocal did_stream_anything
+
+        pause_all_spinners()
+        time.sleep(0.1)  # Delay to let spinner fully clear
+        # Clear line and print newline before banner
+        console.print(" " * 50, end="\r")
+        console.print()  # Newline before banner
+        # Bold banner with configurable color and lightning bolt
+        thinking_color = get_banner_color("thinking")
+        console.print(
+            Text.from_markup(
+                f"[bold white on {thinking_color}] THINKING [/bold white on {thinking_color}] [dim]⚡ "
+            ),
+            end="",
+        )
+        did_stream_anything = True
+
+    def _print_response_banner() -> None:
+        """Print the AGENT RESPONSE banner with spinner pause and line clear."""
+        nonlocal did_stream_anything
+
+        pause_all_spinners()
+        time.sleep(0.1)  # Delay to let spinner fully clear
+        # Clear line and print newline before banner
+        console.print(" " * 50, end="\r")
+        console.print()  # Newline before banner
+        response_color = get_banner_color("agent_response")
+        console.print(
+            Text.from_markup(
+                f"[bold white on {response_color}] AGENT RESPONSE [/bold white on {response_color}]"
+            )
+        )
+        did_stream_anything = True
+
+    async for event in events:
+        # PartStartEvent - register the part but defer banner until content arrives
+        if isinstance(event, PartStartEvent):
+            part = event.part
+            if isinstance(part, ThinkingPart):
+                streaming_parts.add(event.index)
+                thinking_parts.add(event.index)
+                # If there's initial content, print banner + content now
+                if part.content and part.content.strip():
+                    _print_thinking_banner()
+                    escaped = escape(part.content)
+                    console.print(f"[dim]{escaped}[/dim]", end="")
+                    banner_printed.add(event.index)
+            elif isinstance(part, TextPart):
+                streaming_parts.add(event.index)
+                text_parts.add(event.index)
+                # Initialize termflow streaming for this text part
+                termflow_parsers[event.index] = TermflowParser()
+                termflow_renderers[event.index] = TermflowRenderer(
+                    output=console.file, width=console.width
+                )
+                termflow_line_buffers[event.index] = ""
+                # Handle initial content if present
+                if part.content and part.content.strip():
+                    _print_response_banner()
+                    banner_printed.add(event.index)
+                    termflow_line_buffers[event.index] = part.content
+            elif isinstance(part, ToolCallPart):
+                streaming_parts.add(event.index)
+                tool_parts.add(event.index)
+                token_count[event.index] = 0  # Initialize token counter
+                # Track tool name for display
+                banner_printed.add(
+                    event.index
+                )  # Use banner_printed to track if we've shown tool info
+
+        # PartDeltaEvent - stream the content as it arrives
+        elif isinstance(event, PartDeltaEvent):
+            if event.index in streaming_parts:
+                delta = event.delta
+                if isinstance(delta, (TextPartDelta, ThinkingPartDelta)):
+                    if delta.content_delta:
+                        # For text parts, stream markdown with termflow
+                        if event.index in text_parts:
+                            # Print banner on first content
+                            if event.index not in banner_printed:
+                                _print_response_banner()
+                                banner_printed.add(event.index)
+
+                            # Add content to line buffer
+                            termflow_line_buffers[event.index] += delta.content_delta
+
+                            # Process complete lines
+                            parser = termflow_parsers[event.index]
+                            renderer = termflow_renderers[event.index]
+                            buffer = termflow_line_buffers[event.index]
+
+                            while "\n" in buffer:
+                                line, buffer = buffer.split("\n", 1)
+                                events_to_render = parser.parse_line(line)
+                                renderer.render_all(events_to_render)
+
+                            termflow_line_buffers[event.index] = buffer
+                        else:
+                            # For thinking parts, stream immediately (dim)
+                            if event.index not in banner_printed:
+                                _print_thinking_banner()
+                                banner_printed.add(event.index)
+                            escaped = escape(delta.content_delta)
+                            console.print(f"[dim]{escaped}[/dim]", end="")
+                elif isinstance(delta, ToolCallPartDelta):
+                    # For tool calls, count chunks received
+                    token_count[event.index] += 1
+                    # Get tool name if available
+                    tool_name = getattr(delta, "tool_name_delta", "")
+                    count = token_count[event.index]
+                    # Display with tool wrench icon and tool name
+                    if tool_name:
+                        console.print(
+                            f" 🔧 Calling {tool_name}... {count} chunks ",
+                            end="\r",
+                        )
+                    else:
+                        console.print(
+                            f" 🔧 Calling tool... {count} chunks ",
+                            end="\r",
+                        )
+
+        # PartEndEvent - finish the streaming with a newline
+        elif isinstance(event, PartEndEvent):
+            if event.index in streaming_parts:
+                # For text parts, finalize termflow rendering
+                if event.index in text_parts:
+                    # Render any remaining buffered content
+                    if event.index in termflow_parsers:
+                        parser = termflow_parsers[event.index]
+                        renderer = termflow_renderers[event.index]
+                        remaining = termflow_line_buffers.get(event.index, "")
+
+                        # Parse and render any remaining partial line
+                        if remaining.strip():
+                            events_to_render = parser.parse_line(remaining)
+                            renderer.render_all(events_to_render)
+
+                        # Finalize the parser to close any open blocks
+                        final_events = parser.finalize()
+                        renderer.render_all(final_events)
+
+                        # Clean up termflow state
+                        del termflow_parsers[event.index]
+                        del termflow_renderers[event.index]
+                        del termflow_line_buffers[event.index]
+                # For tool parts, clear the chunk counter line
+                elif event.index in tool_parts:
+                    # Clear the chunk counter line by printing spaces and returning
+                    console.print(" " * 50, end="\r")
+                # For thinking parts, just print newline
+                elif event.index in banner_printed:
+                    console.print()  # Final newline after streaming
+
+                # Clean up token count
+                token_count.pop(event.index, None)
+                # Clean up all tracking sets
+                streaming_parts.discard(event.index)
+                thinking_parts.discard(event.index)
+                text_parts.discard(event.index)
+                tool_parts.discard(event.index)
+                banner_printed.discard(event.index)
+
+                # Resume spinner if next part is NOT text/thinking/tool (avoid race condition)
+                # If next part is None or handled differently, it's safe to resume
+                # Note: spinner itself handles blank line before appearing
+                next_kind = getattr(event, "next_part_kind", None)
+                if next_kind not in ("text", "thinking", "tool-call"):
+                    resume_all_spinners()
+
+    # Spinner is resumed in PartEndEvent when appropriate (based on next_part_kind)
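
Note: the heart of the text-part branch above is a line buffer that feeds only complete lines to the termflow parser/renderer pair, holding back the trailing partial line until more deltas arrive. A self-contained sketch of that buffering loop; `render_line` is a stand-in callback, not the termflow API:

def feed_deltas(deltas, render_line):
    # Accumulate streamed text deltas and emit only complete lines.
    buffer = ""
    for delta in deltas:
        buffer += delta
        while "\n" in buffer:  # at least one complete line is buffered
            line, buffer = buffer.split("\n", 1)
            render_line(line)
    if buffer.strip():  # flush the final partial line, as PartEndEvent does
        render_line(buffer)

feed_deltas(["# Ti", "tle\nbo", "dy\n", "tail"], print)  # prints: '# Title', 'body', 'tail'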
code_puppy/cli_runner.py
CHANGED
@@ -12,9 +12,11 @@ import argparse
 import asyncio
 import os
 import sys
+import time
 import traceback
 from pathlib import Path
 
+from dbos import DBOS, DBOSConfig
 from rich.console import Console
 
 from code_puppy import __version__, callbacks, plugins
@@ -24,8 +26,10 @@ from code_puppy.command_line.clipboard import get_clipboard_manager
 from code_puppy.config import (
     AUTOSAVE_DIR,
     COMMAND_HISTORY_FILE,
+    DBOS_DATABASE_URL,
     ensure_config_exists,
     finalize_autosave_session,
+    get_use_dbos,
     initialize_command_history_file,
     save_command_to_history,
 )
@@ -283,6 +287,33 @@ async def main():
 
     await callbacks.on_startup()
 
+    # Initialize DBOS if not disabled
+    if get_use_dbos():
+        # Append a Unix timestamp in ms to the version for uniqueness
+        dbos_app_version = os.environ.get(
+            "DBOS_APP_VERSION", f"{current_version}-{int(time.time() * 1000)}"
+        )
+        dbos_config: DBOSConfig = {
+            "name": "dbos-code-puppy",
+            "system_database_url": DBOS_DATABASE_URL,
+            "run_admin_server": False,
+            "conductor_key": os.environ.get(
+                "DBOS_CONDUCTOR_KEY"
+            ),  # Optional, if set in env, connect to conductor
+            "log_level": os.environ.get(
+                "DBOS_LOG_LEVEL", "ERROR"
+            ),  # Default to ERROR level to suppress verbose logs
+            "application_version": dbos_app_version,  # Match DBOS app version to Code Puppy version
+        }
+        try:
+            DBOS(config=dbos_config)
+            DBOS.launch()
+        except Exception as e:
+            emit_error(f"Error initializing DBOS: {e}")
+            sys.exit(1)
+    else:
+        pass
+
     global shutdown_flag
     shutdown_flag = False
     try:
@@ -307,6 +338,8 @@ async def main():
     if bus_renderer:
         bus_renderer.stop()
     await callbacks.on_shutdown()
+    if get_use_dbos():
+        DBOS.destroy()
 
 
 async def interactive_mode(message_renderer, initial_command: str = None) -> None:
@@ -794,11 +827,12 @@ async def run_prompt_with_attachments(
 
     link_attachments = [link.url_part for link in processed_prompt.link_attachments]
 
-    # IMPORTANT: Set the shared console
+    # IMPORTANT: Set the shared console for streaming output so it
     # uses the same console as the spinner. This prevents Live display conflicts
     # that cause line duplication during markdown streaming.
-
-
+    from code_puppy.agents.event_stream_handler import set_streaming_console
+
+    set_streaming_console(spinner_console)
 
     # Create the agent task first so we can track and cancel it
     agent_task = asyncio.create_task(
@@ -874,6 +908,8 @@ def main_entry():
     except KeyboardInterrupt:
         # Note: Using sys.stderr for crash output - messaging system may not be available
         sys.stderr.write(traceback.format_exc())
+        if get_use_dbos():
+            DBOS.destroy()
         return 0
     finally:
         # Reset terminal on Unix-like systems (not Windows)
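
Note: the startup hunk above follows the standard DBOS lifecycle: build a DBOSConfig, register it with DBOS(config=...), call DBOS.launch(), and call DBOS.destroy() on shutdown. A stripped-down sketch using only calls and keys the diff itself passes; the SQLite URL here is illustrative, not the real default path:

import time
from dbos import DBOS, DBOSConfig

config: DBOSConfig = {
    "name": "dbos-code-puppy",
    "system_database_url": "sqlite:///dbos_store.sqlite",  # illustrative path
    "run_admin_server": False,
    # A fresh version per process means no pending workflows match it, so
    # automatic recovery is effectively disabled (see the METADATA notes below).
    "application_version": f"0.0.347-{int(time.time() * 1000)}",
}

DBOS(config=config)  # register configuration
DBOS.launch()        # connect to the system database
try:
    pass             # run the application
finally:
    DBOS.destroy()   # tear down so the process exits cleanly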
code_puppy/command_line/config_commands.py
CHANGED
@@ -43,6 +43,7 @@ def handle_show_command(command: str) -> bool:
         get_protected_token_count,
         get_puppy_name,
         get_temperature,
+        get_use_dbos,
         get_yolo_mode,
     )
     from code_puppy.keymap import get_cancel_agent_display_name
@@ -71,6 +72,7 @@ def handle_show_command(command: str) -> bool:
 [bold]default_agent:[/bold] [cyan]{default_agent}[/cyan]
 [bold]model:[/bold] [green]{model}[/green]
 [bold]YOLO_MODE:[/bold] {"[red]ON[/red]" if yolo_mode else "[yellow]off[/yellow]"}
+[bold]DBOS:[/bold] {"[green]enabled[/green]" if get_use_dbos() else "[yellow]disabled[/yellow]"} (toggle: /set enable_dbos true|false)
 [bold]auto_save_session:[/bold] {"[green]enabled[/green]" if auto_save else "[yellow]disabled[/yellow]"}
 [bold]protected_tokens:[/bold] [cyan]{protected_tokens:,}[/cyan] recent tokens preserved
 [bold]compaction_threshold:[/bold] [cyan]{compaction_threshold:.1%}[/cyan] context usage triggers compaction
@@ -211,6 +213,14 @@ def handle_set_command(command: str) -> bool:
         )
         return True
     if key:
+        # Check if we're toggling DBOS enablement
+        if key == "enable_dbos":
+            emit_info(
+                Text.from_markup(
+                    "[yellow]⚠️ DBOS configuration changed. Please restart Code Puppy for this change to take effect.[/yellow]"
+                )
+            )
+
         # Validate cancel_agent_key before setting
         if key == "cancel_agent_key":
             from code_puppy.keymap import VALID_CANCEL_KEYS
code_puppy/config.py
CHANGED
@@ -47,6 +47,7 @@ MODELS_FILE = os.path.join(DATA_DIR, "models.json")
 EXTRA_MODELS_FILE = os.path.join(DATA_DIR, "extra_models.json")
 AGENTS_DIR = os.path.join(DATA_DIR, "agents")
 CONTEXTS_DIR = os.path.join(DATA_DIR, "contexts")
+_DEFAULT_SQLITE_FILE = os.path.join(DATA_DIR, "dbos_store.sqlite")
 
 # OAuth plugin model files (XDG_DATA_HOME)
 GEMINI_MODELS_FILE = os.path.join(DATA_DIR, "gemini_models.json")
@@ -59,6 +60,21 @@ AUTOSAVE_DIR = os.path.join(CACHE_DIR, "autosaves")
 
 # State files (XDG_STATE_HOME)
 COMMAND_HISTORY_FILE = os.path.join(STATE_DIR, "command_history.txt")
+DBOS_DATABASE_URL = os.environ.get(
+    "DBOS_SYSTEM_DATABASE_URL", f"sqlite:///{_DEFAULT_SQLITE_FILE}"
+)
+# DBOS enable switch is controlled solely via puppy.cfg using key 'enable_dbos'.
+# Default: False (DBOS disabled) unless explicitly enabled.
+
+
+def get_use_dbos() -> bool:
+    """Return True if DBOS should be used based on 'enable_dbos' (default False)."""
+    cfg_val = get_value("enable_dbos")
+    if cfg_val is None:
+        return False
+    return str(cfg_val).strip().lower() in {"1", "true", "yes", "on"}
+
+
 DEFAULT_SECTION = "puppy"
 REQUIRED_KEYS = ["puppy_name", "owner_name"]
 
@@ -193,6 +209,8 @@ def get_config_keys():
         "default_agent",
         "temperature",
     ]
+    # Add DBOS control key
+    default_keys.append("enable_dbos")
     # Add cancel agent key configuration
     default_keys.append("cancel_agent_key")
     # Add banner color keys
@@ -1029,6 +1047,11 @@ def set_http2(enabled: bool) -> None:
     set_config_value("http2", "true" if enabled else "false")
 
 
+def set_enable_dbos(enabled: bool) -> None:
+    """Enable DBOS via config (true enables, default false)."""
+    set_config_value("enable_dbos", "true" if enabled else "false")
+
+
 def get_message_limit(default: int = 1000) -> int:
     """
     Returns the user-configured message/request limit for the agent.
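
Note: get_use_dbos() above parses the stored config value as a case-insensitive truthy string. A quick, runnable restatement of that parsing, showing which spellings enable DBOS:

def parse_enable_dbos(cfg_val) -> bool:
    # Mirrors the body of get_use_dbos() for a raw config value.
    if cfg_val is None:
        return False
    return str(cfg_val).strip().lower() in {"1", "true", "yes", "on"}

assert parse_enable_dbos("TRUE") is True
assert parse_enable_dbos(" on ") is True
assert parse_enable_dbos("0") is False   # anything outside the set is False
assert parse_enable_dbos(None) is False  # unset key keeps DBOS disabled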
code_puppy/summarization_agent.py
CHANGED
@@ -4,7 +4,10 @@ from typing import List
 
 from pydantic_ai import Agent
 
-from code_puppy.config import
+from code_puppy.config import (
+    get_global_model_name,
+    get_use_dbos,
+)
 from code_puppy.model_factory import ModelFactory, make_model_settings
 
 # Keep a module-level agent reference to avoid rebuilding per call
@@ -103,6 +106,13 @@ def reload_summarization_agent():
         retries=1,  # Fewer retries for summarization
         model_settings=model_settings,
     )
+    if get_use_dbos():
+        from pydantic_ai.durable_exec.dbos import DBOSAgent
+
+        global _reload_count
+        _reload_count += 1
+        dbos_agent = DBOSAgent(agent, name=f"summarization-agent-{_reload_count}")
+        return dbos_agent
     return agent
 
 
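
Note: like base_agent.py, this file suffixes the DBOSAgent name with a module-level reload counter, presumably so a reloaded agent never re-registers under a name DBOS has already seen in this process. The shared pattern, reduced to a sketch:

from pydantic_ai.durable_exec.dbos import DBOSAgent

_reload_count = 0

def wrap_durably(agent, base_name: str) -> DBOSAgent:
    # Each reload gets a fresh, unique DBOS agent name.
    global _reload_count
    _reload_count += 1
    return DBOSAgent(agent, name=f"{base_name}-{_reload_count}")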
code_puppy/tools/agent_tools.py
CHANGED
@@ -1,6 +1,7 @@
 # agent_tools.py
 import asyncio
 import hashlib
+import itertools
 import json
 import pickle
 import re
@@ -9,6 +10,7 @@ from datetime import datetime
 from pathlib import Path
 from typing import List, Set
 
+from dbos import DBOS, SetWorkflowID
 from pydantic import BaseModel
 
 # Import Agent from pydantic_ai to create temporary agents for invocation
@@ -18,6 +20,7 @@ from pydantic_ai.messages import ModelMessage
 from code_puppy.config import (
     DATA_DIR,
     get_message_limit,
+    get_use_dbos,
 )
 from code_puppy.messaging import (
     SubAgentInvocationMessage,
@@ -34,6 +37,27 @@ from code_puppy.tools.common import generate_group_id
 # Set to track active subagent invocation tasks
 _active_subagent_tasks: Set[asyncio.Task] = set()
 
+# Atomic counter for DBOS workflow IDs - ensures uniqueness even in rapid back-to-back calls
+# itertools.count() is thread-safe for next() calls
+_dbos_workflow_counter = itertools.count()
+
+
+def _generate_dbos_workflow_id(base_id: str) -> str:
+    """Generate a unique DBOS workflow ID by appending an atomic counter.
+
+    DBOS requires workflow IDs to be unique across all executions.
+    This function ensures uniqueness by combining the base_id with
+    an atomically incrementing counter.
+
+    Args:
+        base_id: The base identifier (e.g., group_id from generate_group_id)
+
+    Returns:
+        A unique workflow ID in format: {base_id}-wf-{counter}
+    """
+    counter = next(_dbos_workflow_counter)
+    return f"{base_id}-wf-{counter}"
+
 
 def _generate_session_hash_suffix() -> str:
     """Generate a short SHA1 hash suffix based on current timestamp for uniqueness.
@@ -444,11 +468,9 @@ def register_invoke_agent(agent):
         instructions = prepared.instructions
         prompt = prepared.user_prompt
 
+        subagent_name = f"temp-invoke-agent-{session_id}"
         model_settings = make_model_settings(model_name)
 
-        # Load MCP servers so sub-agents have access to the same tools as the main agent
-        mcp_servers = agent_config.load_mcp_servers()
-
         temp_agent = Agent(
             model=model,
             instructions=instructions,
@@ -456,7 +478,6 @@
             retries=3,
             history_processors=[agent_config.message_history_accumulator],
             model_settings=model_settings,
-            toolsets=mcp_servers if mcp_servers else [],
         )
 
         # Register the tools that the agent needs
@@ -465,21 +486,44 @@
         agent_tools = agent_config.get_available_tools()
         register_tools_for_agent(temp_agent, agent_tools)
 
+        if get_use_dbos():
+            from pydantic_ai.durable_exec.dbos import DBOSAgent
+
+            dbos_agent = DBOSAgent(temp_agent, name=subagent_name)
+            temp_agent = dbos_agent
+
         # Run the temporary agent with the provided prompt as an asyncio task
         # Pass the message_history from the session to continue the conversation
-
-
-
-
-
+        workflow_id = None  # Track for potential cancellation
+        if get_use_dbos():
+            # Generate a unique workflow ID for DBOS - ensures no collisions in back-to-back calls
+            workflow_id = _generate_dbos_workflow_id(group_id)
+            with SetWorkflowID(workflow_id):
+                task = asyncio.create_task(
+                    temp_agent.run(
+                        prompt,
+                        message_history=message_history,
+                        usage_limits=UsageLimits(request_limit=get_message_limit()),
+                    )
+                )
+            _active_subagent_tasks.add(task)
+        else:
+            task = asyncio.create_task(
+                temp_agent.run(
+                    prompt,
+                    message_history=message_history,
+                    usage_limits=UsageLimits(request_limit=get_message_limit()),
+                )
             )
-
-        _active_subagent_tasks.add(task)
+            _active_subagent_tasks.add(task)
 
         try:
             result = await task
         finally:
             _active_subagent_tasks.discard(task)
+            if task.cancelled():
+                if get_use_dbos() and workflow_id:
+                    DBOS.cancel_workflow(workflow_id)
 
         # Extract the response from the result
         response = result.output
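
Note: _generate_dbos_workflow_id above leans on itertools.count(), whose next() is atomic enough under CPython that rapid back-to-back sub-agent invocations cannot mint the same workflow ID. A compact demonstration of the ID shape:

import itertools

_counter = itertools.count()

def workflow_id(base_id: str) -> str:
    # Same {base_id}-wf-{counter} format as the diff above.
    return f"{base_id}-wf-{next(_counter)}"

print(workflow_id("grp-abc"))  # grp-abc-wf-0
print(workflow_id("grp-abc"))  # grp-abc-wf-1 (same base, distinct workflow)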
code_puppy/tools/browser/vqa_agent.py
CHANGED
@@ -7,7 +7,7 @@ from functools import lru_cache
 from pydantic import BaseModel, Field
 from pydantic_ai import Agent, BinaryContent
 
-from code_puppy.config import get_vqa_model_name
+from code_puppy.config import get_use_dbos, get_vqa_model_name
 from code_puppy.model_factory import ModelFactory
 
 
@@ -50,6 +50,12 @@ def _load_vqa_agent(model_name: str) -> Agent[None, VisualAnalysisResult]:
         retries=2,
     )
 
+    if get_use_dbos():
+        from pydantic_ai.durable_exec.dbos import DBOSAgent
+
+        dbos_agent = DBOSAgent(vqa_agent, name="vqa-agent")
+        return dbos_agent
+
     return vqa_agent
 
 
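
Note: unlike the other wrap sites, the VQA agent uses the fixed name "vqa-agent" with no counter; the loader sits behind @lru_cache (visible in the hunk context), so the wrapper is built at most once per model name rather than on every call. A simplified, runnable sketch of that caching shape, with build_vqa_agent as a hypothetical stand-in for the real constructor:

from functools import lru_cache

def build_vqa_agent(model_name: str):
    """Hypothetical stand-in for the real pydantic-ai Agent constructor."""
    return object()

@lru_cache(maxsize=None)
def load_vqa_agent(model_name: str):
    agent = build_vqa_agent(model_name)
    # In the real file, this is where DBOSAgent(vqa_agent, name="vqa-agent") wraps it.
    return agent

assert load_vqa_agent("gpt-4o") is load_vqa_agent("gpt-4o")  # built once per model name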
{code_puppy-0.0.345.dist-info → code_puppy-0.0.347.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: code-puppy
-Version: 0.0.345
+Version: 0.0.347
 Summary: Code generation agent
 Project-URL: repository, https://github.com/mpfaffenberger/code_puppy
 Project-URL: HomePage, https://github.com/mpfaffenberger/code_puppy
@@ -16,6 +16,7 @@ Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Software Development :: Code Generators
 Requires-Python: <3.14,>=3.11
 Requires-Dist: camoufox>=0.4.11
+Requires-Dist: dbos>=2.5.0
 Requires-Dist: fastapi>=0.111.0
 Requires-Dist: httpx[http2]>=0.24.1
 Requires-Dist: json-repair>=0.46.2
@@ -173,6 +174,27 @@ These providers are automatically configured with correct OpenAI-compatible endpoints
 - **⚠️ Unsupported Providers** - Providers like Amazon Bedrock and Google Vertex that require special authentication are clearly marked
 - **⚠️ No Tool Calling** - Models without tool calling support show a big warning since they can't use Code Puppy's file/shell tools
 
+### Durable Execution
+
+Code Puppy now supports **[DBOS](https://github.com/dbos-inc/dbos-transact-py)** durable execution.
+
+When enabled, every agent is automatically wrapped as a `DBOSAgent`, checkpointing key interactions (including agent inputs, LLM responses, MCP calls, and tool calls) in a database for durability and recovery.
+
+You can toggle DBOS via either of these options:
+
+- CLI config (persists): `/set enable_dbos true` (or `false` to disable)
+
+
+
+Config takes precedence if set; otherwise the environment variable is used.
+
+### Configuration
+
+The following environment variables control DBOS behavior:
+- `DBOS_CONDUCTOR_KEY`: If set, Code Puppy connects to the [DBOS Management Console](https://console.dbos.dev/). Make sure you first register an app named `dbos-code-puppy` on the console to generate a Conductor key. Default: `None`.
+- `DBOS_LOG_LEVEL`: Logging verbosity: `CRITICAL`, `ERROR`, `WARNING`, `INFO`, or `DEBUG`. Default: `ERROR`.
+- `DBOS_SYSTEM_DATABASE_URL`: Database URL used by DBOS. Can point to a local SQLite file or a Postgres instance. Example: `postgresql://postgres:dbos@localhost:5432/postgres`. Default: `dbos_store.sqlite` file in the config directory.
+- `DBOS_APP_VERSION`: If set, Code Puppy uses it as the [DBOS application version](https://docs.dbos.dev/architecture#application-and-workflow-versions) and automatically tries to recover pending workflows for this version. Default: Code Puppy version + Unix timestamp in millisecond (disable automatic recovery).
+
 ### Custom Commands
 Create markdown files in `.claude/commands/`, `.github/prompts/`, or `.agents/commands/` to define custom slash commands. The filename becomes the command name and the content runs as a prompt.
 
{code_puppy-0.0.345.dist-info → code_puppy-0.0.347.dist-info}/RECORD
CHANGED
@@ -3,8 +3,8 @@ code_puppy/__main__.py,sha256=pDVssJOWP8A83iFkxMLY9YteHYat0EyWDQqMkKHpWp4,203
 code_puppy/callbacks.py,sha256=hqTV--dNxG5vwWWm3MrEjmb8MZuHFFdmHePl23NXPHk,8621
 code_puppy/chatgpt_codex_client.py,sha256=Om0ANB_kpHubhCwNzF9ENf8RvKBqs0IYzBLl_SNw0Vk,9833
 code_puppy/claude_cache_client.py,sha256=MLIRSJP428r9IK_aV6XyCXrCfQnNti32U60psPymLM4,14860
-code_puppy/cli_runner.py,sha256=
-code_puppy/config.py,sha256=
+code_puppy/cli_runner.py,sha256=w5CLKgQYYaT7My3Cga2StXYol-u6DBxNzzUuhhsfhsA,34952
+code_puppy/config.py,sha256=RlnrLkyFXm7h2Htf8rQA7vqoAyzLPMrESle417uLmFw,52373
 code_puppy/error_logging.py,sha256=a80OILCUtJhexI6a9GM-r5LqIdjvSRzggfgPp2jv1X0,3297
 code_puppy/gemini_code_assist.py,sha256=KGS7sO5OLc83nDF3xxS-QiU6vxW9vcm6hmzilu79Ef8,13867
 code_puppy/http_utils.py,sha256=H3N5Qz2B1CcsGUYOycGWAqoNMr2P1NCVluKX3aRwRqI,10358
@@ -20,7 +20,7 @@ code_puppy/reopenable_async_client.py,sha256=pD34chyBFcC7_OVPJ8fp6aRI5jYdN-7VDyc
 code_puppy/round_robin_model.py,sha256=kSawwPUiPgg0yg8r4AAVgvjzsWkptxpSORd75-HP7W4,5335
 code_puppy/session_storage.py,sha256=T4hOsAl9z0yz2JZCptjJBOnN8fCmkLZx5eLy1hTdv6Q,9631
 code_puppy/status_display.py,sha256=qHzIQGAPEa2_-4gQSg7_rE1ihOosBq8WO73MWFNmmlo,8938
-code_puppy/summarization_agent.py,sha256=
+code_puppy/summarization_agent.py,sha256=6Pu_Wp_rF-HAhoX9u2uXTabRVkOZUYwRoMP1lzNS4ew,4485
 code_puppy/terminal_utils.py,sha256=TaS19x7EZqudlBUAQwLMzBMNxBHBNInvQQREXqRGtkM,12984
 code_puppy/uvx_detection.py,sha256=tP9X9Nvzow--KIqtqjgrHQkSxMJ3EevfoaeoB9VLY2o,7224
 code_puppy/version_checker.py,sha256=aq2Mwxl1CR9sEFBgrPt3OQOowLOBUp9VaQYWJhuUv8Q,1780
@@ -40,7 +40,8 @@ code_puppy/agents/agent_qa_expert.py,sha256=5Ikb4U3SZQknUEfwlHZiyZXKqnffnOTQagr_
 code_puppy/agents/agent_qa_kitten.py,sha256=5PeFFSwCFlTUvP6h5bGntx0xv5NmRwBiw0HnMqY8nLI,9107
 code_puppy/agents/agent_security_auditor.py,sha256=SpiYNA0XAsIwBj7S2_EQPRslRUmF_-b89pIJyW7DYtY,12022
 code_puppy/agents/agent_typescript_reviewer.py,sha256=vsnpp98xg6cIoFAEJrRTUM_i4wLEWGm5nJxs6fhHobM,10275
-code_puppy/agents/base_agent.py,sha256=
+code_puppy/agents/base_agent.py,sha256=zX7XPNgveBoBCm-SoRncwabnU0uSyEwtG3q-x8JFkiU,73256
+code_puppy/agents/event_stream_handler.py,sha256=C1TDkp9eTHEFvnTQzaGFh_q9izL1r-EnCRTez9kqO2Y,11438
 code_puppy/agents/json_agent.py,sha256=lhopDJDoiSGHvD8A6t50hi9ZBoNRKgUywfxd0Po_Dzc,4886
 code_puppy/agents/prompt_reviewer.py,sha256=JJrJ0m5q0Puxl8vFsyhAbY9ftU9n6c6UxEVdNct1E-Q,5558
 code_puppy/command_line/__init__.py,sha256=y7WeRemfYppk8KVbCGeAIiTuiOszIURCDjOMZv_YRmU,45
@@ -51,7 +52,7 @@ code_puppy/command_line/clipboard.py,sha256=oe9bfAX5RnT81FiYrDmhvHaePS1tAT-NFG1f
 code_puppy/command_line/colors_menu.py,sha256=LoFVfJ-Mo-Eq9hnb2Rj5mn7oBCnadAGr-8NNHsHlu18,17273
 code_puppy/command_line/command_handler.py,sha256=CY9F27eovZJK_kpU1YmbroYLWGTCuouCOQ-TXfDp-nw,10916
 code_puppy/command_line/command_registry.py,sha256=qFySsw1g8dol3kgi0p6cXrIDlP11_OhOoaQ5nAadWXg,4416
-code_puppy/command_line/config_commands.py,sha256=
+code_puppy/command_line/config_commands.py,sha256=qS9Cm758DPz2QGvHLhAV4Tp_Xfgo3PyoCoLDusbnmCw,25742
 code_puppy/command_line/core_commands.py,sha256=ujAPD4yDbXwYGJJfR2u4ei24eBV-Ps_-BVBjFMEoJy0,27668
 code_puppy/command_line/diff_menu.py,sha256=_Gr9SP9fbItk-08dya9WTAR53s_PlyAvEnbt-8VWKPk,24141
 code_puppy/command_line/file_path_completion.py,sha256=gw8NpIxa6GOpczUJRyh7VNZwoXKKn-yvCqit7h2y6Gg,2931
@@ -158,7 +159,7 @@ code_puppy/plugins/shell_safety/command_cache.py,sha256=adYtSPNVOZfW_6dQdtEihO6E
 code_puppy/plugins/shell_safety/register_callbacks.py,sha256=W3v664RR48Fdbbbltf_NnX22_Ahw2AvAOtvXvWc7KxQ,7322
 code_puppy/prompts/codex_system_prompt.md,sha256=hEFTCziroLqZmqNle5kG34A8kvTteOWezCiVrAEKhE0,24400
 code_puppy/tools/__init__.py,sha256=BVTZ85jLHgDANwOnUSOz3UDlp8VQDq4DoGF23BRlyWw,6032
-code_puppy/tools/agent_tools.py,sha256=
+code_puppy/tools/agent_tools.py,sha256=snBI6FlFtR03CbYKXwu53R48c_fRSuDIwcNdVUruLcA,21020
 code_puppy/tools/command_runner.py,sha256=3qXVnVTaBPia6y2D29As47_TRKgpyCj82yMFK-8UUYc,44954
 code_puppy/tools/common.py,sha256=IYf-KOcP5eN2MwTlpULSXNATn7GzloAKl7_M1Uyfe4Y,40360
 code_puppy/tools/file_modifications.py,sha256=vz9n7R0AGDSdLUArZr_55yJLkyI30M8zreAppxIx02M,29380
@@ -173,11 +174,11 @@ code_puppy/tools/browser/browser_screenshot.py,sha256=7jeG5N4-OkpRPY3JMwOrsJjutY
 code_puppy/tools/browser/browser_scripts.py,sha256=sNb8eLEyzhasy5hV4B9OjM8yIVMLVEMRcQ4K77ABjRI,14564
 code_puppy/tools/browser/browser_workflows.py,sha256=nitW42vCf0ieTX1gLabozTugNQ8phtoFzZbiAhw1V90,6491
 code_puppy/tools/browser/camoufox_manager.py,sha256=RZjGOEftE5sI_tsercUyXFSZI2wpStXf-q0PdYh2G3I,8680
-code_puppy/tools/browser/vqa_agent.py,sha256=
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
+code_puppy/tools/browser/vqa_agent.py,sha256=DBn9HKloILqJSTSdNZzH_PYWT0B2h9VwmY6akFQI_uU,2913
+code_puppy-0.0.347.data/data/code_puppy/models.json,sha256=FMQdE_yvP_8y0xxt3K918UkFL9cZMYAqW1SfXcQkU_k,3105
+code_puppy-0.0.347.data/data/code_puppy/models_dev_api.json,sha256=wHjkj-IM_fx1oHki6-GqtOoCrRMR0ScK0f-Iz0UEcy8,548187
+code_puppy-0.0.347.dist-info/METADATA,sha256=NoIXPPBnCAxJFAbT5QA7KxxPGor_fP_zQs2mvT5zAd8,27550
+code_puppy-0.0.347.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+code_puppy-0.0.347.dist-info/entry_points.txt,sha256=Tp4eQC99WY3HOKd3sdvb22vZODRq0XkZVNpXOag_KdI,91
+code_puppy-0.0.347.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
+code_puppy-0.0.347.dist-info/RECORD,,
{code_puppy-0.0.345.data → code_puppy-0.0.347.data}/data/code_puppy/models.json
File without changes
{code_puppy-0.0.345.data → code_puppy-0.0.347.data}/data/code_puppy/models_dev_api.json
File without changes
{code_puppy-0.0.345.dist-info → code_puppy-0.0.347.dist-info}/WHEEL
File without changes
{code_puppy-0.0.345.dist-info → code_puppy-0.0.347.dist-info}/entry_points.txt
File without changes
{code_puppy-0.0.345.dist-info → code_puppy-0.0.347.dist-info}/licenses/LICENSE
File without changes