code-puppy 0.0.342__py3-none-any.whl → 0.0.344__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
code_puppy/config.py CHANGED
@@ -47,7 +47,6 @@ MODELS_FILE = os.path.join(DATA_DIR, "models.json")
  EXTRA_MODELS_FILE = os.path.join(DATA_DIR, "extra_models.json")
  AGENTS_DIR = os.path.join(DATA_DIR, "agents")
  CONTEXTS_DIR = os.path.join(DATA_DIR, "contexts")
- _DEFAULT_SQLITE_FILE = os.path.join(DATA_DIR, "dbos_store.sqlite")

  # OAuth plugin model files (XDG_DATA_HOME)
  GEMINI_MODELS_FILE = os.path.join(DATA_DIR, "gemini_models.json")
@@ -60,21 +59,6 @@ AUTOSAVE_DIR = os.path.join(CACHE_DIR, "autosaves")

  # State files (XDG_STATE_HOME)
  COMMAND_HISTORY_FILE = os.path.join(STATE_DIR, "command_history.txt")
- DBOS_DATABASE_URL = os.environ.get(
-     "DBOS_SYSTEM_DATABASE_URL", f"sqlite:///{_DEFAULT_SQLITE_FILE}"
- )
- # DBOS enable switch is controlled solely via puppy.cfg using key 'enable_dbos'.
- # Default: False (DBOS disabled) unless explicitly enabled.
-
-
- def get_use_dbos() -> bool:
-     """Return True if DBOS should be used based on 'enable_dbos' (default False)."""
-     cfg_val = get_value("enable_dbos")
-     if cfg_val is None:
-         return False
-     return str(cfg_val).strip().lower() in {"1", "true", "yes", "on"}
-
-
  DEFAULT_SECTION = "puppy"
  REQUIRED_KEYS = ["puppy_name", "owner_name"]

@@ -209,8 +193,6 @@ def get_config_keys():
          "default_agent",
          "temperature",
      ]
-     # Add DBOS control key
-     default_keys.append("enable_dbos")
      # Add cancel agent key configuration
      default_keys.append("cancel_agent_key")
      # Add banner color keys
@@ -1047,11 +1029,6 @@ def set_http2(enabled: bool) -> None:
      set_config_value("http2", "true" if enabled else "false")


- def set_enable_dbos(enabled: bool) -> None:
-     """Enable DBOS via config (true enables, default false)."""
-     set_config_value("enable_dbos", "true" if enabled else "false")
-
-
  def get_message_limit(default: int = 1000) -> int:
      """
      Returns the user-configured message/request limit for the agent.
@@ -108,10 +108,17 @@ class AsyncServerLifecycleManager:

          try:
              logger.info(f"Starting server lifecycle for {server_id}")
+             logger.info(
+                 f"Server {server_id} _running_count before enter: {getattr(server, '_running_count', 'N/A')}"
+             )

              # Enter the server's context
              await exit_stack.enter_async_context(server)

+             logger.info(
+                 f"Server {server_id} _running_count after enter: {getattr(server, '_running_count', 'N/A')}"
+             )
+
              # Store the managed context
              async with self._lock:
                  self._servers[server_id] = ManagedServerContext(
@@ -122,26 +129,50 @@ class AsyncServerLifecycleManager:
                      task=asyncio.current_task(),
                  )

-             logger.info(f"Server {server_id} started successfully")
+             logger.info(
+                 f"Server {server_id} started successfully and stored in _servers"
+             )

              # Keep the task alive until cancelled
+             loop_count = 0
              while True:
                  await asyncio.sleep(1)
+                 loop_count += 1

                  # Check if server is still running
-                 if not server.is_running:
-                     logger.warning(f"Server {server_id} stopped unexpectedly")
+                 running_count = getattr(server, "_running_count", "N/A")
+                 is_running = server.is_running
+                 logger.debug(
+                     f"Server {server_id} heartbeat #{loop_count}: "
+                     f"is_running={is_running}, _running_count={running_count}"
+                 )
+
+                 if not is_running:
+                     logger.warning(
+                         f"Server {server_id} stopped unexpectedly! "
+                         f"_running_count={running_count}"
+                     )
                      break

          except asyncio.CancelledError:
              logger.info(f"Server {server_id} lifecycle task cancelled")
              raise
          except Exception as e:
-             logger.error(f"Error in server {server_id} lifecycle: {e}")
+             logger.error(f"Error in server {server_id} lifecycle: {e}", exc_info=True)
          finally:
+             running_count = getattr(server, "_running_count", "N/A")
+             logger.info(
+                 f"Server {server_id} lifecycle ending, _running_count={running_count}"
+             )
+
              # Clean up the context
              await exit_stack.aclose()

+             running_count_after = getattr(server, "_running_count", "N/A")
+             logger.info(
+                 f"Server {server_id} context closed, _running_count={running_count_after}"
+             )
+
              # Remove from managed servers
              async with self._lock:
                  if server_id in self._servers:
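The keep-alive loop in this hunk is a standard asyncio watchdog: a long-lived task polls a liveness flag once per second, logs a heartbeat, and exits when the flag drops. A minimal standalone sketch of the same pattern (`FakeServer` and its `is_running` flag are illustrative stand-ins, not the package's real classes):

```python
import asyncio
import logging

logger = logging.getLogger(__name__)


class FakeServer:
    """Illustrative stand-in for a managed server exposing a liveness flag."""

    def __init__(self) -> None:
        self.is_running = True


async def lifecycle(server: FakeServer, server_id: str) -> None:
    heartbeat = 0
    try:
        # Keep the task alive until cancelled, checking liveness each second.
        while True:
            await asyncio.sleep(1)
            heartbeat += 1
            logger.debug("Server %s heartbeat #%d", server_id, heartbeat)
            if not server.is_running:
                logger.warning("Server %s stopped unexpectedly", server_id)
                break
    except asyncio.CancelledError:
        logger.info("Server %s lifecycle task cancelled", server_id)
        raise
    finally:
        logger.info("Server %s lifecycle ending", server_id)
```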
@@ -28,6 +28,31 @@ from code_puppy.mcp_.blocking_startup import BlockingMCPServerStdio
  from code_puppy.messaging import emit_info


+ def _expand_env_vars(value: Any) -> Any:
+     """
+     Recursively expand environment variables in config values.
+
+     Supports $VAR and ${VAR} syntax. Works with:
+     - Strings: expands env vars
+     - Dicts: recursively expands all string values
+     - Lists: recursively expands all string elements
+     - Other types: returned as-is
+
+     Args:
+         value: The value to expand env vars in
+
+     Returns:
+         The value with env vars expanded
+     """
+     if isinstance(value, str):
+         return os.path.expandvars(value)
+     elif isinstance(value, dict):
+         return {k: _expand_env_vars(v) for k, v in value.items()}
+     elif isinstance(value, list):
+         return [_expand_env_vars(item) for item in value]
+     return value
+
+
  class ServerState(Enum):
      """Enumeration of possible server states."""

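The new helper leans entirely on `os.path.expandvars`, so its semantics follow that function: both `$VAR` and `${VAR}` are substituted, and unknown variables are left untouched rather than raising. A quick illustration (`MCP_TOKEN` is an invented variable name for the demo):

```python
import os

os.environ["MCP_TOKEN"] = "s3cret"  # hypothetical value for illustration

print(os.path.expandvars("Bearer $MCP_TOKEN"))    # Bearer s3cret
print(os.path.expandvars("Bearer ${MCP_TOKEN}"))  # Bearer s3cret
print(os.path.expandvars("$UNSET_VAR"))           # $UNSET_VAR (left as-is)
```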
@@ -153,9 +178,9 @@ class ManagedMCPServer:
          if "url" not in config:
              raise ValueError("SSE server requires 'url' in config")

-         # Prepare arguments for MCPServerSSE
+         # Prepare arguments for MCPServerSSE (expand env vars in URL)
          sse_kwargs = {
-             "url": config["url"],
+             "url": _expand_env_vars(config["url"]),
          }

          # Add optional parameters if provided
@@ -177,21 +202,23 @@ class ManagedMCPServer:
          if "command" not in config:
              raise ValueError("Stdio server requires 'command' in config")

-         # Handle command and arguments
-         command = config["command"]
+         # Handle command and arguments (expand env vars)
+         command = _expand_env_vars(config["command"])
          args = config.get("args", [])
          if isinstance(args, str):
-             # If args is a string, split it
-             args = args.split()
+             # If args is a string, split it then expand
+             args = [_expand_env_vars(a) for a in args.split()]
+         else:
+             args = _expand_env_vars(args)

          # Prepare arguments for MCPServerStdio
          stdio_kwargs = {"command": command, "args": list(args) if args else []}

-         # Add optional parameters if provided
+         # Add optional parameters if provided (expand env vars in env and cwd)
          if "env" in config:
-             stdio_kwargs["env"] = config["env"]
+             stdio_kwargs["env"] = _expand_env_vars(config["env"])
          if "cwd" in config:
-             stdio_kwargs["cwd"] = config["cwd"]
+             stdio_kwargs["cwd"] = _expand_env_vars(config["cwd"])
          if "timeout" in config:
              stdio_kwargs["timeout"] = config["timeout"]
          if "read_timeout" in config:
@@ -212,9 +239,9 @@ class ManagedMCPServer:
          if "url" not in config:
              raise ValueError("HTTP server requires 'url' in config")

-         # Prepare arguments for MCPServerStreamableHTTP
+         # Prepare arguments for MCPServerStreamableHTTP (expand env vars in URL)
          http_kwargs = {
-             "url": config["url"],
+             "url": _expand_env_vars(config["url"]),
          }

          # Add optional parameters if provided
@@ -223,13 +250,14 @@ class ManagedMCPServer:
          if "read_timeout" in config:
              http_kwargs["read_timeout"] = config["read_timeout"]

-         # Handle http_client vs headers (mutually exclusive)
-         if "http_client" in config:
-             # Use provided http_client
-             http_kwargs["http_client"] = config["http_client"]
-         elif config.get("headers"):
-             # Create HTTP client if headers are provided but no client specified
-             http_kwargs["http_client"] = self._get_http_client()
+         # Pass headers directly instead of creating http_client
+         # Note: There's a bug in MCP 1.25.0 where passing http_client
+         # causes "'_AsyncGeneratorContextManager' object has no attribute 'stream'"
+         # The workaround is to pass headers directly and let pydantic-ai
+         # create the http_client internally.
+         if config.get("headers"):
+             # Expand environment variables in headers
+             http_kwargs["headers"] = _expand_env_vars(config["headers"])

          self._pydantic_server = MCPServerStreamableHTTP(
              **http_kwargs, process_tool_call=process_tool_call
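In effect, a config entry can now carry a placeholder header and skip the hand-built client entirely. A hedged sketch of the resulting call (the URL and token are placeholders, and this assumes `pydantic_ai.mcp.MCPServerStreamableHTTP` accepts a `headers` mapping, which is what the change above relies on):

```python
from pydantic_ai.mcp import MCPServerStreamableHTTP

# Headers arrive already expanded from the config (e.g. "Bearer ${GITHUB_TOKEN}").
# No httpx client is constructed by hand, sidestepping the MCP 1.25.0
# http_client bug noted in the diff.
server = MCPServerStreamableHTTP(
    url="https://api.example.com/mcp",           # placeholder endpoint
    headers={"Authorization": "Bearer s3cret"},  # already-expanded value
)
```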
@@ -469,41 +469,57 @@ class MCPManager:
      def start_server_sync(self, server_id: str) -> bool:
          """
          Synchronous wrapper for start_server.
+
+         IMPORTANT: This schedules the server start as a background task.
+         The server subprocess will start asynchronously - it may not be
+         immediately ready when this function returns.
          """
          try:
-             asyncio.get_running_loop()
-             # We're in an async context, but we need to wait for completion
-             # Create a future and schedule the coroutine
+             loop = asyncio.get_running_loop()
+             # We're in an async context - schedule the server start as a background task
+             # DO NOT use blocking time.sleep() here as it freezes the event loop!
+
+             # First, enable the server immediately so it's recognized as "starting"
+             managed_server = self._managed_servers.get(server_id)
+             if managed_server:
+                 managed_server.enable()
+                 self.status_tracker.set_status(server_id, ServerState.STARTING)
+                 self.status_tracker.record_start_time(server_id)

-             # Use run_in_executor to run the async function synchronously
-             async def run_async():
-                 return await self.start_server(server_id)
+             # Schedule the async start_server to run in the background
+             # This will properly start the subprocess and lifecycle task
+             async def start_server_background():
+                 try:
+                     result = await self.start_server(server_id)
+                     if result:
+                         logger.info(f"Background server start completed: {server_id}")
+                     else:
+                         logger.warning(f"Background server start failed: {server_id}")
+                     return result
+                 except Exception as e:
+                     logger.error(f"Background server start error for {server_id}: {e}")
+                     self.status_tracker.set_status(server_id, ServerState.ERROR)
+                     return False

-             # Schedule the task and wait briefly for it to complete
-             task = asyncio.create_task(run_async())
+             # Create the task - it will run when the event loop gets control
+             task = loop.create_task(
+                 start_server_background(), name=f"start_server_{server_id}"
+             )

-             # Give it a moment to complete - this fixes the race condition
-             import time
+             # Store task reference to prevent garbage collection
+             if not hasattr(self, "_pending_start_tasks"):
+                 self._pending_start_tasks = {}
+             self._pending_start_tasks[server_id] = task

-             time.sleep(0.1)  # Small delay to let async tasks progress
+             # Add callback to clean up task reference when done
+             def cleanup_task(t):
+                 if hasattr(self, "_pending_start_tasks"):
+                     self._pending_start_tasks.pop(server_id, None)

-             # Check if task completed, if not, fall back to sync enable
-             if task.done():
-                 try:
-                     result = task.result()
-                     return result
-                 except Exception:
-                     pass
+             task.add_done_callback(cleanup_task)

-             # If async didn't complete, enable synchronously
-             managed_server = self._managed_servers.get(server_id)
-             if managed_server:
-                 managed_server.enable()
-                 self.status_tracker.set_status(server_id, ServerState.RUNNING)
-                 self.status_tracker.record_start_time(server_id)
-                 logger.info(f"Enabled server synchronously: {server_id}")
-                 return True
-             return False
+             logger.info(f"Scheduled background start for server: {server_id}")
+             return True  # Return immediately - server will start in background

          except RuntimeError:
              # No async loop, just enable the server
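The new approach is the usual fire-and-forget asyncio recipe: keep a strong reference to the task so the event loop's weak reference doesn't let it get garbage-collected mid-flight, then drop that reference from a done-callback. A self-contained sketch of the same pattern (all names are illustrative):

```python
import asyncio

_pending: dict[str, asyncio.Task] = {}


async def _start_worker(name: str) -> bool:
    await asyncio.sleep(0.5)  # stand-in for the real async start-up work
    return True


def schedule_start(name: str) -> bool:
    loop = asyncio.get_running_loop()
    task = loop.create_task(_start_worker(name), name=f"start_server_{name}")
    _pending[name] = task  # strong reference prevents premature GC
    task.add_done_callback(lambda t: _pending.pop(name, None))
    return True  # returns immediately; work continues in the background


async def main() -> None:
    schedule_start("demo")
    await asyncio.sleep(1)  # give the background task time to finish


asyncio.run(main())
```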
@@ -582,39 +598,52 @@ class MCPManager:
      def stop_server_sync(self, server_id: str) -> bool:
          """
          Synchronous wrapper for stop_server.
+
+         IMPORTANT: This schedules the server stop as a background task.
+         The server subprocess will stop asynchronously.
          """
          try:
-             asyncio.get_running_loop()
+             loop = asyncio.get_running_loop()
+             # We're in an async context - schedule the server stop as a background task
+             # DO NOT use blocking time.sleep() here as it freezes the event loop!

-             # We're in an async context, but we need to wait for completion
-             async def run_async():
-                 return await self.stop_server(server_id)
+             # First, disable the server immediately
+             managed_server = self._managed_servers.get(server_id)
+             if managed_server:
+                 managed_server.disable()
+                 self.status_tracker.set_status(server_id, ServerState.STOPPING)
+                 self.status_tracker.record_stop_time(server_id)

-             # Schedule the task and wait briefly for it to complete
-             task = asyncio.create_task(run_async())
+             # Schedule the async stop_server to run in the background
+             async def stop_server_background():
+                 try:
+                     result = await self.stop_server(server_id)
+                     if result:
+                         logger.info(f"Background server stop completed: {server_id}")
+                     return result
+                 except Exception as e:
+                     logger.error(f"Background server stop error for {server_id}: {e}")
+                     return False

-             # Give it a moment to complete - this fixes the race condition
-             import time
+             # Create the task - it will run when the event loop gets control
+             task = loop.create_task(
+                 stop_server_background(), name=f"stop_server_{server_id}"
+             )

-             time.sleep(0.1)  # Small delay to let async tasks progress
+             # Store task reference to prevent garbage collection
+             if not hasattr(self, "_pending_stop_tasks"):
+                 self._pending_stop_tasks = {}
+             self._pending_stop_tasks[server_id] = task

-             # Check if task completed, if not, fall back to sync disable
-             if task.done():
-                 try:
-                     result = task.result()
-                     return result
-                 except Exception:
-                     pass
+             # Add callback to clean up task reference when done
+             def cleanup_task(t):
+                 if hasattr(self, "_pending_stop_tasks"):
+                     self._pending_stop_tasks.pop(server_id, None)

-             # If async didn't complete, disable synchronously
-             managed_server = self._managed_servers.get(server_id)
-             if managed_server:
-                 managed_server.disable()
-                 self.status_tracker.set_status(server_id, ServerState.STOPPED)
-                 self.status_tracker.record_stop_time(server_id)
-                 logger.info(f"Disabled server synchronously: {server_id}")
-                 return True
-             return False
+             task.add_done_callback(cleanup_task)
+
+             logger.info(f"Scheduled background stop for server: {server_id}")
+             return True  # Return immediately - server will stop in background

          except RuntimeError:
              # No async loop, just disable the server
@@ -4,10 +4,7 @@ from typing import List

  from pydantic_ai import Agent

- from code_puppy.config import (
-     get_global_model_name,
-     get_use_dbos,
- )
+ from code_puppy.config import get_global_model_name
  from code_puppy.model_factory import ModelFactory, make_model_settings

  # Keep a module-level agent reference to avoid rebuilding per call
@@ -106,13 +103,6 @@ def reload_summarization_agent():
          retries=1,  # Fewer retries for summarization
          model_settings=model_settings,
      )
-     if get_use_dbos():
-         from pydantic_ai.durable_exec.dbos import DBOSAgent
-
-         global _reload_count
-         _reload_count += 1
-         dbos_agent = DBOSAgent(agent, name=f"summarization-agent-{_reload_count}")
-         return dbos_agent
      return agent


@@ -1,7 +1,6 @@
  # agent_tools.py
  import asyncio
  import hashlib
- import itertools
  import json
  import pickle
  import re
@@ -10,7 +9,6 @@ from datetime import datetime
  from pathlib import Path
  from typing import List, Set

- from dbos import DBOS, SetWorkflowID
  from pydantic import BaseModel

  # Import Agent from pydantic_ai to create temporary agents for invocation
@@ -20,7 +18,6 @@ from pydantic_ai.messages import ModelMessage
  from code_puppy.config import (
      DATA_DIR,
      get_message_limit,
-     get_use_dbos,
  )
  from code_puppy.messaging import (
      SubAgentInvocationMessage,
@@ -37,27 +34,6 @@ from code_puppy.tools.common import generate_group_id
  # Set to track active subagent invocation tasks
  _active_subagent_tasks: Set[asyncio.Task] = set()

- # Atomic counter for DBOS workflow IDs - ensures uniqueness even in rapid back-to-back calls
- # itertools.count() is thread-safe for next() calls
- _dbos_workflow_counter = itertools.count()
-
-
- def _generate_dbos_workflow_id(base_id: str) -> str:
-     """Generate a unique DBOS workflow ID by appending an atomic counter.
-
-     DBOS requires workflow IDs to be unique across all executions.
-     This function ensures uniqueness by combining the base_id with
-     an atomically incrementing counter.
-
-     Args:
-         base_id: The base identifier (e.g., group_id from generate_group_id)
-
-     Returns:
-         A unique workflow ID in format: {base_id}-wf-{counter}
-     """
-     counter = next(_dbos_workflow_counter)
-     return f"{base_id}-wf-{counter}"
-

  def _generate_session_hash_suffix() -> str:
      """Generate a short SHA1 hash suffix based on current timestamp for uniqueness.
@@ -468,9 +444,11 @@ def register_invoke_agent(agent):
          instructions = prepared.instructions
          prompt = prepared.user_prompt

-         subagent_name = f"temp-invoke-agent-{session_id}"
          model_settings = make_model_settings(model_name)

+         # Load MCP servers so sub-agents have access to the same tools as the main agent
+         mcp_servers = agent_config.load_mcp_servers()
+
          temp_agent = Agent(
              model=model,
              instructions=instructions,
@@ -478,6 +456,7 @@ def register_invoke_agent(agent):
              retries=3,
              history_processors=[agent_config.message_history_accumulator],
              model_settings=model_settings,
+             toolsets=mcp_servers if mcp_servers else [],
          )

          # Register the tools that the agent needs
@@ -486,44 +465,21 @@ def register_invoke_agent(agent):
          agent_tools = agent_config.get_available_tools()
          register_tools_for_agent(temp_agent, agent_tools)

-         if get_use_dbos():
-             from pydantic_ai.durable_exec.dbos import DBOSAgent
-
-             dbos_agent = DBOSAgent(temp_agent, name=subagent_name)
-             temp_agent = dbos_agent
-
          # Run the temporary agent with the provided prompt as an asyncio task
          # Pass the message_history from the session to continue the conversation
-         workflow_id = None  # Track for potential cancellation
-         if get_use_dbos():
-             # Generate a unique workflow ID for DBOS - ensures no collisions in back-to-back calls
-             workflow_id = _generate_dbos_workflow_id(group_id)
-             with SetWorkflowID(workflow_id):
-                 task = asyncio.create_task(
-                     temp_agent.run(
-                         prompt,
-                         message_history=message_history,
-                         usage_limits=UsageLimits(request_limit=get_message_limit()),
-                     )
-                 )
-             _active_subagent_tasks.add(task)
-         else:
-             task = asyncio.create_task(
-                 temp_agent.run(
-                     prompt,
-                     message_history=message_history,
-                     usage_limits=UsageLimits(request_limit=get_message_limit()),
-                 )
+         task = asyncio.create_task(
+             temp_agent.run(
+                 prompt,
+                 message_history=message_history,
+                 usage_limits=UsageLimits(request_limit=get_message_limit()),
              )
-             _active_subagent_tasks.add(task)
+         )
+         _active_subagent_tasks.add(task)

          try:
              result = await task
          finally:
              _active_subagent_tasks.discard(task)
-             if task.cancelled():
-                 if get_use_dbos() and workflow_id:
-                     DBOS.cancel_workflow(workflow_id)

          # Extract the response from the result
          response = result.output
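The `toolsets` change means a sub-agent now sees the same MCP tools as the parent. A hedged sketch of the mechanism (the model string and server command are placeholders; this assumes the `Agent(toolsets=...)` parameter and the `MCPServerStdio` constructor shape used elsewhere in this package):

```python
from pydantic_ai import Agent
from pydantic_ai.mcp import MCPServerStdio

# A hypothetical stdio MCP server; command and args are placeholders.
fetch_server = MCPServerStdio(command="uvx", args=["mcp-server-fetch"])

sub_agent = Agent(
    model="openai:gpt-4o",        # placeholder model name
    instructions="You are a helpful sub-agent.",
    toolsets=[fetch_server],      # same hand-off as load_mcp_servers() above
)
```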
@@ -7,7 +7,7 @@ from functools import lru_cache
  from pydantic import BaseModel, Field
  from pydantic_ai import Agent, BinaryContent

- from code_puppy.config import get_use_dbos, get_vqa_model_name
+ from code_puppy.config import get_vqa_model_name
  from code_puppy.model_factory import ModelFactory


@@ -50,12 +50,6 @@ def _load_vqa_agent(model_name: str) -> Agent[None, VisualAnalysisResult]:
          retries=2,
      )

-     if get_use_dbos():
-         from pydantic_ai.durable_exec.dbos import DBOSAgent
-
-         dbos_agent = DBOSAgent(vqa_agent, name="vqa-agent")
-         return dbos_agent
-
      return vqa_agent


@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: code-puppy
- Version: 0.0.342
+ Version: 0.0.344
  Summary: Code generation agent
  Project-URL: repository, https://github.com/mpfaffenberger/code_puppy
  Project-URL: HomePage, https://github.com/mpfaffenberger/code_puppy
@@ -16,7 +16,6 @@ Classifier: Programming Language :: Python :: 3.13
  Classifier: Topic :: Software Development :: Code Generators
  Requires-Python: <3.14,>=3.11
  Requires-Dist: camoufox>=0.4.11
- Requires-Dist: dbos>=2.5.0
  Requires-Dist: fastapi>=0.111.0
  Requires-Dist: httpx[http2]>=0.24.1
  Requires-Dist: json-repair>=0.46.2
@@ -174,27 +173,6 @@ These providers are automatically configured with correct OpenAI-compatible endp
  - **⚠️ Unsupported Providers** - Providers like Amazon Bedrock and Google Vertex that require special authentication are clearly marked
  - **⚠️ No Tool Calling** - Models without tool calling support show a big warning since they can't use Code Puppy's file/shell tools

- ### Durable Execution
-
- Code Puppy now supports **[DBOS](https://github.com/dbos-inc/dbos-transact-py)** durable execution.
-
- When enabled, every agent is automatically wrapped as a `DBOSAgent`, checkpointing key interactions (including agent inputs, LLM responses, MCP calls, and tool calls) in a database for durability and recovery.
-
- You can toggle DBOS via either of these options:
-
- - CLI config (persists): `/set enable_dbos true` (or `false` to disable)
-
-
- Config takes precedence if set; otherwise the environment variable is used.
-
- ### Configuration
-
- The following environment variables control DBOS behavior:
- - `DBOS_CONDUCTOR_KEY`: If set, Code Puppy connects to the [DBOS Management Console](https://console.dbos.dev/). Make sure you first register an app named `dbos-code-puppy` on the console to generate a Conductor key. Default: `None`.
- - `DBOS_LOG_LEVEL`: Logging verbosity: `CRITICAL`, `ERROR`, `WARNING`, `INFO`, or `DEBUG`. Default: `ERROR`.
- - `DBOS_SYSTEM_DATABASE_URL`: Database URL used by DBOS. Can point to a local SQLite file or a Postgres instance. Example: `postgresql://postgres:dbos@localhost:5432/postgres`. Default: `dbos_store.sqlite` file in the config directory.
- - `DBOS_APP_VERSION`: If set, Code Puppy uses it as the [DBOS application version](https://docs.dbos.dev/architecture#application-and-workflow-versions) and automatically tries to recover pending workflows for this version. Default: Code Puppy version + Unix timestamp in millisecond (disable automatic recovery).
-
  ### Custom Commands
  Create markdown files in `.claude/commands/`, `.github/prompts/`, or `.agents/commands/` to define custom slash commands. The filename becomes the command name and the content runs as a prompt.