gobby 0.2.5__py3-none-any.whl → 0.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. gobby/adapters/claude_code.py +13 -4
  2. gobby/adapters/codex.py +43 -3
  3. gobby/agents/runner.py +8 -0
  4. gobby/cli/__init__.py +6 -0
  5. gobby/cli/clones.py +419 -0
  6. gobby/cli/conductor.py +266 -0
  7. gobby/cli/installers/antigravity.py +3 -9
  8. gobby/cli/installers/claude.py +9 -9
  9. gobby/cli/installers/codex.py +2 -8
  10. gobby/cli/installers/gemini.py +2 -8
  11. gobby/cli/installers/shared.py +71 -8
  12. gobby/cli/skills.py +858 -0
  13. gobby/cli/tasks/ai.py +0 -440
  14. gobby/cli/tasks/crud.py +44 -6
  15. gobby/cli/tasks/main.py +0 -4
  16. gobby/cli/tui.py +2 -2
  17. gobby/cli/utils.py +3 -3
  18. gobby/clones/__init__.py +13 -0
  19. gobby/clones/git.py +547 -0
  20. gobby/conductor/__init__.py +16 -0
  21. gobby/conductor/alerts.py +135 -0
  22. gobby/conductor/loop.py +164 -0
  23. gobby/conductor/monitors/__init__.py +11 -0
  24. gobby/conductor/monitors/agents.py +116 -0
  25. gobby/conductor/monitors/tasks.py +155 -0
  26. gobby/conductor/pricing.py +234 -0
  27. gobby/conductor/token_tracker.py +160 -0
  28. gobby/config/app.py +63 -1
  29. gobby/config/search.py +110 -0
  30. gobby/config/servers.py +1 -1
  31. gobby/config/skills.py +43 -0
  32. gobby/config/tasks.py +6 -14
  33. gobby/hooks/event_handlers.py +145 -2
  34. gobby/hooks/hook_manager.py +48 -2
  35. gobby/hooks/skill_manager.py +130 -0
  36. gobby/install/claude/hooks/hook_dispatcher.py +4 -4
  37. gobby/install/codex/hooks/hook_dispatcher.py +1 -1
  38. gobby/install/gemini/hooks/hook_dispatcher.py +87 -12
  39. gobby/llm/claude.py +22 -34
  40. gobby/llm/claude_executor.py +46 -256
  41. gobby/llm/codex_executor.py +59 -291
  42. gobby/llm/executor.py +21 -0
  43. gobby/llm/gemini.py +134 -110
  44. gobby/llm/litellm_executor.py +143 -6
  45. gobby/llm/resolver.py +95 -33
  46. gobby/mcp_proxy/instructions.py +54 -0
  47. gobby/mcp_proxy/models.py +15 -0
  48. gobby/mcp_proxy/registries.py +68 -5
  49. gobby/mcp_proxy/server.py +33 -3
  50. gobby/mcp_proxy/services/tool_proxy.py +81 -1
  51. gobby/mcp_proxy/stdio.py +2 -1
  52. gobby/mcp_proxy/tools/__init__.py +0 -2
  53. gobby/mcp_proxy/tools/agent_messaging.py +317 -0
  54. gobby/mcp_proxy/tools/clones.py +903 -0
  55. gobby/mcp_proxy/tools/memory.py +1 -24
  56. gobby/mcp_proxy/tools/metrics.py +65 -1
  57. gobby/mcp_proxy/tools/orchestration/__init__.py +3 -0
  58. gobby/mcp_proxy/tools/orchestration/cleanup.py +151 -0
  59. gobby/mcp_proxy/tools/orchestration/wait.py +467 -0
  60. gobby/mcp_proxy/tools/session_messages.py +1 -2
  61. gobby/mcp_proxy/tools/skills/__init__.py +631 -0
  62. gobby/mcp_proxy/tools/task_orchestration.py +7 -0
  63. gobby/mcp_proxy/tools/task_readiness.py +14 -0
  64. gobby/mcp_proxy/tools/task_sync.py +1 -1
  65. gobby/mcp_proxy/tools/tasks/_context.py +0 -20
  66. gobby/mcp_proxy/tools/tasks/_crud.py +91 -4
  67. gobby/mcp_proxy/tools/tasks/_expansion.py +348 -0
  68. gobby/mcp_proxy/tools/tasks/_factory.py +6 -16
  69. gobby/mcp_proxy/tools/tasks/_lifecycle.py +60 -29
  70. gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +18 -29
  71. gobby/mcp_proxy/tools/workflows.py +1 -1
  72. gobby/mcp_proxy/tools/worktrees.py +5 -0
  73. gobby/memory/backends/__init__.py +6 -1
  74. gobby/memory/backends/mem0.py +6 -1
  75. gobby/memory/extractor.py +477 -0
  76. gobby/memory/manager.py +11 -2
  77. gobby/prompts/defaults/handoff/compact.md +63 -0
  78. gobby/prompts/defaults/handoff/session_end.md +57 -0
  79. gobby/prompts/defaults/memory/extract.md +61 -0
  80. gobby/runner.py +37 -16
  81. gobby/search/__init__.py +48 -6
  82. gobby/search/backends/__init__.py +159 -0
  83. gobby/search/backends/embedding.py +225 -0
  84. gobby/search/embeddings.py +238 -0
  85. gobby/search/models.py +148 -0
  86. gobby/search/unified.py +496 -0
  87. gobby/servers/http.py +23 -8
  88. gobby/servers/routes/admin.py +280 -0
  89. gobby/servers/routes/mcp/tools.py +241 -52
  90. gobby/servers/websocket.py +2 -2
  91. gobby/sessions/analyzer.py +2 -0
  92. gobby/sessions/transcripts/base.py +1 -0
  93. gobby/sessions/transcripts/claude.py +64 -5
  94. gobby/skills/__init__.py +91 -0
  95. gobby/skills/loader.py +685 -0
  96. gobby/skills/manager.py +384 -0
  97. gobby/skills/parser.py +258 -0
  98. gobby/skills/search.py +463 -0
  99. gobby/skills/sync.py +119 -0
  100. gobby/skills/updater.py +385 -0
  101. gobby/skills/validator.py +368 -0
  102. gobby/storage/clones.py +378 -0
  103. gobby/storage/database.py +1 -1
  104. gobby/storage/memories.py +43 -13
  105. gobby/storage/migrations.py +180 -6
  106. gobby/storage/sessions.py +73 -0
  107. gobby/storage/skills.py +749 -0
  108. gobby/storage/tasks/_crud.py +4 -4
  109. gobby/storage/tasks/_lifecycle.py +41 -6
  110. gobby/storage/tasks/_manager.py +14 -5
  111. gobby/storage/tasks/_models.py +8 -3
  112. gobby/sync/memories.py +39 -4
  113. gobby/sync/tasks.py +83 -6
  114. gobby/tasks/__init__.py +1 -2
  115. gobby/tasks/validation.py +24 -15
  116. gobby/tui/api_client.py +4 -7
  117. gobby/tui/app.py +5 -3
  118. gobby/tui/screens/orchestrator.py +1 -2
  119. gobby/tui/screens/tasks.py +2 -4
  120. gobby/tui/ws_client.py +1 -1
  121. gobby/utils/daemon_client.py +2 -2
  122. gobby/workflows/actions.py +84 -2
  123. gobby/workflows/context_actions.py +43 -0
  124. gobby/workflows/detection_helpers.py +115 -31
  125. gobby/workflows/engine.py +13 -2
  126. gobby/workflows/lifecycle_evaluator.py +29 -1
  127. gobby/workflows/loader.py +19 -6
  128. gobby/workflows/memory_actions.py +74 -0
  129. gobby/workflows/summary_actions.py +17 -0
  130. gobby/workflows/task_enforcement_actions.py +448 -6
  131. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/METADATA +82 -21
  132. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/RECORD +136 -107
  133. gobby/install/codex/prompts/forget.md +0 -7
  134. gobby/install/codex/prompts/memories.md +0 -7
  135. gobby/install/codex/prompts/recall.md +0 -7
  136. gobby/install/codex/prompts/remember.md +0 -13
  137. gobby/llm/gemini_executor.py +0 -339
  138. gobby/mcp_proxy/tools/task_expansion.py +0 -591
  139. gobby/tasks/context.py +0 -747
  140. gobby/tasks/criteria.py +0 -342
  141. gobby/tasks/expansion.py +0 -626
  142. gobby/tasks/prompts/expand.py +0 -327
  143. gobby/tasks/research.py +0 -421
  144. gobby/tasks/tdd.py +0 -352
  145. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/WHEEL +0 -0
  146. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/entry_points.txt +0 -0
  147. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/licenses/LICENSE.md +0 -0
  148. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/top_level.txt +0 -0
gobby/llm/claude.py CHANGED
@@ -495,7 +495,7 @@ class ClaudeLLMProvider(LLMProvider):
495
495
  """
496
496
  Generate a text description of an image using Claude's vision capabilities.
497
497
 
498
- Uses the Anthropic API directly for vision support.
498
+ Uses LiteLLM for unified cost tracking with anthropic/claude-haiku-4-5 model.
499
499
 
500
500
  Args:
501
501
  image_path: Path to the image file to describe
@@ -508,8 +508,6 @@ class ClaudeLLMProvider(LLMProvider):
508
508
  import mimetypes
509
509
  from pathlib import Path
510
510
 
511
- import anthropic
512
-
513
511
  # Validate image exists
514
512
  path = Path(image_path)
515
513
  if not path.exists():
@@ -534,45 +532,35 @@ class ClaudeLLMProvider(LLMProvider):
534
532
  if context:
535
533
  prompt = f"{context}\n\n{prompt}"
536
534
 
537
- # Use Anthropic API for vision
538
- api_key = os.environ.get("ANTHROPIC_API_KEY")
539
- if not api_key:
540
- return "Image description unavailable (ANTHROPIC_API_KEY not set)"
541
-
535
+ # Use LiteLLM for unified cost tracking
542
536
  try:
543
- client = anthropic.AsyncAnthropic(api_key=api_key)
544
- # Type annotation to satisfy mypy
545
- image_block: anthropic.types.ImageBlockParam = {
546
- "type": "image",
547
- "source": {
548
- "type": "base64",
549
- "media_type": mime_type, # type: ignore[typeddict-item]
550
- "data": image_base64,
551
- },
552
- }
553
- text_block: anthropic.types.TextBlockParam = {
554
- "type": "text",
555
- "text": prompt,
556
- }
557
- message = await client.messages.create(
558
- model="claude-haiku-4-5-latest", # Use haiku for cost efficiency
559
- max_tokens=1024,
537
+ import litellm
538
+
539
+ # Route through LiteLLM with anthropic prefix for cost tracking
540
+ response = await litellm.acompletion(
541
+ model="anthropic/claude-haiku-4-5-20251001", # Use haiku for cost efficiency
560
542
  messages=[
561
543
  {
562
544
  "role": "user",
563
- "content": [image_block, text_block],
545
+ "content": [
546
+ {"type": "text", "text": prompt},
547
+ {
548
+ "type": "image_url",
549
+ "image_url": {"url": f"data:{mime_type};base64,{image_base64}"},
550
+ },
551
+ ],
564
552
  }
565
553
  ],
554
+ max_tokens=1024,
566
555
  )
567
556
 
568
- # Extract text from response
569
- result = ""
570
- for block in message.content:
571
- if hasattr(block, "text"):
572
- result += block.text
573
-
574
- return result if result else "No description generated"
557
+ if not response or not getattr(response, "choices", None):
558
+ return "No description generated"
559
+ return response.choices[0].message.content or "No description generated"
575
560
 
561
+ except ImportError:
562
+ self.logger.error("LiteLLM not installed, falling back to unavailable")
563
+ return "Image description unavailable (LiteLLM not installed)"
576
564
  except Exception as e:
577
- self.logger.error(f"Failed to describe image with Claude: {e}")
565
+ self.logger.error(f"Failed to describe image with Claude via LiteLLM: {e}")
578
566
  return f"Image description failed: {e}"
@@ -1,22 +1,20 @@
1
1
  """
2
- Claude implementation of AgentExecutor.
2
+ Claude implementation of AgentExecutor for subscription mode only.
3
3
 
4
- Supports multiple auth modes:
5
- - api_key: Direct Anthropic API with API key
6
- - subscription: Claude Agent SDK with CLI (Pro/Team subscriptions)
4
+ This executor uses the Claude Agent SDK with CLI for Pro/Team subscriptions.
5
+
6
+ Note: api_key mode is now routed through LiteLLMExecutor for unified cost tracking.
7
+ Use the resolver.create_executor() function which handles routing automatically.
7
8
  """
8
9
 
9
10
  import asyncio
10
11
  import concurrent.futures
11
12
  import json
12
13
  import logging
13
- import os
14
14
  import shutil
15
15
  from collections.abc import Callable
16
16
  from typing import Any, Literal
17
17
 
18
- import anthropic
19
-
20
18
  from gobby.llm.executor import (
21
19
  AgentExecutor,
22
20
  AgentResult,
@@ -28,26 +26,28 @@ from gobby.llm.executor import (
28
26
 
29
27
  logger = logging.getLogger(__name__)
30
28
 
31
- # Auth mode type
32
- ClaudeAuthMode = Literal["api_key", "subscription"]
29
+ # Auth mode type - subscription only, api_key routes through LiteLLM
30
+ ClaudeAuthMode = Literal["subscription"]
33
31
 
34
32
 
35
33
  class ClaudeExecutor(AgentExecutor):
36
34
  """
37
- Claude implementation of AgentExecutor.
35
+ Claude implementation of AgentExecutor for subscription mode only.
36
+
37
+ Uses Claude Agent SDK with CLI for Pro/Team subscriptions. This executor
38
+ is for subscription-based authentication only.
38
39
 
39
- Supports two authentication modes:
40
- - api_key: Uses the Anthropic API directly with an API key
41
- - subscription: Uses Claude Agent SDK with CLI for Pro/Team subscriptions
40
+ For api_key mode, use LiteLLMExecutor with provider="claude" which routes
41
+ through anthropic/model-name for unified cost tracking.
42
42
 
43
43
  The executor implements a proper agentic loop:
44
- 1. Send prompt to Claude with tool schemas
44
+ 1. Send prompt to Claude with tool schemas via SDK
45
45
  2. When Claude requests a tool, call tool_handler
46
46
  3. Send tool result back to Claude
47
47
  4. Repeat until Claude stops requesting tools or limits are reached
48
48
 
49
49
  Example:
50
- >>> executor = ClaudeExecutor(auth_mode="api_key", api_key="sk-ant-...")
50
+ >>> executor = ClaudeExecutor(auth_mode="subscription")
51
51
  >>> result = await executor.run(
52
52
  ... prompt="Create a task",
53
53
  ... tools=[ToolSchema(name="create_task", ...)],
@@ -55,71 +55,47 @@ class ClaudeExecutor(AgentExecutor):
55
55
  ... )
56
56
  """
57
57
 
58
- _client: anthropic.AsyncAnthropic | None
59
58
  _cli_path: str
60
59
 
61
60
  def __init__(
62
61
  self,
63
- auth_mode: ClaudeAuthMode = "api_key",
64
- api_key: str | None = None,
62
+ auth_mode: ClaudeAuthMode = "subscription",
65
63
  default_model: str = "claude-sonnet-4-20250514",
66
64
  ):
67
65
  """
68
- Initialize ClaudeExecutor.
66
+ Initialize ClaudeExecutor for subscription mode.
69
67
 
70
68
  Args:
71
- auth_mode: Authentication mode ("api_key" or "subscription").
72
- api_key: Anthropic API key (required for api_key mode).
69
+ auth_mode: Must be "subscription". API key mode is handled by LiteLLMExecutor.
73
70
  default_model: Default model to use if not specified in run().
71
+
72
+ Raises:
73
+ ValueError: If auth_mode is not "subscription" or Claude CLI not found.
74
74
  """
75
+ if auth_mode != "subscription":
76
+ raise ValueError(
77
+ "ClaudeExecutor only supports subscription mode. "
78
+ "For api_key mode, use LiteLLMExecutor with provider='claude'."
79
+ )
80
+
75
81
  self.auth_mode = auth_mode
76
82
  self.default_model = default_model
77
83
  self.logger = logger
78
- self._client = None
79
84
  self._cli_path = ""
80
85
 
81
- if auth_mode == "api_key":
82
- # Use provided key or fall back to environment variable
83
- key = api_key or os.environ.get("ANTHROPIC_API_KEY")
84
- if not key:
85
- raise ValueError(
86
- "API key required for api_key mode. "
87
- "Provide api_key parameter or set ANTHROPIC_API_KEY env var."
88
- )
89
- self._client = anthropic.AsyncAnthropic(api_key=key)
90
- elif auth_mode == "subscription":
91
- # Verify Claude CLI is available for subscription mode
92
- cli_path = shutil.which("claude")
93
- if not cli_path:
94
- raise ValueError(
95
- "Claude CLI not found in PATH. Install Claude Code for subscription mode."
96
- )
97
- self._cli_path = cli_path
98
- else:
99
- raise ValueError(f"Unknown auth_mode: {auth_mode}")
86
+ # Verify Claude CLI is available for subscription mode
87
+ cli_path = shutil.which("claude")
88
+ if not cli_path:
89
+ raise ValueError(
90
+ "Claude CLI not found in PATH. Install Claude Code for subscription mode."
91
+ )
92
+ self._cli_path = cli_path
100
93
 
101
94
  @property
102
95
  def provider_name(self) -> str:
103
96
  """Return the provider name."""
104
97
  return "claude"
105
98
 
106
- def _convert_tools_to_anthropic_format(
107
- self, tools: list[ToolSchema]
108
- ) -> list[anthropic.types.ToolParam]:
109
- """Convert ToolSchema list to Anthropic API format."""
110
- anthropic_tools: list[anthropic.types.ToolParam] = []
111
- for tool in tools:
112
- # input_schema must have "type": "object" at minimum
113
- input_schema: dict[str, Any] = {"type": "object", **tool.input_schema}
114
- anthropic_tools.append(
115
- {
116
- "name": tool.name,
117
- "description": tool.description,
118
- "input_schema": input_schema,
119
- }
120
- )
121
- return anthropic_tools
122
-
123
99
  async def run(
124
100
  self,
125
101
  prompt: str,
@@ -131,10 +107,10 @@ class ClaudeExecutor(AgentExecutor):
131
107
  timeout: float = 120.0,
132
108
  ) -> AgentResult:
133
109
  """
134
- Execute an agentic loop with tool calling.
110
+ Execute an agentic loop with tool calling via Claude Agent SDK.
135
111
 
136
- Runs Claude with the given prompt, calling tools via tool_handler
137
- until completion, max_turns, or timeout.
112
+ Runs Claude with the given prompt using subscription-based authentication,
113
+ calling tools via tool_handler until completion, max_turns, or timeout.
138
114
 
139
115
  Args:
140
116
  prompt: The user prompt to process.
@@ -148,201 +124,15 @@ class ClaudeExecutor(AgentExecutor):
148
124
  Returns:
149
125
  AgentResult with output, status, and tool call records.
150
126
  """
151
- if self.auth_mode == "api_key":
152
- return await self._run_with_api(
153
- prompt=prompt,
154
- tools=tools,
155
- tool_handler=tool_handler,
156
- system_prompt=system_prompt,
157
- model=model or self.default_model,
158
- max_turns=max_turns,
159
- timeout=timeout,
160
- )
161
- else:
162
- return await self._run_with_sdk(
163
- prompt=prompt,
164
- tools=tools,
165
- tool_handler=tool_handler,
166
- system_prompt=system_prompt,
167
- model=model or self.default_model,
168
- max_turns=max_turns,
169
- timeout=timeout,
170
- )
171
-
172
- async def _run_with_api(
173
- self,
174
- prompt: str,
175
- tools: list[ToolSchema],
176
- tool_handler: ToolHandler,
177
- system_prompt: str | None,
178
- model: str,
179
- max_turns: int,
180
- timeout: float,
181
- ) -> AgentResult:
182
- """Run using direct Anthropic API."""
183
- if self._client is None:
184
- return AgentResult(
185
- output="",
186
- status="error",
187
- error="Anthropic client not initialized",
188
- turns_used=0,
189
- )
190
-
191
- tool_calls: list[ToolCallRecord] = []
192
- anthropic_tools = self._convert_tools_to_anthropic_format(tools)
193
-
194
- # Build initial messages
195
- messages: list[anthropic.types.MessageParam] = [{"role": "user", "content": prompt}]
196
-
197
- # Track turns in outer scope so timeout handler can access the count
198
- turns_counter = [0]
199
-
200
- async def _run_loop() -> AgentResult:
201
- nonlocal messages
202
- turns_used = 0
203
- final_output = ""
204
- client = self._client
205
- if client is None:
206
- raise RuntimeError("ClaudeExecutor client not initialized")
207
-
208
- while turns_used < max_turns:
209
- turns_used += 1
210
- turns_counter[0] = turns_used
211
-
212
- # Call Claude
213
- try:
214
- response = await client.messages.create(
215
- model=model,
216
- max_tokens=8192,
217
- system=system_prompt or "You are a helpful assistant.",
218
- messages=messages,
219
- tools=anthropic_tools if anthropic_tools else [],
220
- )
221
- except anthropic.APIError as e:
222
- return AgentResult(
223
- output="",
224
- status="error",
225
- tool_calls=tool_calls,
226
- error=f"Anthropic API error: {e}",
227
- turns_used=turns_used,
228
- )
229
-
230
- # Process response
231
- assistant_content: list[anthropic.types.ContentBlockParam] = []
232
- tool_use_blocks: list[dict[str, Any]] = []
233
-
234
- for block in response.content:
235
- if block.type == "text":
236
- final_output = block.text
237
- assistant_content.append({"type": "text", "text": block.text})
238
- elif block.type == "tool_use":
239
- tool_use_blocks.append(
240
- {
241
- "id": block.id,
242
- "name": block.name,
243
- "input": block.input,
244
- }
245
- )
246
- assistant_content.append(
247
- {
248
- "type": "tool_use",
249
- "id": block.id,
250
- "name": block.name,
251
- "input": dict(block.input) if block.input else {},
252
- }
253
- )
254
-
255
- # Add assistant message to history
256
- messages.append({"role": "assistant", "content": assistant_content})
257
-
258
- # If no tool use, we're done
259
- if not tool_use_blocks:
260
- return AgentResult(
261
- output=final_output,
262
- status="success",
263
- tool_calls=tool_calls,
264
- turns_used=turns_used,
265
- )
266
-
267
- # Handle tool calls
268
- tool_results: list[anthropic.types.ToolResultBlockParam] = []
269
-
270
- for tool_use in tool_use_blocks:
271
- tool_name = tool_use["name"]
272
- arguments = tool_use["input"] if isinstance(tool_use["input"], dict) else {}
273
-
274
- # Record the tool call
275
- record = ToolCallRecord(
276
- tool_name=tool_name,
277
- arguments=arguments,
278
- )
279
- tool_calls.append(record)
280
-
281
- # Execute via handler
282
- try:
283
- result = await tool_handler(tool_name, arguments)
284
- record.result = result
285
-
286
- # Format result for Claude
287
- if result.success:
288
- content = json.dumps(result.result) if result.result else "Success"
289
- else:
290
- content = f"Error: {result.error}"
291
-
292
- tool_results.append(
293
- {
294
- "type": "tool_result",
295
- "tool_use_id": tool_use["id"],
296
- "content": content,
297
- }
298
- )
299
- except Exception as e:
300
- self.logger.error(f"Tool handler error for {tool_name}: {e}")
301
- record.result = ToolResult(
302
- tool_name=tool_name,
303
- success=False,
304
- error=str(e),
305
- )
306
- tool_results.append(
307
- {
308
- "type": "tool_result",
309
- "tool_use_id": tool_use["id"],
310
- "content": f"Error: {e}",
311
- "is_error": True,
312
- }
313
- )
314
-
315
- # Add tool results to messages
316
- messages.append({"role": "user", "content": tool_results})
317
-
318
- # Check stop reason
319
- if response.stop_reason == "end_turn":
320
- return AgentResult(
321
- output=final_output,
322
- status="success",
323
- tool_calls=tool_calls,
324
- turns_used=turns_used,
325
- )
326
-
327
- # Max turns reached
328
- return AgentResult(
329
- output=final_output,
330
- status="partial",
331
- tool_calls=tool_calls,
332
- turns_used=turns_used,
333
- )
334
-
335
- # Run with timeout
336
- try:
337
- return await asyncio.wait_for(_run_loop(), timeout=timeout)
338
- except TimeoutError:
339
- return AgentResult(
340
- output="",
341
- status="timeout",
342
- tool_calls=tool_calls,
343
- error=f"Execution timed out after {timeout}s",
344
- turns_used=turns_counter[0],
345
- )
127
+ return await self._run_with_sdk(
128
+ prompt=prompt,
129
+ tools=tools,
130
+ tool_handler=tool_handler,
131
+ system_prompt=system_prompt,
132
+ model=model or self.default_model,
133
+ max_turns=max_turns,
134
+ timeout=timeout,
135
+ )
346
136
 
347
137
  async def _run_with_sdk(
348
138
  self,