gobby 0.2.8__py3-none-any.whl → 0.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. gobby/__init__.py +1 -1
  2. gobby/adapters/claude_code.py +3 -26
  3. gobby/app_context.py +59 -0
  4. gobby/cli/utils.py +5 -17
  5. gobby/config/features.py +0 -20
  6. gobby/config/tasks.py +4 -0
  7. gobby/hooks/event_handlers/__init__.py +155 -0
  8. gobby/hooks/event_handlers/_agent.py +175 -0
  9. gobby/hooks/event_handlers/_base.py +87 -0
  10. gobby/hooks/event_handlers/_misc.py +66 -0
  11. gobby/hooks/event_handlers/_session.py +573 -0
  12. gobby/hooks/event_handlers/_tool.py +196 -0
  13. gobby/hooks/hook_manager.py +2 -0
  14. gobby/llm/claude.py +377 -42
  15. gobby/mcp_proxy/importer.py +4 -41
  16. gobby/mcp_proxy/manager.py +13 -3
  17. gobby/mcp_proxy/registries.py +14 -0
  18. gobby/mcp_proxy/services/recommendation.py +2 -28
  19. gobby/mcp_proxy/tools/artifacts.py +3 -3
  20. gobby/mcp_proxy/tools/task_readiness.py +27 -4
  21. gobby/mcp_proxy/tools/workflows/__init__.py +266 -0
  22. gobby/mcp_proxy/tools/workflows/_artifacts.py +225 -0
  23. gobby/mcp_proxy/tools/workflows/_import.py +112 -0
  24. gobby/mcp_proxy/tools/workflows/_lifecycle.py +321 -0
  25. gobby/mcp_proxy/tools/workflows/_query.py +207 -0
  26. gobby/mcp_proxy/tools/workflows/_resolution.py +78 -0
  27. gobby/mcp_proxy/tools/workflows/_terminal.py +139 -0
  28. gobby/memory/components/__init__.py +0 -0
  29. gobby/memory/components/ingestion.py +98 -0
  30. gobby/memory/components/search.py +108 -0
  31. gobby/memory/manager.py +16 -25
  32. gobby/paths.py +51 -0
  33. gobby/prompts/loader.py +1 -35
  34. gobby/runner.py +23 -10
  35. gobby/servers/http.py +186 -149
  36. gobby/servers/routes/admin.py +12 -0
  37. gobby/servers/routes/mcp/endpoints/execution.py +15 -7
  38. gobby/servers/routes/mcp/endpoints/registry.py +8 -8
  39. gobby/sessions/analyzer.py +2 -2
  40. gobby/skills/parser.py +23 -0
  41. gobby/skills/sync.py +5 -4
  42. gobby/storage/artifacts.py +19 -0
  43. gobby/storage/migrations.py +25 -2
  44. gobby/storage/skills.py +47 -7
  45. gobby/tasks/external_validator.py +4 -17
  46. gobby/tasks/validation.py +13 -87
  47. gobby/tools/summarizer.py +18 -51
  48. gobby/utils/status.py +13 -0
  49. gobby/workflows/actions.py +5 -0
  50. gobby/workflows/context_actions.py +21 -24
  51. gobby/workflows/enforcement/__init__.py +11 -1
  52. gobby/workflows/enforcement/blocking.py +96 -0
  53. gobby/workflows/enforcement/handlers.py +35 -1
  54. gobby/workflows/engine.py +6 -3
  55. gobby/workflows/lifecycle_evaluator.py +2 -1
  56. {gobby-0.2.8.dist-info → gobby-0.2.9.dist-info}/METADATA +1 -1
  57. {gobby-0.2.8.dist-info → gobby-0.2.9.dist-info}/RECORD +61 -45
  58. gobby/hooks/event_handlers.py +0 -1008
  59. gobby/mcp_proxy/tools/workflows.py +0 -1023
  60. {gobby-0.2.8.dist-info → gobby-0.2.9.dist-info}/WHEEL +0 -0
  61. {gobby-0.2.8.dist-info → gobby-0.2.9.dist-info}/entry_points.txt +0 -0
  62. {gobby-0.2.8.dist-info → gobby-0.2.9.dist-info}/licenses/LICENSE.md +0 -0
  63. {gobby-0.2.8.dist-info → gobby-0.2.9.dist-info}/top_level.txt +0 -0
gobby/llm/claude.py CHANGED
@@ -1,5 +1,9 @@
 """
 Claude implementation of LLMProvider.
+
+Supports two authentication modes:
+- subscription: Uses Claude Agent SDK via Claude CLI (requires CLI installed)
+- api_key: Uses LiteLLM with anthropic/ prefix (BYOK, no CLI needed)
 """
 
 import asyncio
@@ -9,7 +13,7 @@ import os
 import shutil
 import time
 from dataclasses import dataclass, field
-from typing import Any
+from typing import Any, Literal, cast
 
 from claude_agent_sdk import (
     AssistantMessage,
@@ -26,6 +30,9 @@ from claude_agent_sdk import (
 from gobby.config.app import DaemonConfig
 from gobby.llm.base import LLMProvider
 
+# Type alias for auth mode
+AuthMode = Literal["subscription", "api_key"]
+
 
 @dataclass
 class ToolCall:
@@ -60,9 +67,16 @@ logger = logging.getLogger(__name__)
 
 class ClaudeLLMProvider(LLMProvider):
     """
-    Claude implementation of LLMProvider using claude_agent_sdk.
+    Claude implementation of LLMProvider.
+
+    Supports two authentication modes:
+    - subscription (default): Uses Claude Agent SDK via Claude CLI
+    - api_key: Uses LiteLLM with anthropic/ prefix (BYOK, no CLI needed)
 
-    Uses subscription-based authentication through Claude CLI.
+    The auth_mode is determined by:
+    1. Constructor parameter (highest priority)
+    2. Config file: llm_providers.claude.auth_mode
+    3. Default: "subscription"
     """
 
     @property
@@ -70,16 +84,40 @@ class ClaudeLLMProvider(LLMProvider):
         """Return provider name."""
         return "claude"
 
-    def __init__(self, config: DaemonConfig):
+    @property
+    def auth_mode(self) -> AuthMode:
+        """Return current authentication mode."""
+        return self._auth_mode
+
+    def __init__(
+        self,
+        config: DaemonConfig,
+        auth_mode: AuthMode | None = None,
+    ):
         """
         Initialize ClaudeLLMProvider.
 
         Args:
             config: Client configuration.
+            auth_mode: Authentication mode override. If None, uses config or default.
         """
        self.config = config
        self.logger = logger
-        self._claude_cli_path = self._find_cli_path()
+        self._litellm: Any = None
+
+        # Determine auth mode from param -> config -> default
+        self._auth_mode: AuthMode = "subscription"
+        if auth_mode:
+            self._auth_mode = auth_mode
+        elif config.llm_providers and config.llm_providers.claude:
+            self._auth_mode = config.llm_providers.claude.auth_mode  # type: ignore[assignment]
+
+        # Set up based on auth mode
+        if self._auth_mode == "subscription":
+            self._claude_cli_path = self._find_cli_path()
+        else:  # api_key
+            self._claude_cli_path = None
+            self._setup_litellm()
 
     def _find_cli_path(self) -> str | None:
         """
@@ -147,17 +185,37 @@ class ClaudeLLMProvider(LLMProvider):
 
         return cli_path
 
-    async def generate_summary(
-        self, context: dict[str, Any], prompt_template: str | None = None
-    ) -> str:
+    def _setup_litellm(self) -> None:
         """
-        Generate session summary using Claude.
+        Initialize LiteLLM for api_key mode.
+
+        LiteLLM reads ANTHROPIC_API_KEY from the environment automatically.
         """
-        cli_path = self._verify_cli_path()
-        if not cli_path:
-            return "Session summary unavailable (Claude CLI not found)"
+        try:
+            import litellm
+
+            self._litellm = litellm
+            self.logger.debug("LiteLLM initialized for Claude api_key mode")
+        except ImportError:
+            self.logger.error("litellm package required for api_key mode")
 
-        # Build formatted context for prompt template
+    def _format_summary_context(self, context: dict[str, Any], prompt_template: str | None) -> str:
+        """
+        Format context and validate prompt template for summary generation.
+
+        Transforms list/dict values to strings for template substitution
+        and validates that a prompt template is provided.
+
+        Args:
+            context: Raw context dict with transcript_summary, last_messages, etc.
+            prompt_template: Template string with placeholders for context values.
+
+        Returns:
+            Formatted prompt string ready for LLM consumption.
+
+        Raises:
+            ValueError: If prompt_template is None.
+        """
         # Transform list/dict values to strings for template substitution
         formatted_context = {
             "transcript_summary": context.get("transcript_summary", ""),
@@ -171,13 +229,68 @@ class ClaudeLLMProvider(LLMProvider):
             },
         }
 
-        # Build prompt - prompt_template is required
+        # Validate prompt_template is provided
         if not prompt_template:
             raise ValueError(
                 "prompt_template is required for generate_summary. "
                 "Configure 'session_summary.prompt' in ~/.gobby/config.yaml"
             )
-        prompt = prompt_template.format(**formatted_context)
+
+        return prompt_template.format(**formatted_context)
+
+    async def _retry_async(
+        self,
+        operation: Any,
+        max_retries: int = 3,
+        delay: float = 1.0,
+        on_retry: Any | None = None,
+    ) -> Any:
+        """
+        Execute an async operation with retry logic.
+
+        Args:
+            operation: Callable that returns an awaitable (coroutine factory).
+            max_retries: Maximum number of attempts (default: 3).
+            delay: Delay in seconds between retries (default: 1.0).
+            on_retry: Optional callback(attempt: int, error: Exception) called on retry.
+
+        Returns:
+            Result of the operation if successful.
+
+        Raises:
+            Exception: The last exception if all retries fail.
+        """
+        for attempt in range(max_retries):
+            try:
+                return await operation()
+            except Exception as e:
+                if attempt < max_retries - 1:
+                    if on_retry:
+                        on_retry(attempt, e)
+                    await asyncio.sleep(delay)
+                else:
+                    raise
+
+    async def generate_summary(
+        self, context: dict[str, Any], prompt_template: str | None = None
+    ) -> str:
+        """
+        Generate session summary using Claude.
+        """
+        if self._auth_mode == "subscription":
+            return await self._generate_summary_sdk(context, prompt_template)
+        else:
+            return await self._generate_summary_litellm(context, prompt_template)
+
+    async def _generate_summary_sdk(
+        self, context: dict[str, Any], prompt_template: str | None = None
+    ) -> str:
+        """Generate session summary using Claude Agent SDK (subscription mode)."""
+        cli_path = self._verify_cli_path()
+        if not cli_path:
+            return "Session summary unavailable (Claude CLI not found)"
+
+        prompt = self._format_summary_context(context, prompt_template)
 
         # Configure Claude Agent SDK
         options = ClaudeAgentOptions(
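
Because _format_summary_context fills placeholders with str.format, a configured template can reference any key of the formatted context dict. A hypothetical template (the real one is user-supplied under session_summary.prompt in ~/.gobby/config.yaml; the context values here are made up):

    template = (
        "Summarize this session.\n\n"
        "Transcript summary:\n{transcript_summary}\n\n"
        "Recent messages:\n{last_messages}\n"
    )
    formatted_context = {
        "transcript_summary": "Refactored the auth layer and added tests.",
        "last_messages": "- user: run the tests\n- assistant: all green",
    }
    print(template.format(**formatted_context))
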
@@ -205,8 +318,45 @@ class ClaudeLLMProvider(LLMProvider):
             self.logger.error(f"Failed to generate summary with Claude: {e}")
             return f"Session summary generation failed: {e}"
 
+    async def _generate_summary_litellm(
+        self, context: dict[str, Any], prompt_template: str | None = None
+    ) -> str:
+        """Generate session summary using LiteLLM (api_key mode)."""
+        if not self._litellm:
+            return "Session summary unavailable (LiteLLM not initialized)"
+
+        prompt = self._format_summary_context(context, prompt_template)
+
+        try:
+            response = await self._litellm.acompletion(
+                model=f"anthropic/{self.config.session_summary.model}",
+                messages=[
+                    {
+                        "role": "system",
+                        "content": "You are a session summary generator. Create comprehensive, actionable summaries.",
+                    },
+                    {"role": "user", "content": prompt},
+                ],
+                max_tokens=4000,
+            )
+            return response.choices[0].message.content or ""
+        except Exception as e:
+            self.logger.error(f"Failed to generate summary with LiteLLM: {e}")
+            return f"Session summary generation failed: {e}"
+
     async def synthesize_title(
         self, user_prompt: str, prompt_template: str | None = None
+    ) -> str | None:
+        """
+        Synthesize session title using Claude.
+        """
+        if self._auth_mode == "subscription":
+            return await self._synthesize_title_sdk(user_prompt, prompt_template)
+        else:
+            return await self._synthesize_title_litellm(user_prompt, prompt_template)
+
+    async def _synthesize_title_sdk(
+        self, user_prompt: str, prompt_template: str | None = None
     ) -> str | None:
         """
         Synthesize session title using Claude.
@@ -243,26 +393,63 @@ class ClaudeLLMProvider(LLMProvider):
                         title_text = block.text
             return title_text.strip()
 
+        def _on_retry(attempt: int, error: Exception) -> None:
+            self.logger.warning(
+                f"Title synthesis failed (attempt {attempt + 1}), retrying: {error}"
+            )
+
         try:
-            # Retry logic for title synthesis
-            max_retries = 3
-            for attempt in range(max_retries):
-                try:
-                    return await _run_query()
-                except Exception as e:
-                    if attempt < max_retries - 1:
-                        self.logger.warning(
-                            f"Title synthesis failed (attempt {attempt + 1}), retrying: {e}"
-                        )
-                        await asyncio.sleep(1)
-                    else:
-                        raise e
-            # This should be unreachable, but mypy can't prove it
-            return None  # pragma: no cover
+            result = await self._retry_async(
+                _run_query, max_retries=3, delay=1.0, on_retry=_on_retry
+            )
+            return cast(str, result)
         except Exception as e:
             self.logger.error(f"Failed to synthesize title with Claude: {e}")
             return None
 
+    async def _synthesize_title_litellm(
+        self, user_prompt: str, prompt_template: str | None = None
+    ) -> str | None:
+        """Synthesize session title using LiteLLM (api_key mode)."""
+        if not self._litellm:
+            return None
+
+        # Build prompt - prompt_template is required
+        if not prompt_template:
+            raise ValueError(
+                "prompt_template is required for synthesize_title. "
+                "Configure 'title_synthesis.prompt' in ~/.gobby/config.yaml"
+            )
+        prompt = prompt_template.format(user_prompt=user_prompt)
+
+        async def _run_query() -> str:
+            response = await self._litellm.acompletion(
+                model=f"anthropic/{self.config.title_synthesis.model}",
+                messages=[
+                    {
+                        "role": "system",
+                        "content": "You are a session title generator. Create concise, descriptive titles.",
+                    },
+                    {"role": "user", "content": prompt},
+                ],
+                max_tokens=100,
+            )
+            return (response.choices[0].message.content or "").strip()
+
+        def _on_retry(attempt: int, error: Exception) -> None:
+            self.logger.warning(
+                f"Title synthesis failed (attempt {attempt + 1}), retrying: {error}"
+            )
+
+        try:
+            result = await self._retry_async(
+                _run_query, max_retries=3, delay=1.0, on_retry=_on_retry
+            )
+            return cast(str, result)
+        except Exception as e:
+            self.logger.error(f"Failed to synthesize title with LiteLLM: {e}")
+            return None
+
     async def generate_text(
         self,
         prompt: str,
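
Both title paths above delegate retries to the new _retry_async helper. A runnable stand-in demonstrating its semantics (N attempts, fixed delay, optional callback between attempts); retry_async and flaky here are local mirrors, not gobby APIs:

    import asyncio
    from typing import Any, Awaitable, Callable

    async def retry_async(
        operation: Callable[[], Awaitable[Any]],
        max_retries: int = 3,
        delay: float = 1.0,
        on_retry: Callable[[int, Exception], None] | None = None,
    ) -> Any:
        # Same loop shape as the diff: retry on any exception, re-raise the last one.
        for attempt in range(max_retries):
            try:
                return await operation()
            except Exception as e:
                if attempt < max_retries - 1:
                    if on_retry:
                        on_retry(attempt, e)
                    await asyncio.sleep(delay)
                else:
                    raise

    calls = 0

    async def flaky() -> str:
        global calls
        calls += 1
        if calls < 3:
            raise RuntimeError(f"transient failure #{calls}")
        return "ok"

    result = asyncio.run(
        retry_async(flaky, delay=0.1, on_retry=lambda a, e: print(f"attempt {a + 1} failed: {e}"))
    )
    print(result)  # "ok" on the third attempt
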
@@ -272,6 +459,18 @@ class ClaudeLLMProvider(LLMProvider):
         """
         Generate text using Claude.
         """
+        if self._auth_mode == "subscription":
+            return await self._generate_text_sdk(prompt, system_prompt, model)
+        else:
+            return await self._generate_text_litellm(prompt, system_prompt, model)
+
+    async def _generate_text_sdk(
+        self,
+        prompt: str,
+        system_prompt: str | None = None,
+        model: str | None = None,
+    ) -> str:
+        """Generate text using Claude Agent SDK (subscription mode)."""
         cli_path = self._verify_cli_path()
         if not cli_path:
             return "Generation unavailable (Claude CLI not found)"
@@ -323,6 +522,36 @@ class ClaudeLLMProvider(LLMProvider):
             self.logger.error(f"Failed to generate text with Claude: {e}", exc_info=True)
             return f"Generation failed: {e}"
 
+    async def _generate_text_litellm(
+        self,
+        prompt: str,
+        system_prompt: str | None = None,
+        model: str | None = None,
+    ) -> str:
+        """Generate text using LiteLLM (api_key mode)."""
+        if not self._litellm:
+            return "Generation unavailable (LiteLLM not initialized)"
+
+        model = model or "claude-haiku-4-5"
+        litellm_model = f"anthropic/{model}"
+
+        try:
+            response = await self._litellm.acompletion(
+                model=litellm_model,
+                messages=[
+                    {
+                        "role": "system",
+                        "content": system_prompt or "You are a helpful assistant.",
+                    },
+                    {"role": "user", "content": prompt},
+                ],
+                max_tokens=4000,
+            )
+            return response.choices[0].message.content or ""
+        except Exception as e:
+            self.logger.error(f"Failed to generate text with LiteLLM: {e}", exc_info=True)
+            return f"Generation failed: {e}"
+
     async def generate_with_mcp_tools(
         self,
         prompt: str,
@@ -338,6 +567,9 @@ class ClaudeLLMProvider(LLMProvider):
         This method enables the agent to call MCP tools during generation,
         tracking all tool calls made and returning them alongside the final text.
 
+        Note: This method requires subscription mode (Claude Agent SDK).
+        In api_key mode, returns an error message.
+
         Args:
             prompt: User prompt to process.
             allowed_tools: List of allowed MCP tool patterns.
@@ -364,6 +596,14 @@ class ClaudeLLMProvider(LLMProvider):
            >>> for call in result.tool_calls:
            ...     print(f"Called {call.tool_name} with {call.arguments}")
        """
+        # MCP tools require subscription mode (Claude Agent SDK)
+        if self._auth_mode == "api_key":
+            return MCPToolResult(
+                text="MCP tools require subscription mode. "
+                "Set auth_mode: subscription in llm_providers.claude config.",
+                tool_calls=[],
+            )
+
         cli_path = self._verify_cli_path()
         if not cli_path:
             return MCPToolResult(
@@ -495,7 +735,8 @@ class ClaudeLLMProvider(LLMProvider):
         """
         Generate a text description of an image using Claude's vision capabilities.
 
-        Uses LiteLLM for unified cost tracking with anthropic/claude-haiku-4-5 model.
+        In subscription mode, uses Claude Agent SDK.
+        In api_key mode, uses LiteLLM with anthropic/ prefix.
 
         Args:
             image_path: Path to the image file to describe
@@ -504,6 +745,21 @@ class ClaudeLLMProvider(LLMProvider):
         Returns:
             Text description of the image
         """
+        if self._auth_mode == "subscription":
+            return await self._describe_image_sdk(image_path, context)
+        else:
+            return await self._describe_image_litellm(image_path, context)
+
+    def _prepare_image_data(self, image_path: str) -> tuple[str, str] | str:
+        """
+        Validate and prepare image data for API calls.
+
+        Args:
+            image_path: Path to the image file.
+
+        Returns:
+            Tuple of (image_base64, mime_type) on success, or error string on failure.
+        """
         import base64
         import mimetypes
         from pathlib import Path
@@ -524,21 +780,103 @@ class ClaudeLLMProvider(LLMProvider):
         # Determine media type
         mime_type, _ = mimetypes.guess_type(str(path))
         if mime_type not in ["image/jpeg", "image/png", "image/gif", "image/webp"]:
-            # Default to png for unknown types
             mime_type = "image/png"
 
+        return (image_base64, mime_type)
+
+    async def _describe_image_sdk(
+        self,
+        image_path: str,
+        context: str | None = None,
+    ) -> str:
+        """Describe image using Claude Agent SDK (subscription mode)."""
+        cli_path = self._verify_cli_path()
+        if not cli_path:
+            return "Image description unavailable (Claude CLI not found)"
+
+        # Prepare image data
+        result = self._prepare_image_data(image_path)
+        if isinstance(result, str):
+            return result
+        image_base64, mime_type = result
+
+        # Build prompt with image
+        text_prompt = "Please describe this image in detail, focusing on the key visual elements and any text visible."
+        if context:
+            text_prompt = f"{context}\n\n{text_prompt}"
+
+        # Configure Claude Agent SDK
+        options = ClaudeAgentOptions(
+            system_prompt="You are a vision assistant that describes images in detail.",
+            max_turns=1,
+            model="claude-haiku-4-5",
+            tools=[],
+            allowed_tools=[],
+            permission_mode="default",
+            cli_path=cli_path,
+        )
+
+        # Build async generator yielding structured message with image content
+        # The SDK accepts AsyncIterable[dict] for multimodal input
+        async def _message_generator() -> Any:
+            yield {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": text_prompt},
+                    {
+                        "type": "image",
+                        "source": {
+                            "type": "base64",
+                            "media_type": mime_type,
+                            "data": image_base64,
+                        },
+                    },
+                ],
+            }
+
+        async def _run_query() -> str:
+            result_text = ""
+            async for message in query(prompt=_message_generator(), options=options):
+                if isinstance(message, AssistantMessage):
+                    for block in message.content:
+                        if isinstance(block, TextBlock):
+                            result_text += block.text
+                elif isinstance(message, ResultMessage):
+                    if message.result:
+                        result_text = message.result
+            return result_text
+
+        try:
+            return await _run_query()
+        except Exception as e:
+            self.logger.error(f"Failed to describe image with Claude SDK: {e}")
+            return f"Image description failed: {e}"
+
+    async def _describe_image_litellm(
+        self,
+        image_path: str,
+        context: str | None = None,
+    ) -> str:
+        """Describe image using LiteLLM (api_key mode)."""
+        if not self._litellm:
+            return "Image description unavailable (LiteLLM not initialized)"
+
+        # Prepare image data
+        result = self._prepare_image_data(image_path)
+        if isinstance(result, str):
+            return result
+        image_base64, mime_type = result
+
         # Build prompt
         prompt = "Please describe this image in detail, focusing on the key visual elements and any text visible."
         if context:
             prompt = f"{context}\n\n{prompt}"
 
-        # Use LiteLLM for unified cost tracking
         try:
-            import litellm
-
-            # Route through LiteLLM with anthropic prefix for cost tracking
-            response = await litellm.acompletion(
-                model="anthropic/claude-haiku-4-5-20251001",  # Use haiku for cost efficiency
+            # Route through LiteLLM with anthropic prefix
+            # Use same model as SDK path for consistency
+            response = await self._litellm.acompletion(
+                model="anthropic/claude-haiku-4-5",
                 messages=[
                     {
                         "role": "user",
@@ -558,9 +896,6 @@ class ClaudeLLMProvider(LLMProvider):
                 return "No description generated"
             return response.choices[0].message.content or "No description generated"
 
-        except ImportError:
-            self.logger.error("LiteLLM not installed, falling back to unavailable")
-            return "Image description unavailable (LiteLLM not installed)"
         except Exception as e:
-            self.logger.error(f"Failed to describe image with Claude via LiteLLM: {e}")
+            self.logger.error(f"Failed to describe image with LiteLLM: {e}")
             return f"Image description failed: {e}"
gobby/mcp_proxy/importer.py CHANGED
@@ -5,7 +5,6 @@ import re
 from typing import TYPE_CHECKING, Any
 
 from gobby.config.app import DaemonConfig
-from gobby.config.features import DEFAULT_IMPORT_MCP_SERVER_PROMPT
 from gobby.prompts import PromptLoader
 from gobby.storage.database import DatabaseProtocol
 from gobby.storage.mcp import LocalMCPManager
@@ -20,21 +19,6 @@ logger = logging.getLogger(__name__)
 # Pattern to detect placeholder secrets like <YOUR_API_KEY>
 SECRET_PLACEHOLDER_PATTERN = re.compile(r"<YOUR_[A-Z0-9_]+>")
 
-DEFAULT_GITHUB_FETCH_PROMPT = """Fetch the README from this GitHub repository and extract MCP server configuration:
-
-{github_url}
-
-If the URL doesn't point directly to a README, try to find and fetch the README.md file.
-
-After reading the documentation, extract the MCP server configuration as a JSON object."""
-
-DEFAULT_SEARCH_FETCH_PROMPT = """Search for MCP server: {search_query}
-
-Find the official documentation or GitHub repository for this MCP server.
-Then fetch and read the README or installation docs.
-
-After reading the documentation, extract the MCP server configuration as a JSON object."""
-
 
 class MCPServerImporter:
     """Handles importing MCP servers from various sources."""
@@ -73,11 +57,6 @@ class MCPServerImporter:
 
         self._loader = PromptLoader(project_dir=Path(project_path) if project_path else None)
 
-        # Register fallbacks
-        self._loader.register_fallback("import/github_fetch", lambda: DEFAULT_GITHUB_FETCH_PROMPT)
-        self._loader.register_fallback("import/search_fetch", lambda: DEFAULT_SEARCH_FETCH_PROMPT)
-        self._loader.register_fallback("import/system", lambda: DEFAULT_IMPORT_MCP_SERVER_PROMPT)
-
     async def import_from_project(
         self,
         source_project: str,
@@ -204,19 +183,11 @@ class MCPServerImporter:
 
         # Build prompt to fetch and extract config
         prompt_path = self.import_config.github_fetch_prompt_path or "import/github_fetch"
-        try:
-            prompt = self._loader.render(prompt_path, {"github_url": github_url})
-        except Exception as e:
-            logger.warning(f"Failed to load Github fetch prompt: {e}")
-            prompt = DEFAULT_GITHUB_FETCH_PROMPT.format(github_url=github_url)
+        prompt = self._loader.render(prompt_path, {"github_url": github_url})
 
         # Get system prompt
         sys_prompt_path = self.import_config.prompt_path or "import/system"
-        try:
-            system_prompt = self._loader.render(sys_prompt_path, {})
-        except Exception as e:
-            logger.warning(f"Failed to load import system prompt: {e}")
-            system_prompt = DEFAULT_IMPORT_MCP_SERVER_PROMPT
+        system_prompt = self._loader.render(sys_prompt_path, {})
 
         options = ClaudeAgentOptions(
             system_prompt=system_prompt,
@@ -268,19 +239,11 @@ class MCPServerImporter:
 
         # Build prompt to search and extract config
         prompt_path = self.import_config.search_fetch_prompt_path or "import/search_fetch"
-        try:
-            prompt = self._loader.render(prompt_path, {"search_query": search_query})
-        except Exception as e:
-            logger.warning(f"Failed to load search fetch prompt: {e}")
-            prompt = DEFAULT_SEARCH_FETCH_PROMPT.format(search_query=search_query)
+        prompt = self._loader.render(prompt_path, {"search_query": search_query})
 
         # Get system prompt
         sys_prompt_path = self.import_config.prompt_path or "import/system"
-        try:
-            system_prompt = self._loader.render(sys_prompt_path, {})
-        except Exception as e:
-            logger.warning(f"Failed to load import system prompt: {e}")
-            system_prompt = DEFAULT_IMPORT_MCP_SERVER_PROMPT
+        system_prompt = self._loader.render(sys_prompt_path, {})
 
         options = ClaudeAgentOptions(
             system_prompt=system_prompt,
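
With the hardcoded fallback prompts removed, prompt loading in this file is fail-fast: a missing template now surfaces as an exception from PromptLoader.render instead of silently using a built-in default. A hypothetical stand-in loader illustrating that contract (TEMPLATES and render are made up; the real loader resolves paths like "import/github_fetch" to prompt files):

    TEMPLATES = {
        "import/github_fetch": "Fetch the README from {github_url} and extract the MCP server config.",
    }

    def render(path: str, variables: dict[str, str]) -> str:
        if path not in TEMPLATES:
            # No registered fallback anymore: a missing template raises.
            raise KeyError(f"Prompt template not found: {path}")
        return TEMPLATES[path].format(**variables)

    print(render("import/github_fetch", {"github_url": "https://github.com/example/repo"}))
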
gobby/mcp_proxy/manager.py CHANGED
@@ -684,6 +684,12 @@ class MCPClientManager:
 
     async def get_tool_input_schema(self, server_name: str, tool_name: str) -> dict[str, Any]:
         """Get full inputSchema for a specific tool."""
+        tool_info = await self.get_tool_info(server_name, tool_name)
+        input_schema = tool_info.get("inputSchema", {})
+        return cast(dict[str, Any], input_schema)
+
+    async def get_tool_info(self, server_name: str, tool_name: str) -> dict[str, Any]:
+        """Get full tool info including name, description, and inputSchema."""
 
         # This is an optimization. Instead of calling list_tools again,
         # we try to fetch it. But standard MCP list_tools returns everything.
@@ -696,9 +702,13 @@ class MCPClientManager:
             # tool might be an object or dict
             t_name = getattr(tool, "name", tool.get("name") if isinstance(tool, dict) else None)
             if t_name == tool_name:
-                # Return schema
-                if isinstance(tool, dict) and "inputSchema" in tool:
-                    return cast(dict[str, Any], tool["inputSchema"])
+                if isinstance(tool, dict):
+                    result: dict[str, Any] = {"name": t_name}
+                    if "description" in tool and tool["description"]:
+                        result["description"] = tool["description"]
+                    if "inputSchema" in tool:
+                        result["inputSchema"] = tool["inputSchema"]
+                    return result
 
         raise MCPError(f"Tool {tool_name} not found on server {server_name}")
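
The refactor makes get_tool_input_schema a thin wrapper over the new get_tool_info. A standalone sketch of the extraction logic for dict-shaped tools (the real manager also handles object-shaped tools and raises MCPError):

    from typing import Any

    def extract_tool_info(tools: list[dict[str, Any]], tool_name: str) -> dict[str, Any]:
        # Mirrors the dict branch above: name always; description and
        # inputSchema only when present.
        for tool in tools:
            if tool.get("name") == tool_name:
                result: dict[str, Any] = {"name": tool_name}
                if tool.get("description"):
                    result["description"] = tool["description"]
                if "inputSchema" in tool:
                    result["inputSchema"] = tool["inputSchema"]
                return result
        raise LookupError(f"Tool {tool_name} not found")

    tools = [{"name": "echo", "description": "Echo the input back", "inputSchema": {"type": "object"}}]
    info = extract_tool_info(tools, "echo")
    print(info.get("inputSchema", {}))  # what get_tool_input_schema now returns
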