gobby-0.2.5-py3-none-any.whl → gobby-0.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. gobby/adapters/claude_code.py +13 -4
  2. gobby/adapters/codex.py +43 -3
  3. gobby/agents/runner.py +8 -0
  4. gobby/cli/__init__.py +6 -0
  5. gobby/cli/clones.py +419 -0
  6. gobby/cli/conductor.py +266 -0
  7. gobby/cli/installers/antigravity.py +3 -9
  8. gobby/cli/installers/claude.py +9 -9
  9. gobby/cli/installers/codex.py +2 -8
  10. gobby/cli/installers/gemini.py +2 -8
  11. gobby/cli/installers/shared.py +71 -8
  12. gobby/cli/skills.py +858 -0
  13. gobby/cli/tasks/ai.py +0 -440
  14. gobby/cli/tasks/crud.py +44 -6
  15. gobby/cli/tasks/main.py +0 -4
  16. gobby/cli/tui.py +2 -2
  17. gobby/cli/utils.py +3 -3
  18. gobby/clones/__init__.py +13 -0
  19. gobby/clones/git.py +547 -0
  20. gobby/conductor/__init__.py +16 -0
  21. gobby/conductor/alerts.py +135 -0
  22. gobby/conductor/loop.py +164 -0
  23. gobby/conductor/monitors/__init__.py +11 -0
  24. gobby/conductor/monitors/agents.py +116 -0
  25. gobby/conductor/monitors/tasks.py +155 -0
  26. gobby/conductor/pricing.py +234 -0
  27. gobby/conductor/token_tracker.py +160 -0
  28. gobby/config/app.py +63 -1
  29. gobby/config/search.py +110 -0
  30. gobby/config/servers.py +1 -1
  31. gobby/config/skills.py +43 -0
  32. gobby/config/tasks.py +6 -14
  33. gobby/hooks/event_handlers.py +145 -2
  34. gobby/hooks/hook_manager.py +48 -2
  35. gobby/hooks/skill_manager.py +130 -0
  36. gobby/install/claude/hooks/hook_dispatcher.py +4 -4
  37. gobby/install/codex/hooks/hook_dispatcher.py +1 -1
  38. gobby/install/gemini/hooks/hook_dispatcher.py +87 -12
  39. gobby/llm/claude.py +22 -34
  40. gobby/llm/claude_executor.py +46 -256
  41. gobby/llm/codex_executor.py +59 -291
  42. gobby/llm/executor.py +21 -0
  43. gobby/llm/gemini.py +134 -110
  44. gobby/llm/litellm_executor.py +143 -6
  45. gobby/llm/resolver.py +95 -33
  46. gobby/mcp_proxy/instructions.py +54 -0
  47. gobby/mcp_proxy/models.py +15 -0
  48. gobby/mcp_proxy/registries.py +68 -5
  49. gobby/mcp_proxy/server.py +33 -3
  50. gobby/mcp_proxy/services/tool_proxy.py +81 -1
  51. gobby/mcp_proxy/stdio.py +2 -1
  52. gobby/mcp_proxy/tools/__init__.py +0 -2
  53. gobby/mcp_proxy/tools/agent_messaging.py +317 -0
  54. gobby/mcp_proxy/tools/clones.py +903 -0
  55. gobby/mcp_proxy/tools/memory.py +1 -24
  56. gobby/mcp_proxy/tools/metrics.py +65 -1
  57. gobby/mcp_proxy/tools/orchestration/__init__.py +3 -0
  58. gobby/mcp_proxy/tools/orchestration/cleanup.py +151 -0
  59. gobby/mcp_proxy/tools/orchestration/wait.py +467 -0
  60. gobby/mcp_proxy/tools/session_messages.py +1 -2
  61. gobby/mcp_proxy/tools/skills/__init__.py +631 -0
  62. gobby/mcp_proxy/tools/task_orchestration.py +7 -0
  63. gobby/mcp_proxy/tools/task_readiness.py +14 -0
  64. gobby/mcp_proxy/tools/task_sync.py +1 -1
  65. gobby/mcp_proxy/tools/tasks/_context.py +0 -20
  66. gobby/mcp_proxy/tools/tasks/_crud.py +91 -4
  67. gobby/mcp_proxy/tools/tasks/_expansion.py +348 -0
  68. gobby/mcp_proxy/tools/tasks/_factory.py +6 -16
  69. gobby/mcp_proxy/tools/tasks/_lifecycle.py +60 -29
  70. gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +18 -29
  71. gobby/mcp_proxy/tools/workflows.py +1 -1
  72. gobby/mcp_proxy/tools/worktrees.py +5 -0
  73. gobby/memory/backends/__init__.py +6 -1
  74. gobby/memory/backends/mem0.py +6 -1
  75. gobby/memory/extractor.py +477 -0
  76. gobby/memory/manager.py +11 -2
  77. gobby/prompts/defaults/handoff/compact.md +63 -0
  78. gobby/prompts/defaults/handoff/session_end.md +57 -0
  79. gobby/prompts/defaults/memory/extract.md +61 -0
  80. gobby/runner.py +37 -16
  81. gobby/search/__init__.py +48 -6
  82. gobby/search/backends/__init__.py +159 -0
  83. gobby/search/backends/embedding.py +225 -0
  84. gobby/search/embeddings.py +238 -0
  85. gobby/search/models.py +148 -0
  86. gobby/search/unified.py +496 -0
  87. gobby/servers/http.py +23 -8
  88. gobby/servers/routes/admin.py +280 -0
  89. gobby/servers/routes/mcp/tools.py +241 -52
  90. gobby/servers/websocket.py +2 -2
  91. gobby/sessions/analyzer.py +2 -0
  92. gobby/sessions/transcripts/base.py +1 -0
  93. gobby/sessions/transcripts/claude.py +64 -5
  94. gobby/skills/__init__.py +91 -0
  95. gobby/skills/loader.py +685 -0
  96. gobby/skills/manager.py +384 -0
  97. gobby/skills/parser.py +258 -0
  98. gobby/skills/search.py +463 -0
  99. gobby/skills/sync.py +119 -0
  100. gobby/skills/updater.py +385 -0
  101. gobby/skills/validator.py +368 -0
  102. gobby/storage/clones.py +378 -0
  103. gobby/storage/database.py +1 -1
  104. gobby/storage/memories.py +43 -13
  105. gobby/storage/migrations.py +180 -6
  106. gobby/storage/sessions.py +73 -0
  107. gobby/storage/skills.py +749 -0
  108. gobby/storage/tasks/_crud.py +4 -4
  109. gobby/storage/tasks/_lifecycle.py +41 -6
  110. gobby/storage/tasks/_manager.py +14 -5
  111. gobby/storage/tasks/_models.py +8 -3
  112. gobby/sync/memories.py +39 -4
  113. gobby/sync/tasks.py +83 -6
  114. gobby/tasks/__init__.py +1 -2
  115. gobby/tasks/validation.py +24 -15
  116. gobby/tui/api_client.py +4 -7
  117. gobby/tui/app.py +5 -3
  118. gobby/tui/screens/orchestrator.py +1 -2
  119. gobby/tui/screens/tasks.py +2 -4
  120. gobby/tui/ws_client.py +1 -1
  121. gobby/utils/daemon_client.py +2 -2
  122. gobby/workflows/actions.py +84 -2
  123. gobby/workflows/context_actions.py +43 -0
  124. gobby/workflows/detection_helpers.py +115 -31
  125. gobby/workflows/engine.py +13 -2
  126. gobby/workflows/lifecycle_evaluator.py +29 -1
  127. gobby/workflows/loader.py +19 -6
  128. gobby/workflows/memory_actions.py +74 -0
  129. gobby/workflows/summary_actions.py +17 -0
  130. gobby/workflows/task_enforcement_actions.py +448 -6
  131. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/METADATA +82 -21
  132. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/RECORD +136 -107
  133. gobby/install/codex/prompts/forget.md +0 -7
  134. gobby/install/codex/prompts/memories.md +0 -7
  135. gobby/install/codex/prompts/recall.md +0 -7
  136. gobby/install/codex/prompts/remember.md +0 -13
  137. gobby/llm/gemini_executor.py +0 -339
  138. gobby/mcp_proxy/tools/task_expansion.py +0 -591
  139. gobby/tasks/context.py +0 -747
  140. gobby/tasks/criteria.py +0 -342
  141. gobby/tasks/expansion.py +0 -626
  142. gobby/tasks/prompts/expand.py +0 -327
  143. gobby/tasks/research.py +0 -421
  144. gobby/tasks/tdd.py +0 -352
  145. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/WHEEL +0 -0
  146. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/entry_points.txt +0 -0
  147. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/licenses/LICENSE.md +0 -0
  148. {gobby-0.2.5.dist-info → gobby-0.2.6.dist-info}/top_level.txt +0 -0
gobby/search/embeddings.py ADDED
@@ -0,0 +1,238 @@
+ """LiteLLM-based embedding generation.
+
+ This module provides a unified interface for generating embeddings using
+ LiteLLM, which supports multiple providers through a single API:
+
+ | Provider   | Model Format                  | Config                             |
+ |------------|-------------------------------|------------------------------------|
+ | OpenAI     | text-embedding-3-small        | OPENAI_API_KEY                     |
+ | Ollama     | openai/nomic-embed-text       | api_base=http://localhost:11434/v1 |
+ | Azure      | azure/azure-embedding-model   | api_base, api_key, api_version     |
+ | Vertex AI  | vertex_ai/text-embedding-004  | GCP credentials                    |
+ | Gemini     | gemini/text-embedding-004     | GEMINI_API_KEY                     |
+ | Mistral    | mistral/mistral-embed         | MISTRAL_API_KEY                    |
+
+ Example usage:
+     from gobby.search.embeddings import generate_embeddings, is_embedding_available
+
+     if is_embedding_available("text-embedding-3-small"):
+         embeddings = await generate_embeddings(
+             texts=["hello world", "foo bar"],
+             model="text-embedding-3-small"
+         )
+ """
+
+ from __future__ import annotations
+
+ import logging
+ import os
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from gobby.search.models import SearchConfig
+
+ logger = logging.getLogger(__name__)
+
+
+ async def generate_embeddings(
+     texts: list[str],
+     model: str = "text-embedding-3-small",
+     api_base: str | None = None,
+     api_key: str | None = None,
+ ) -> list[list[float]]:
+     """Generate embeddings using LiteLLM.
+
+     Supports OpenAI, Ollama, Azure, Gemini, Mistral and other providers
+     through LiteLLM's unified API.
+
+     Args:
+         texts: List of texts to embed
+         model: LiteLLM model string (e.g., "text-embedding-3-small",
+             "openai/nomic-embed-text" for Ollama)
+         api_base: Optional API base URL for custom endpoints (e.g., Ollama)
+         api_key: Optional API key (uses environment variable if not set)
+
+     Returns:
+         List of embedding vectors (one per input text). Returns an empty
+         list if the input texts list is empty.
+
+     Raises:
+         RuntimeError: If LiteLLM is not installed or embedding fails
+     """
+     if not texts:
+         return []
+
+     try:
+         import litellm
+         from litellm.exceptions import (
+             AuthenticationError,
+             ContextWindowExceededError,
+             NotFoundError,
+             RateLimitError,
+         )
+     except ImportError as e:
+         raise RuntimeError("litellm package not installed. Run: uv add litellm") from e
+
+     # Build kwargs for LiteLLM
+     kwargs: dict[str, str | list[str]] = {
+         "model": model,
+         "input": texts,
+     }
+
+     if api_key:
+         kwargs["api_key"] = api_key
+
+     if api_base:
+         kwargs["api_base"] = api_base
+
+     try:
+         response = await litellm.aembedding(**kwargs)
+         embeddings: list[list[float]] = [item["embedding"] for item in response.data]
+         logger.debug(f"Generated {len(embeddings)} embeddings via LiteLLM ({model})")
+         return embeddings
+     except AuthenticationError as e:
+         logger.error(f"LiteLLM authentication failed: {e}")
+         raise RuntimeError(f"Authentication failed: {e}") from e
+     except NotFoundError as e:
+         logger.error(f"LiteLLM model not found: {e}")
+         raise RuntimeError(f"Model not found: {e}") from e
+     except RateLimitError as e:
+         logger.error(f"LiteLLM rate limit exceeded: {e}")
+         raise RuntimeError(f"Rate limit exceeded: {e}") from e
+     except ContextWindowExceededError as e:
+         logger.error(f"LiteLLM context window exceeded: {e}")
+         raise RuntimeError(f"Context window exceeded: {e}") from e
+     except Exception as e:
+         logger.error(f"Failed to generate embeddings with LiteLLM: {e}")
+         raise RuntimeError(f"Embedding generation failed: {e}") from e
+
+
+ async def generate_embedding(
+     text: str,
+     model: str = "text-embedding-3-small",
+     api_base: str | None = None,
+     api_key: str | None = None,
+ ) -> list[float]:
+     """Generate embedding for a single text.
+
+     Convenience wrapper around generate_embeddings for single texts.
+
+     Args:
+         text: Text to embed
+         model: LiteLLM model string
+         api_base: Optional API base URL
+         api_key: Optional API key
+
+     Returns:
+         Embedding vector as list of floats
+
+     Raises:
+         RuntimeError: If embedding generation fails
+     """
+     embeddings = await generate_embeddings(
+         texts=[text],
+         model=model,
+         api_base=api_base,
+         api_key=api_key,
+     )
+     if not embeddings:
+         raise RuntimeError(
+             f"Embedding API returned empty result for model={model}, "
+             f"api_base={api_base}, api_key={'[set]' if api_key else '[not set]'}"
+         )
+     return embeddings[0]
+
+
+ def is_embedding_available(
+     model: str = "text-embedding-3-small",
+     api_key: str | None = None,
+     api_base: str | None = None,
+ ) -> bool:
+     """Check if embedding is available for the given model.
+
+     For local models (Ollama), assumes availability if api_base is set.
+     For cloud models, requires an API key.
+
+     Args:
+         model: LiteLLM model string
+         api_key: Optional explicit API key
+         api_base: Optional API base URL
+
+     Returns:
+         True if embeddings can be generated, False otherwise
+     """
+     # Local models with api_base (Ollama, custom endpoints) are assumed available
+     if api_base:
+         return True
+
+     # Check for Ollama-style models that use local endpoints
+     if model.startswith("ollama/"):
+         # Native Ollama models - assume available locally
+         # In practice, we'll catch connection errors at runtime
+         return True
+
+     # openai/ prefix models require OpenAI API key
+     if model.startswith("openai/"):
+         effective_key = api_key or os.environ.get("OPENAI_API_KEY")
+         return effective_key is not None and len(effective_key) > 0
+
+     # Cloud models need API key
+     effective_key = api_key
+
+     # Check environment variables based on model prefix
+     if not effective_key:
+         if model.startswith("gemini/"):
+             effective_key = os.environ.get("GEMINI_API_KEY")
+         elif model.startswith("mistral/"):
+             effective_key = os.environ.get("MISTRAL_API_KEY")
+         elif model.startswith("azure/"):
+             effective_key = os.environ.get("AZURE_API_KEY")
+         elif model.startswith("vertex_ai/"):
+             # Vertex AI uses GCP credentials, check for project
+             effective_key = os.environ.get("VERTEXAI_PROJECT")
+         else:
+             # Default to OpenAI
+             effective_key = os.environ.get("OPENAI_API_KEY")
+
+     return effective_key is not None and len(effective_key) > 0
+
+
+ def is_embedding_available_for_config(config: SearchConfig) -> bool:
+     """Check if embedding is available for a SearchConfig.
+
+     Convenience wrapper that extracts config values.
+
+     Args:
+         config: SearchConfig to check
+
+     Returns:
+         True if embeddings can be generated, False otherwise
+     """
+     return is_embedding_available(
+         model=config.embedding_model,
+         api_key=config.embedding_api_key,
+         api_base=config.embedding_api_base,
+     )
+
+
+ async def generate_embeddings_for_config(
+     texts: list[str],
+     config: SearchConfig,
+ ) -> list[list[float]]:
+     """Generate embeddings using a SearchConfig.
+
+     Convenience wrapper that extracts config values.
+
+     Args:
+         texts: List of texts to embed
+         config: SearchConfig with model and API settings
+
+     Returns:
+         List of embedding vectors
+     """
+     return await generate_embeddings(
+         texts=texts,
+         model=config.embedding_model,
+         api_base=config.embedding_api_base,
+         api_key=config.embedding_api_key,
+     )
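
Reviewer's note: everything in this module funnels into litellm.aembedding, with availability checked cheaply up front. The sketch below is hypothetical usage, not code shipped in the wheel; it exercises the Ollama path from the provider table using only names defined in the hunk above.

    import asyncio

    from gobby.search.embeddings import generate_embeddings, is_embedding_available


    async def main() -> None:
        # Ollama served through its OpenAI-compatible endpoint, per the
        # provider table in the module docstring.
        model = "openai/nomic-embed-text"
        api_base = "http://localhost:11434/v1"

        # With api_base set, is_embedding_available() short-circuits to True;
        # connection failures surface later as RuntimeError from
        # generate_embeddings().
        if is_embedding_available(model, api_base=api_base):
            vectors = await generate_embeddings(
                texts=["hello world", "foo bar"],
                model=model,
                api_base=api_base,
            )
            print(f"{len(vectors)} embeddings of dimension {len(vectors[0])}")


    asyncio.run(main())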
gobby/search/models.py ADDED
@@ -0,0 +1,148 @@
+ """Search models and configuration.
+
+ This module defines the core data structures for the unified search layer:
+ - SearchMode: Enum for search modes (tfidf, embedding, auto, hybrid)
+ - SearchConfig: Configuration for search behavior
+ - FallbackEvent: Event emitted when falling back to TF-IDF
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from datetime import UTC, datetime
+ from enum import Enum
+ from typing import Any
+
+ from pydantic import BaseModel, Field
+
+
+ class SearchMode(str, Enum):
+     """Search mode options for UnifiedSearcher.
+
+     Modes:
+     - TFIDF: TF-IDF only (always works, no API needed)
+     - EMBEDDING: Embedding-based search only (fails if unavailable)
+     - AUTO: Try embedding, fallback to TF-IDF if unavailable
+     - HYBRID: Combine both with weighted scores
+     """
+
+     TFIDF = "tfidf"
+     EMBEDDING = "embedding"
+     AUTO = "auto"
+     HYBRID = "hybrid"
+
+
+ class SearchConfig(BaseModel):
+     """Configuration for unified search with fallback.
+
+     This config controls how UnifiedSearcher behaves, including:
+     - Which search mode to use (tfidf, embedding, auto, hybrid)
+     - Which embedding model to use (LiteLLM format)
+     - Weights for hybrid mode
+     - Whether to notify on fallback
+
+     Example configs:
+         # OpenAI (default - just needs OPENAI_API_KEY env var)
+         SearchConfig(mode="auto", embedding_model="text-embedding-3-small")
+
+         # Ollama (local, no API key needed)
+         SearchConfig(
+             mode="auto",
+             embedding_model="openai/nomic-embed-text",
+             embedding_api_base="http://localhost:11434/v1"
+         )
+
+         # Gemini
+         SearchConfig(mode="hybrid", embedding_model="gemini/text-embedding-004")
+     """
+
+     mode: str = Field(
+         default="auto",
+         description="Search mode: tfidf, embedding, auto, hybrid",
+     )
+     embedding_model: str = Field(
+         default="text-embedding-3-small",
+         description="LiteLLM model string (e.g., text-embedding-3-small, openai/nomic-embed-text)",
+     )
+     embedding_api_base: str | None = Field(
+         default=None,
+         description="API base URL for Ollama/custom endpoints (e.g., http://localhost:11434/v1)",
+     )
+     embedding_api_key: str | None = Field(
+         default=None,
+         description="API key for embedding provider (uses env var if not set)",
+     )
+     tfidf_weight: float = Field(
+         default=0.4,
+         ge=0.0,
+         le=1.0,
+         description="Weight for TF-IDF scores in hybrid mode",
+     )
+     embedding_weight: float = Field(
+         default=0.6,
+         ge=0.0,
+         le=1.0,
+         description="Weight for embedding scores in hybrid mode",
+     )
+     notify_on_fallback: bool = Field(
+         default=True,
+         description="Log warning when falling back to TF-IDF",
+     )
+
+     def get_mode_enum(self) -> SearchMode:
+         """Get the mode as a SearchMode enum."""
+         return SearchMode(self.mode)
+
+     def get_normalized_weights(self) -> tuple[float, float]:
+         """Get normalized weights that sum to 1.0.
+
+         Returns:
+             Tuple of (tfidf_weight, embedding_weight) normalized to sum to 1.0
+         """
+         total = self.tfidf_weight + self.embedding_weight
+         if total == 0:
+             # Default to equal weights if both are 0
+             return (0.5, 0.5)
+         return (self.tfidf_weight / total, self.embedding_weight / total)
+
+
+ @dataclass
+ class FallbackEvent:
+     """Event emitted when UnifiedSearcher falls back to TF-IDF.
+
+     This event is emitted via the event_callback when:
+     - Embedding provider is unavailable (no API key, no connection)
+     - Embedding API call fails (rate limit, timeout, error)
+     - Any other embedding-related error occurs
+
+     Attributes:
+         reason: Human-readable explanation of why fallback occurred
+         original_error: The underlying exception, if any
+         timestamp: When the fallback occurred
+         mode: The original search mode that was attempted
+         items_reindexed: Number of items reindexed into TF-IDF (if applicable)
+         metadata: Additional context about the fallback
+     """
+
+     reason: str
+     original_error: Exception | None = None
+     timestamp: datetime = field(default_factory=lambda: datetime.now(UTC))
+     mode: str = "auto"
+     items_reindexed: int = 0
+     metadata: dict[str, Any] = field(default_factory=dict)
+
+     def to_dict(self) -> dict[str, Any]:
+         """Convert to dictionary for logging/serialization."""
+         return {
+             "reason": self.reason,
+             "original_error": str(self.original_error) if self.original_error else None,
+             "timestamp": self.timestamp.isoformat(),
+             "mode": self.mode,
+             "items_reindexed": self.items_reindexed,
+             "metadata": self.metadata,
+         }
+
+     def __str__(self) -> str:
+         """Human-readable string representation."""
+         error_info = f" ({self.original_error})" if self.original_error else ""
+         return f"FallbackEvent: {self.reason}{error_info}"