gobby 0.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gobby/__init__.py +3 -0
- gobby/adapters/__init__.py +30 -0
- gobby/adapters/base.py +93 -0
- gobby/adapters/claude_code.py +276 -0
- gobby/adapters/codex.py +1292 -0
- gobby/adapters/gemini.py +343 -0
- gobby/agents/__init__.py +37 -0
- gobby/agents/codex_session.py +120 -0
- gobby/agents/constants.py +112 -0
- gobby/agents/context.py +362 -0
- gobby/agents/definitions.py +133 -0
- gobby/agents/gemini_session.py +111 -0
- gobby/agents/registry.py +618 -0
- gobby/agents/runner.py +968 -0
- gobby/agents/session.py +259 -0
- gobby/agents/spawn.py +916 -0
- gobby/agents/spawners/__init__.py +77 -0
- gobby/agents/spawners/base.py +142 -0
- gobby/agents/spawners/cross_platform.py +266 -0
- gobby/agents/spawners/embedded.py +225 -0
- gobby/agents/spawners/headless.py +226 -0
- gobby/agents/spawners/linux.py +125 -0
- gobby/agents/spawners/macos.py +277 -0
- gobby/agents/spawners/windows.py +308 -0
- gobby/agents/tty_config.py +319 -0
- gobby/autonomous/__init__.py +32 -0
- gobby/autonomous/progress_tracker.py +447 -0
- gobby/autonomous/stop_registry.py +269 -0
- gobby/autonomous/stuck_detector.py +383 -0
- gobby/cli/__init__.py +67 -0
- gobby/cli/__main__.py +8 -0
- gobby/cli/agents.py +529 -0
- gobby/cli/artifacts.py +266 -0
- gobby/cli/daemon.py +329 -0
- gobby/cli/extensions.py +526 -0
- gobby/cli/github.py +263 -0
- gobby/cli/init.py +53 -0
- gobby/cli/install.py +614 -0
- gobby/cli/installers/__init__.py +37 -0
- gobby/cli/installers/antigravity.py +65 -0
- gobby/cli/installers/claude.py +363 -0
- gobby/cli/installers/codex.py +192 -0
- gobby/cli/installers/gemini.py +294 -0
- gobby/cli/installers/git_hooks.py +377 -0
- gobby/cli/installers/shared.py +737 -0
- gobby/cli/linear.py +250 -0
- gobby/cli/mcp.py +30 -0
- gobby/cli/mcp_proxy.py +698 -0
- gobby/cli/memory.py +304 -0
- gobby/cli/merge.py +384 -0
- gobby/cli/projects.py +79 -0
- gobby/cli/sessions.py +622 -0
- gobby/cli/tasks/__init__.py +30 -0
- gobby/cli/tasks/_utils.py +658 -0
- gobby/cli/tasks/ai.py +1025 -0
- gobby/cli/tasks/commits.py +169 -0
- gobby/cli/tasks/crud.py +685 -0
- gobby/cli/tasks/deps.py +135 -0
- gobby/cli/tasks/labels.py +63 -0
- gobby/cli/tasks/main.py +273 -0
- gobby/cli/tasks/search.py +178 -0
- gobby/cli/tui.py +34 -0
- gobby/cli/utils.py +513 -0
- gobby/cli/workflows.py +927 -0
- gobby/cli/worktrees.py +481 -0
- gobby/config/__init__.py +129 -0
- gobby/config/app.py +551 -0
- gobby/config/extensions.py +167 -0
- gobby/config/features.py +472 -0
- gobby/config/llm_providers.py +98 -0
- gobby/config/logging.py +66 -0
- gobby/config/mcp.py +346 -0
- gobby/config/persistence.py +247 -0
- gobby/config/servers.py +141 -0
- gobby/config/sessions.py +250 -0
- gobby/config/tasks.py +784 -0
- gobby/hooks/__init__.py +104 -0
- gobby/hooks/artifact_capture.py +213 -0
- gobby/hooks/broadcaster.py +243 -0
- gobby/hooks/event_handlers.py +723 -0
- gobby/hooks/events.py +218 -0
- gobby/hooks/git.py +169 -0
- gobby/hooks/health_monitor.py +171 -0
- gobby/hooks/hook_manager.py +856 -0
- gobby/hooks/hook_types.py +575 -0
- gobby/hooks/plugins.py +813 -0
- gobby/hooks/session_coordinator.py +396 -0
- gobby/hooks/verification_runner.py +268 -0
- gobby/hooks/webhooks.py +339 -0
- gobby/install/claude/commands/gobby/bug.md +51 -0
- gobby/install/claude/commands/gobby/chore.md +51 -0
- gobby/install/claude/commands/gobby/epic.md +52 -0
- gobby/install/claude/commands/gobby/eval.md +235 -0
- gobby/install/claude/commands/gobby/feat.md +49 -0
- gobby/install/claude/commands/gobby/nit.md +52 -0
- gobby/install/claude/commands/gobby/ref.md +52 -0
- gobby/install/claude/hooks/HOOK_SCHEMAS.md +632 -0
- gobby/install/claude/hooks/hook_dispatcher.py +364 -0
- gobby/install/claude/hooks/validate_settings.py +102 -0
- gobby/install/claude/hooks-template.json +118 -0
- gobby/install/codex/hooks/hook_dispatcher.py +153 -0
- gobby/install/codex/prompts/forget.md +7 -0
- gobby/install/codex/prompts/memories.md +7 -0
- gobby/install/codex/prompts/recall.md +7 -0
- gobby/install/codex/prompts/remember.md +13 -0
- gobby/install/gemini/hooks/hook_dispatcher.py +268 -0
- gobby/install/gemini/hooks-template.json +138 -0
- gobby/install/shared/plugins/code_guardian.py +456 -0
- gobby/install/shared/plugins/example_notify.py +331 -0
- gobby/integrations/__init__.py +10 -0
- gobby/integrations/github.py +145 -0
- gobby/integrations/linear.py +145 -0
- gobby/llm/__init__.py +40 -0
- gobby/llm/base.py +120 -0
- gobby/llm/claude.py +578 -0
- gobby/llm/claude_executor.py +503 -0
- gobby/llm/codex.py +322 -0
- gobby/llm/codex_executor.py +513 -0
- gobby/llm/executor.py +316 -0
- gobby/llm/factory.py +34 -0
- gobby/llm/gemini.py +258 -0
- gobby/llm/gemini_executor.py +339 -0
- gobby/llm/litellm.py +287 -0
- gobby/llm/litellm_executor.py +303 -0
- gobby/llm/resolver.py +499 -0
- gobby/llm/service.py +236 -0
- gobby/mcp_proxy/__init__.py +29 -0
- gobby/mcp_proxy/actions.py +175 -0
- gobby/mcp_proxy/daemon_control.py +198 -0
- gobby/mcp_proxy/importer.py +436 -0
- gobby/mcp_proxy/lazy.py +325 -0
- gobby/mcp_proxy/manager.py +798 -0
- gobby/mcp_proxy/metrics.py +609 -0
- gobby/mcp_proxy/models.py +139 -0
- gobby/mcp_proxy/registries.py +215 -0
- gobby/mcp_proxy/schema_hash.py +381 -0
- gobby/mcp_proxy/semantic_search.py +706 -0
- gobby/mcp_proxy/server.py +549 -0
- gobby/mcp_proxy/services/__init__.py +0 -0
- gobby/mcp_proxy/services/fallback.py +306 -0
- gobby/mcp_proxy/services/recommendation.py +224 -0
- gobby/mcp_proxy/services/server_mgmt.py +214 -0
- gobby/mcp_proxy/services/system.py +72 -0
- gobby/mcp_proxy/services/tool_filter.py +231 -0
- gobby/mcp_proxy/services/tool_proxy.py +309 -0
- gobby/mcp_proxy/stdio.py +565 -0
- gobby/mcp_proxy/tools/__init__.py +27 -0
- gobby/mcp_proxy/tools/agents.py +1103 -0
- gobby/mcp_proxy/tools/artifacts.py +207 -0
- gobby/mcp_proxy/tools/hub.py +335 -0
- gobby/mcp_proxy/tools/internal.py +337 -0
- gobby/mcp_proxy/tools/memory.py +543 -0
- gobby/mcp_proxy/tools/merge.py +422 -0
- gobby/mcp_proxy/tools/metrics.py +283 -0
- gobby/mcp_proxy/tools/orchestration/__init__.py +23 -0
- gobby/mcp_proxy/tools/orchestration/cleanup.py +619 -0
- gobby/mcp_proxy/tools/orchestration/monitor.py +380 -0
- gobby/mcp_proxy/tools/orchestration/orchestrate.py +746 -0
- gobby/mcp_proxy/tools/orchestration/review.py +736 -0
- gobby/mcp_proxy/tools/orchestration/utils.py +16 -0
- gobby/mcp_proxy/tools/session_messages.py +1056 -0
- gobby/mcp_proxy/tools/task_dependencies.py +219 -0
- gobby/mcp_proxy/tools/task_expansion.py +591 -0
- gobby/mcp_proxy/tools/task_github.py +393 -0
- gobby/mcp_proxy/tools/task_linear.py +379 -0
- gobby/mcp_proxy/tools/task_orchestration.py +77 -0
- gobby/mcp_proxy/tools/task_readiness.py +522 -0
- gobby/mcp_proxy/tools/task_sync.py +351 -0
- gobby/mcp_proxy/tools/task_validation.py +843 -0
- gobby/mcp_proxy/tools/tasks/__init__.py +25 -0
- gobby/mcp_proxy/tools/tasks/_context.py +112 -0
- gobby/mcp_proxy/tools/tasks/_crud.py +516 -0
- gobby/mcp_proxy/tools/tasks/_factory.py +176 -0
- gobby/mcp_proxy/tools/tasks/_helpers.py +129 -0
- gobby/mcp_proxy/tools/tasks/_lifecycle.py +517 -0
- gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +301 -0
- gobby/mcp_proxy/tools/tasks/_resolution.py +55 -0
- gobby/mcp_proxy/tools/tasks/_search.py +215 -0
- gobby/mcp_proxy/tools/tasks/_session.py +125 -0
- gobby/mcp_proxy/tools/workflows.py +973 -0
- gobby/mcp_proxy/tools/worktrees.py +1264 -0
- gobby/mcp_proxy/transports/__init__.py +0 -0
- gobby/mcp_proxy/transports/base.py +95 -0
- gobby/mcp_proxy/transports/factory.py +44 -0
- gobby/mcp_proxy/transports/http.py +139 -0
- gobby/mcp_proxy/transports/stdio.py +213 -0
- gobby/mcp_proxy/transports/websocket.py +136 -0
- gobby/memory/backends/__init__.py +116 -0
- gobby/memory/backends/mem0.py +408 -0
- gobby/memory/backends/memu.py +485 -0
- gobby/memory/backends/null.py +111 -0
- gobby/memory/backends/openmemory.py +537 -0
- gobby/memory/backends/sqlite.py +304 -0
- gobby/memory/context.py +87 -0
- gobby/memory/manager.py +1001 -0
- gobby/memory/protocol.py +451 -0
- gobby/memory/search/__init__.py +66 -0
- gobby/memory/search/text.py +127 -0
- gobby/memory/viz.py +258 -0
- gobby/prompts/__init__.py +13 -0
- gobby/prompts/defaults/expansion/system.md +119 -0
- gobby/prompts/defaults/expansion/user.md +48 -0
- gobby/prompts/defaults/external_validation/agent.md +72 -0
- gobby/prompts/defaults/external_validation/external.md +63 -0
- gobby/prompts/defaults/external_validation/spawn.md +83 -0
- gobby/prompts/defaults/external_validation/system.md +6 -0
- gobby/prompts/defaults/features/import_mcp.md +22 -0
- gobby/prompts/defaults/features/import_mcp_github.md +17 -0
- gobby/prompts/defaults/features/import_mcp_search.md +16 -0
- gobby/prompts/defaults/features/recommend_tools.md +32 -0
- gobby/prompts/defaults/features/recommend_tools_hybrid.md +35 -0
- gobby/prompts/defaults/features/recommend_tools_llm.md +30 -0
- gobby/prompts/defaults/features/server_description.md +20 -0
- gobby/prompts/defaults/features/server_description_system.md +6 -0
- gobby/prompts/defaults/features/task_description.md +31 -0
- gobby/prompts/defaults/features/task_description_system.md +6 -0
- gobby/prompts/defaults/features/tool_summary.md +17 -0
- gobby/prompts/defaults/features/tool_summary_system.md +6 -0
- gobby/prompts/defaults/research/step.md +58 -0
- gobby/prompts/defaults/validation/criteria.md +47 -0
- gobby/prompts/defaults/validation/validate.md +38 -0
- gobby/prompts/loader.py +346 -0
- gobby/prompts/models.py +113 -0
- gobby/py.typed +0 -0
- gobby/runner.py +488 -0
- gobby/search/__init__.py +23 -0
- gobby/search/protocol.py +104 -0
- gobby/search/tfidf.py +232 -0
- gobby/servers/__init__.py +7 -0
- gobby/servers/http.py +636 -0
- gobby/servers/models.py +31 -0
- gobby/servers/routes/__init__.py +23 -0
- gobby/servers/routes/admin.py +416 -0
- gobby/servers/routes/dependencies.py +118 -0
- gobby/servers/routes/mcp/__init__.py +24 -0
- gobby/servers/routes/mcp/hooks.py +135 -0
- gobby/servers/routes/mcp/plugins.py +121 -0
- gobby/servers/routes/mcp/tools.py +1337 -0
- gobby/servers/routes/mcp/webhooks.py +159 -0
- gobby/servers/routes/sessions.py +582 -0
- gobby/servers/websocket.py +766 -0
- gobby/sessions/__init__.py +13 -0
- gobby/sessions/analyzer.py +322 -0
- gobby/sessions/lifecycle.py +240 -0
- gobby/sessions/manager.py +563 -0
- gobby/sessions/processor.py +225 -0
- gobby/sessions/summary.py +532 -0
- gobby/sessions/transcripts/__init__.py +41 -0
- gobby/sessions/transcripts/base.py +125 -0
- gobby/sessions/transcripts/claude.py +386 -0
- gobby/sessions/transcripts/codex.py +143 -0
- gobby/sessions/transcripts/gemini.py +195 -0
- gobby/storage/__init__.py +21 -0
- gobby/storage/agents.py +409 -0
- gobby/storage/artifact_classifier.py +341 -0
- gobby/storage/artifacts.py +285 -0
- gobby/storage/compaction.py +67 -0
- gobby/storage/database.py +357 -0
- gobby/storage/inter_session_messages.py +194 -0
- gobby/storage/mcp.py +680 -0
- gobby/storage/memories.py +562 -0
- gobby/storage/merge_resolutions.py +550 -0
- gobby/storage/migrations.py +860 -0
- gobby/storage/migrations_legacy.py +1359 -0
- gobby/storage/projects.py +166 -0
- gobby/storage/session_messages.py +251 -0
- gobby/storage/session_tasks.py +97 -0
- gobby/storage/sessions.py +817 -0
- gobby/storage/task_dependencies.py +223 -0
- gobby/storage/tasks/__init__.py +42 -0
- gobby/storage/tasks/_aggregates.py +180 -0
- gobby/storage/tasks/_crud.py +449 -0
- gobby/storage/tasks/_id.py +104 -0
- gobby/storage/tasks/_lifecycle.py +311 -0
- gobby/storage/tasks/_manager.py +889 -0
- gobby/storage/tasks/_models.py +300 -0
- gobby/storage/tasks/_ordering.py +119 -0
- gobby/storage/tasks/_path_cache.py +110 -0
- gobby/storage/tasks/_queries.py +343 -0
- gobby/storage/tasks/_search.py +143 -0
- gobby/storage/workflow_audit.py +393 -0
- gobby/storage/worktrees.py +547 -0
- gobby/sync/__init__.py +29 -0
- gobby/sync/github.py +333 -0
- gobby/sync/linear.py +304 -0
- gobby/sync/memories.py +284 -0
- gobby/sync/tasks.py +641 -0
- gobby/tasks/__init__.py +8 -0
- gobby/tasks/build_verification.py +193 -0
- gobby/tasks/commits.py +633 -0
- gobby/tasks/context.py +747 -0
- gobby/tasks/criteria.py +342 -0
- gobby/tasks/enhanced_validator.py +226 -0
- gobby/tasks/escalation.py +263 -0
- gobby/tasks/expansion.py +626 -0
- gobby/tasks/external_validator.py +764 -0
- gobby/tasks/issue_extraction.py +171 -0
- gobby/tasks/prompts/expand.py +327 -0
- gobby/tasks/research.py +421 -0
- gobby/tasks/tdd.py +352 -0
- gobby/tasks/tree_builder.py +263 -0
- gobby/tasks/validation.py +712 -0
- gobby/tasks/validation_history.py +357 -0
- gobby/tasks/validation_models.py +89 -0
- gobby/tools/__init__.py +0 -0
- gobby/tools/summarizer.py +170 -0
- gobby/tui/__init__.py +5 -0
- gobby/tui/api_client.py +281 -0
- gobby/tui/app.py +327 -0
- gobby/tui/screens/__init__.py +25 -0
- gobby/tui/screens/agents.py +333 -0
- gobby/tui/screens/chat.py +450 -0
- gobby/tui/screens/dashboard.py +377 -0
- gobby/tui/screens/memory.py +305 -0
- gobby/tui/screens/metrics.py +231 -0
- gobby/tui/screens/orchestrator.py +904 -0
- gobby/tui/screens/sessions.py +412 -0
- gobby/tui/screens/tasks.py +442 -0
- gobby/tui/screens/workflows.py +289 -0
- gobby/tui/screens/worktrees.py +174 -0
- gobby/tui/widgets/__init__.py +21 -0
- gobby/tui/widgets/chat.py +210 -0
- gobby/tui/widgets/conductor.py +104 -0
- gobby/tui/widgets/menu.py +132 -0
- gobby/tui/widgets/message_panel.py +160 -0
- gobby/tui/widgets/review_gate.py +224 -0
- gobby/tui/widgets/task_tree.py +99 -0
- gobby/tui/widgets/token_budget.py +166 -0
- gobby/tui/ws_client.py +258 -0
- gobby/utils/__init__.py +3 -0
- gobby/utils/daemon_client.py +235 -0
- gobby/utils/git.py +222 -0
- gobby/utils/id.py +38 -0
- gobby/utils/json_helpers.py +161 -0
- gobby/utils/logging.py +376 -0
- gobby/utils/machine_id.py +135 -0
- gobby/utils/metrics.py +589 -0
- gobby/utils/project_context.py +182 -0
- gobby/utils/project_init.py +263 -0
- gobby/utils/status.py +256 -0
- gobby/utils/validation.py +80 -0
- gobby/utils/version.py +23 -0
- gobby/workflows/__init__.py +4 -0
- gobby/workflows/actions.py +1310 -0
- gobby/workflows/approval_flow.py +138 -0
- gobby/workflows/artifact_actions.py +103 -0
- gobby/workflows/audit_helpers.py +110 -0
- gobby/workflows/autonomous_actions.py +286 -0
- gobby/workflows/context_actions.py +394 -0
- gobby/workflows/definitions.py +130 -0
- gobby/workflows/detection_helpers.py +208 -0
- gobby/workflows/engine.py +485 -0
- gobby/workflows/evaluator.py +669 -0
- gobby/workflows/git_utils.py +96 -0
- gobby/workflows/hooks.py +169 -0
- gobby/workflows/lifecycle_evaluator.py +613 -0
- gobby/workflows/llm_actions.py +70 -0
- gobby/workflows/loader.py +333 -0
- gobby/workflows/mcp_actions.py +60 -0
- gobby/workflows/memory_actions.py +272 -0
- gobby/workflows/premature_stop.py +164 -0
- gobby/workflows/session_actions.py +139 -0
- gobby/workflows/state_actions.py +123 -0
- gobby/workflows/state_manager.py +104 -0
- gobby/workflows/stop_signal_actions.py +163 -0
- gobby/workflows/summary_actions.py +344 -0
- gobby/workflows/task_actions.py +249 -0
- gobby/workflows/task_enforcement_actions.py +901 -0
- gobby/workflows/templates.py +52 -0
- gobby/workflows/todo_actions.py +84 -0
- gobby/workflows/webhook.py +223 -0
- gobby/workflows/webhook_executor.py +399 -0
- gobby/worktrees/__init__.py +5 -0
- gobby/worktrees/git.py +690 -0
- gobby/worktrees/merge/__init__.py +20 -0
- gobby/worktrees/merge/conflict_parser.py +177 -0
- gobby/worktrees/merge/resolver.py +485 -0
- gobby-0.2.5.dist-info/METADATA +351 -0
- gobby-0.2.5.dist-info/RECORD +383 -0
- gobby-0.2.5.dist-info/WHEEL +5 -0
- gobby-0.2.5.dist-info/entry_points.txt +2 -0
- gobby-0.2.5.dist-info/licenses/LICENSE.md +193 -0
- gobby-0.2.5.dist-info/top_level.txt +1 -0
gobby/llm/executor.py
ADDED
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Abstract base class for agent executors.
|
|
3
|
+
|
|
4
|
+
AgentExecutor defines the interface for executing agentic loops with tool calling.
|
|
5
|
+
Each LLM provider implements this interface to enable subagent spawning.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from abc import ABC, abstractmethod
|
|
9
|
+
from collections.abc import Awaitable, Callable
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
from typing import Any, Literal
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
|
|
15
|
+
class ToolSchema:
|
|
16
|
+
"""Schema definition for an MCP tool."""
|
|
17
|
+
|
|
18
|
+
name: str
|
|
19
|
+
"""Tool name (e.g., 'create_task')."""
|
|
20
|
+
|
|
21
|
+
description: str
|
|
22
|
+
"""Human-readable description of what the tool does."""
|
|
23
|
+
|
|
24
|
+
input_schema: dict[str, Any]
|
|
25
|
+
"""JSON Schema for the tool's input parameters."""
|
|
26
|
+
|
|
27
|
+
server_name: str | None = None
|
|
28
|
+
"""Optional server name this tool belongs to (e.g., 'gobby-tasks')."""
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass
|
|
32
|
+
class ToolResult:
|
|
33
|
+
"""Result from executing a tool call."""
|
|
34
|
+
|
|
35
|
+
tool_name: str
|
|
36
|
+
"""Name of the tool that was called."""
|
|
37
|
+
|
|
38
|
+
success: bool
|
|
39
|
+
"""Whether the tool call succeeded."""
|
|
40
|
+
|
|
41
|
+
result: Any = None
|
|
42
|
+
"""Result data from the tool (if success=True)."""
|
|
43
|
+
|
|
44
|
+
error: str | None = None
|
|
45
|
+
"""Error message (if success=False)."""
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass
class ToolCallRecord:
    """One entry in the audit trail of tool calls made while an agent ran."""

    # Tool that was invoked.
    tool_name: str
    # Keyword arguments supplied to the tool.
    arguments: dict[str, Any]
    # Outcome of the invocation; None if it never completed.
    result: ToolResult | None = None
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
@dataclass
class AgentResult:
    """Aggregate outcome of a full agent run.

    Carries the agent's final text, a coarse completion status, and
    bookkeeping populated partly by the executor and partly by AgentRunner.
    """

    # Final text output produced by the agent.
    output: str
    # How the run ended.
    status: Literal["success", "partial", "blocked", "timeout", "error"]
    # Every tool call recorded during execution, in order.
    tool_calls: list[ToolCallRecord] = field(default_factory=list)
    # Structured artifacts produced by the agent (via the complete() tool).
    artifacts: dict[str, Any] = field(default_factory=dict)
    # Files modified during execution.
    files_modified: list[str] = field(default_factory=list)
    # Suggested next steps from the agent.
    next_steps: list[str] = field(default_factory=list)
    # Error message; populated when status is 'error'.
    error: str | None = None
    # Number of turns consumed during execution.
    turns_used: int = 0
    # ID of the agent run (set by AgentRunner).
    run_id: str | None = None
    # ID of the child session created for this agent (set by AgentRunner).
    child_session_id: str | None = None
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
# Type alias for the tool handler callback supplied by the caller (AgentRunner).
# The executor invokes it once per tool call requested by the model.
ToolHandler = Callable[[str, dict[str, Any]], Awaitable[ToolResult]]
"""
Callback function to execute a tool call.

Args:
    tool_name: Name of the tool to execute.
    arguments: Arguments to pass to the tool.

Returns:
    ToolResult with success/failure and result data.
"""
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
class AgentExecutor(ABC):
    """
    Abstract base class for executing agentic loops with tool calling.

    Each LLM provider (Claude, Gemini, Codex, LiteLLM) implements this interface
    to enable subagent spawning. The executor handles:

    - Running the agent loop with tool calling
    - Enforcing turn limits and timeouts
    - Calling tools via the provided tool_handler
    - Detecting completion (via 'complete' tool or natural end)
    - Returning structured results

    The tool_handler callback is provided by the caller (AgentRunner) and handles:
    - Workflow-based tool filtering
    - Routing to MCP servers
    - Recording tool metrics

    Example usage:
        >>> executor = ClaudeExecutor(config)
        >>> result = await executor.run(
        ...     prompt="Create a task called 'Fix bug'",
        ...     tools=[ToolSchema(name="create_task", ...)],
        ...     tool_handler=my_tool_handler,
        ... )
        >>> print(result.output)
        >>> print(result.status)
    """

    @property
    @abstractmethod
    def provider_name(self) -> str:
        """
        Return the provider name for this executor.

        Returns:
            Provider name (e.g., "claude", "gemini", "litellm", "codex").
        """

    @abstractmethod
    async def run(
        self,
        prompt: str,
        tools: list[ToolSchema],
        tool_handler: ToolHandler,
        system_prompt: str | None = None,
        model: str | None = None,
        max_turns: int = 10,
        timeout: float = 120.0,
    ) -> AgentResult:
        """
        Execute an agentic loop with tool calling.

        Runs the agent with the given prompt, making tool calls as needed
        until completion, max_turns, or timeout.

        Args:
            prompt: The user prompt to process.
            tools: List of available tools with their schemas.
            tool_handler: Callback to execute tool calls. This is called
                whenever the agent wants to use a tool.
            system_prompt: Optional system prompt to set agent behavior.
            model: Optional model override. If None, uses provider default.
            max_turns: Maximum number of turns before stopping (default: 10).
            timeout: Maximum execution time in seconds (default: 120.0).

        Returns:
            AgentResult containing output, status, tool calls, and artifacts.

        Raises:
            No exceptions should be raised - errors are captured in AgentResult.
        """

    async def run_with_complete_tool(
        self,
        prompt: str,
        tools: list[ToolSchema],
        tool_handler: ToolHandler,
        system_prompt: str | None = None,
        model: str | None = None,
        max_turns: int = 10,
        timeout: float = 120.0,
    ) -> AgentResult:
        """
        Execute an agentic loop that requires explicit completion via 'complete' tool.

        This is a convenience wrapper that adds the 'complete' tool to the
        available tools and watches for its invocation to terminate the loop.

        The complete tool signature:
            complete(
                output: str,
                status: Literal["success", "partial", "blocked"] = "success",
                artifacts: dict[str, Any] = {},
                files_modified: list[str] = [],
                next_steps: list[str] = [],
            )

        Args:
            prompt: The user prompt to process.
            tools: List of available tools (complete tool is added automatically).
                NOTE: a caller-provided tool named 'complete' would be shadowed
                by the wrapper's interception below.
            tool_handler: Callback to execute tool calls.
            system_prompt: Optional system prompt.
            model: Optional model override.
            max_turns: Maximum turns before stopping.
            timeout: Maximum execution time in seconds.

        Returns:
            AgentResult populated from the complete() call, or with status='timeout'
            if the agent didn't call complete() before limits were reached.
        """
        # Schema for the synthetic 'complete' tool appended to the tool list.
        complete_tool = ToolSchema(
            name="complete",
            description=(
                "Signal that you have completed the task. Call this when you are done "
                "with all work. Provide a summary of what was accomplished, any artifacts "
                "produced, and suggested next steps."
            ),
            input_schema={
                "type": "object",
                "properties": {
                    "output": {
                        "type": "string",
                        "description": "Summary of what was accomplished.",
                    },
                    "status": {
                        "type": "string",
                        "enum": ["success", "partial", "blocked"],
                        "default": "success",
                        "description": "Completion status.",
                    },
                    "artifacts": {
                        "type": "object",
                        "description": "Structured outputs from the task.",
                        "default": {},
                    },
                    "files_modified": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "List of files that were modified.",
                        "default": [],
                    },
                    "next_steps": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Suggested next steps for the caller.",
                        "default": [],
                    },
                },
                "required": ["output"],
            },
        )

        all_tools = [*tools, complete_tool]
        completion_result: AgentResult | None = None

        async def wrapped_handler(tool_name: str, arguments: dict[str, Any]) -> ToolResult:
            nonlocal completion_result

            if tool_name == "complete":
                # Validate status against allowed values for complete() tool;
                # anything else (including a missing status) falls back to "success".
                allowed_statuses = {"success", "partial", "blocked"}
                raw_status = arguments.get("status")
                validated_status: Literal["success", "partial", "blocked"] = (
                    raw_status if raw_status in allowed_statuses else "success"
                )

                # Extract completion data. Coalesce explicit nulls: models
                # sometimes pass JSON null for optional fields, and
                # `.get(key, default)` only covers *missing* keys — without
                # the `or` fallback, None would leak into typed AgentResult
                # fields and break callers that index/iterate them.
                completion_result = AgentResult(
                    output=arguments.get("output") or "",
                    status=validated_status,
                    artifacts=arguments.get("artifacts") or {},
                    files_modified=arguments.get("files_modified") or [],
                    next_steps=arguments.get("next_steps") or [],
                )
                return ToolResult(
                    tool_name="complete",
                    success=True,
                    result="Task completed.",
                )

            # Delegate to the original handler
            return await tool_handler(tool_name, arguments)

        # Run with the wrapped handler
        result = await self.run(
            prompt=prompt,
            tools=all_tools,
            tool_handler=wrapped_handler,
            system_prompt=system_prompt,
            model=model,
            max_turns=max_turns,
            timeout=timeout,
        )

        # If complete() was called, use that result (preserving tool_calls and turns)
        if completion_result is not None:
            completion_result.tool_calls = result.tool_calls
            completion_result.turns_used = result.turns_used
            return completion_result

        # Otherwise, return the raw result (might be timeout or natural end)
        return result
|
gobby/llm/factory.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Factory for creating LLM service.
|
|
3
|
+
|
|
4
|
+
Provides factory function for creating LLMService with multi-provider support.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
|
|
9
|
+
from gobby.config.app import DaemonConfig
|
|
10
|
+
from gobby.llm.service import LLMService
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def create_llm_service(config: DaemonConfig) -> LLMService:
    """
    Create an LLM service for multi-provider support.

    Thin factory wrapper: simply constructs an LLMService around the given
    daemon configuration. This function itself performs no validation and
    raises nothing; any configuration errors surface from the LLMService
    constructor instead.

    Args:
        config: Client configuration with llm_providers.

    Returns:
        LLMService instance with access to all configured providers.

    Example:
        service = create_llm_service(config)
        provider, model, prompt = service.get_provider_for_feature(
            config.session_summary
        )
    """
    return LLMService(config)
|
gobby/llm/gemini.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Gemini implementation of LLMProvider.
|
|
3
|
+
|
|
4
|
+
Supports two authentication modes:
|
|
5
|
+
- api_key: Use GEMINI_API_KEY environment variable (BYOK)
|
|
6
|
+
- adc: Use Google Application Default Credentials (subscription-based via gcloud auth)
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
import os
|
|
12
|
+
from typing import Any, Literal
|
|
13
|
+
|
|
14
|
+
from gobby.config.app import DaemonConfig
|
|
15
|
+
from gobby.llm.base import AuthMode, LLMProvider
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class GeminiProvider(LLMProvider):
|
|
21
|
+
"""
|
|
22
|
+
Gemini implementation of LLMProvider using google-generativeai package.
|
|
23
|
+
|
|
24
|
+
Supports two authentication modes:
|
|
25
|
+
- api_key: Use GEMINI_API_KEY environment variable (BYOK)
|
|
26
|
+
- adc: Use Google Application Default Credentials (run `gcloud auth application-default login`)
|
|
27
|
+
"""
|
|
28
|
+
|
|
29
|
+
    def __init__(
        self,
        config: DaemonConfig,
        auth_mode: Literal["api_key", "adc"] | None = None,
    ):
        """
        Initialize GeminiProvider.

        Never raises: every failure path (missing package, missing API key,
        ADC failure, any other init error) is logged and leaves
        ``self.genai`` set to None, so callers must check it before use.

        Args:
            config: Client configuration.
            auth_mode: Override auth mode. If None, reads from config.llm_providers.gemini.auth_mode
                or falls back to "api_key".
        """
        self.config = config
        self.logger = logger
        # Lazily populated below only when the SDK configures successfully.
        self.model_summary = None
        self.model_title = None

        # Determine auth mode from config or parameter; explicit parameter
        # takes precedence over config.
        self._auth_mode: AuthMode = "api_key"  # Default
        if auth_mode:
            self._auth_mode = auth_mode
        elif config.llm_providers and config.llm_providers.gemini:
            self._auth_mode = config.llm_providers.gemini.auth_mode

        try:
            import google.generativeai as genai

            # Initialize based on auth mode
            if self._auth_mode == "adc":
                # Use Application Default Credentials
                # User must run: gcloud auth application-default login
                try:
                    import google.auth

                    # NOTE(review): the returned project id is unused here —
                    # presumably genai does not need it; confirm.
                    credentials, project = google.auth.default()
                    genai.configure(credentials=credentials)
                    self.genai = genai
                    self.logger.debug("Gemini initialized with ADC credentials")
                except Exception as e:
                    self.logger.error(
                        f"Failed to initialize Gemini with ADC: {e}. "
                        "Run 'gcloud auth application-default login' to authenticate."
                    )
                    self.genai = None
            else:
                # Use API key from environment
                api_key = os.environ.get("GEMINI_API_KEY")
                if api_key:
                    genai.configure(api_key=api_key)
                    self.genai = genai
                    self.logger.debug("Gemini initialized with API key")
                else:
                    self.logger.warning("GEMINI_API_KEY not found in environment variables.")
                    self.genai = None

            # Initialize models if genai is configured; per-feature model
            # names come from config with hard-coded fallbacks.
            if self.genai:
                summary_model_name = self.config.session_summary.model or "gemini-1.5-pro"
                title_model_name = self.config.title_synthesis.model or "gemini-1.5-flash"

                self.model_summary = genai.GenerativeModel(summary_model_name)
                self.model_title = genai.GenerativeModel(title_model_name)

        except ImportError:
            self.logger.error(
                "google-generativeai package not found. Please install with `pip install google-generativeai`."
            )
            self.genai = None
        except Exception as e:
            self.logger.error(f"Failed to initialize Gemini client: {e}")
            self.genai = None
|
|
101
|
+
|
|
102
|
+
@property
|
|
103
|
+
def provider_name(self) -> str:
|
|
104
|
+
"""Return provider name."""
|
|
105
|
+
return "gemini"
|
|
106
|
+
|
|
107
|
+
@property
|
|
108
|
+
def auth_mode(self) -> AuthMode:
|
|
109
|
+
"""Return the authentication mode for this provider."""
|
|
110
|
+
return self._auth_mode
|
|
111
|
+
|
|
112
|
+
async def generate_summary(
|
|
113
|
+
self, context: dict[str, Any], prompt_template: str | None = None
|
|
114
|
+
) -> str:
|
|
115
|
+
"""
|
|
116
|
+
Generate session summary using Gemini.
|
|
117
|
+
"""
|
|
118
|
+
if not self.genai or not self.model_summary:
|
|
119
|
+
return "Session summary unavailable (Gemini client not initialized)"
|
|
120
|
+
|
|
121
|
+
# Build formatted context for prompt template
|
|
122
|
+
formatted_context = {
|
|
123
|
+
"transcript_summary": context.get("transcript_summary", ""),
|
|
124
|
+
"last_messages": json.dumps(context.get("last_messages", []), indent=2),
|
|
125
|
+
"git_status": context.get("git_status", ""),
|
|
126
|
+
"file_changes": context.get("file_changes", ""),
|
|
127
|
+
**{
|
|
128
|
+
k: v
|
|
129
|
+
for k, v in context.items()
|
|
130
|
+
if k not in ["transcript_summary", "last_messages", "git_status", "file_changes"]
|
|
131
|
+
},
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
# Build prompt - prompt_template is required
|
|
135
|
+
if not prompt_template:
|
|
136
|
+
raise ValueError(
|
|
137
|
+
"prompt_template is required for generate_summary. "
|
|
138
|
+
"Configure 'session_summary.prompt' in ~/.gobby/config.yaml"
|
|
139
|
+
)
|
|
140
|
+
prompt = prompt_template.format(**formatted_context)
|
|
141
|
+
|
|
142
|
+
try:
|
|
143
|
+
# Gemini async generation
|
|
144
|
+
response = await self.model_summary.generate_content_async(prompt)
|
|
145
|
+
return response.text or ""
|
|
146
|
+
except Exception as e:
|
|
147
|
+
self.logger.error(f"Failed to generate summary with Gemini: {e}")
|
|
148
|
+
return f"Session summary generation failed: {e}"
|
|
149
|
+
|
|
150
|
+
async def synthesize_title(
|
|
151
|
+
self, user_prompt: str, prompt_template: str | None = None
|
|
152
|
+
) -> str | None:
|
|
153
|
+
"""
|
|
154
|
+
Synthesize session title using Gemini.
|
|
155
|
+
"""
|
|
156
|
+
if not self.genai or not self.model_title:
|
|
157
|
+
return None
|
|
158
|
+
|
|
159
|
+
# Build prompt - prompt_template is required
|
|
160
|
+
if not prompt_template:
|
|
161
|
+
raise ValueError(
|
|
162
|
+
"prompt_template is required for synthesize_title. "
|
|
163
|
+
"Configure 'title_synthesis.prompt' in ~/.gobby/config.yaml"
|
|
164
|
+
)
|
|
165
|
+
prompt = prompt_template.format(user_prompt=user_prompt)
|
|
166
|
+
|
|
167
|
+
try:
|
|
168
|
+
response = await self.model_title.generate_content_async(prompt)
|
|
169
|
+
return (response.text or "").strip()
|
|
170
|
+
except Exception as e:
|
|
171
|
+
self.logger.error(f"Failed to synthesize title with Gemini: {e}")
|
|
172
|
+
return None
|
|
173
|
+
|
|
174
|
+
async def generate_text(
|
|
175
|
+
self,
|
|
176
|
+
prompt: str,
|
|
177
|
+
system_prompt: str | None = None,
|
|
178
|
+
model: str | None = None,
|
|
179
|
+
) -> str:
|
|
180
|
+
"""
|
|
181
|
+
Generate text using Gemini.
|
|
182
|
+
"""
|
|
183
|
+
if not self.genai:
|
|
184
|
+
return "Generation unavailable (Gemini client not initialized)"
|
|
185
|
+
|
|
186
|
+
model_name = model or "gemini-1.5-flash"
|
|
187
|
+
|
|
188
|
+
try:
|
|
189
|
+
# Note: Gemini system prompts are configured at model creation,
|
|
190
|
+
# but simple generation usually just includes it in the prompt or uses default.
|
|
191
|
+
# For simplicity we'll just generate content.
|
|
192
|
+
model_instance = self.genai.GenerativeModel(model_name)
|
|
193
|
+
|
|
194
|
+
full_prompt = prompt
|
|
195
|
+
if system_prompt:
|
|
196
|
+
# Prepend system prompt if provided
|
|
197
|
+
full_prompt = f"{system_prompt}\n\n{prompt}"
|
|
198
|
+
|
|
199
|
+
response = await model_instance.generate_content_async(full_prompt)
|
|
200
|
+
return response.text or ""
|
|
201
|
+
except Exception as e:
|
|
202
|
+
self.logger.error(f"Failed to generate text with Gemini: {e}")
|
|
203
|
+
return f"Generation failed: {e}"
|
|
204
|
+
|
|
205
|
+
async def describe_image(
|
|
206
|
+
self,
|
|
207
|
+
image_path: str,
|
|
208
|
+
context: str | None = None,
|
|
209
|
+
) -> str:
|
|
210
|
+
"""
|
|
211
|
+
Generate a text description of an image using Gemini's vision capabilities.
|
|
212
|
+
|
|
213
|
+
Uses Gemini 1.5 Flash for efficient image description.
|
|
214
|
+
|
|
215
|
+
Args:
|
|
216
|
+
image_path: Path to the image file
|
|
217
|
+
context: Optional context to guide the description
|
|
218
|
+
|
|
219
|
+
Returns:
|
|
220
|
+
Text description of the image
|
|
221
|
+
"""
|
|
222
|
+
from pathlib import Path
|
|
223
|
+
|
|
224
|
+
if not self.genai:
|
|
225
|
+
return "Image description unavailable (Gemini client not initialized)"
|
|
226
|
+
|
|
227
|
+
path = Path(image_path)
|
|
228
|
+
if not path.exists():
|
|
229
|
+
return f"Image not found: {image_path}"
|
|
230
|
+
|
|
231
|
+
try:
|
|
232
|
+
# Use PIL to load the image - Gemini accepts PIL images directly
|
|
233
|
+
from PIL import Image
|
|
234
|
+
|
|
235
|
+
# Use context manager to ensure image file handle is properly closed
|
|
236
|
+
with Image.open(path) as image:
|
|
237
|
+
# Build prompt
|
|
238
|
+
prompt = (
|
|
239
|
+
"Please describe this image in detail, focusing on key visual elements, "
|
|
240
|
+
"any text visible, and the overall context or meaning."
|
|
241
|
+
)
|
|
242
|
+
if context:
|
|
243
|
+
prompt = f"{context}\n\n{prompt}"
|
|
244
|
+
|
|
245
|
+
# Use gemini-1.5-flash for efficient vision tasks
|
|
246
|
+
model = self.genai.GenerativeModel("gemini-1.5-flash")
|
|
247
|
+
|
|
248
|
+
# Generate content with image and prompt
|
|
249
|
+
response = await model.generate_content_async([prompt, image])
|
|
250
|
+
|
|
251
|
+
return response.text or "No description generated"
|
|
252
|
+
|
|
253
|
+
except ImportError:
|
|
254
|
+
self.logger.error("PIL/Pillow not installed. Required for image description.")
|
|
255
|
+
return "Image description unavailable (PIL not installed)"
|
|
256
|
+
except Exception as e:
|
|
257
|
+
self.logger.error(f"Failed to describe image with Gemini: {e}")
|
|
258
|
+
return f"Image description failed: {e}"
|