gobby-0.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gobby/__init__.py +3 -0
- gobby/adapters/__init__.py +30 -0
- gobby/adapters/base.py +93 -0
- gobby/adapters/claude_code.py +276 -0
- gobby/adapters/codex.py +1292 -0
- gobby/adapters/gemini.py +343 -0
- gobby/agents/__init__.py +37 -0
- gobby/agents/codex_session.py +120 -0
- gobby/agents/constants.py +112 -0
- gobby/agents/context.py +362 -0
- gobby/agents/definitions.py +133 -0
- gobby/agents/gemini_session.py +111 -0
- gobby/agents/registry.py +618 -0
- gobby/agents/runner.py +968 -0
- gobby/agents/session.py +259 -0
- gobby/agents/spawn.py +916 -0
- gobby/agents/spawners/__init__.py +77 -0
- gobby/agents/spawners/base.py +142 -0
- gobby/agents/spawners/cross_platform.py +266 -0
- gobby/agents/spawners/embedded.py +225 -0
- gobby/agents/spawners/headless.py +226 -0
- gobby/agents/spawners/linux.py +125 -0
- gobby/agents/spawners/macos.py +277 -0
- gobby/agents/spawners/windows.py +308 -0
- gobby/agents/tty_config.py +319 -0
- gobby/autonomous/__init__.py +32 -0
- gobby/autonomous/progress_tracker.py +447 -0
- gobby/autonomous/stop_registry.py +269 -0
- gobby/autonomous/stuck_detector.py +383 -0
- gobby/cli/__init__.py +67 -0
- gobby/cli/__main__.py +8 -0
- gobby/cli/agents.py +529 -0
- gobby/cli/artifacts.py +266 -0
- gobby/cli/daemon.py +329 -0
- gobby/cli/extensions.py +526 -0
- gobby/cli/github.py +263 -0
- gobby/cli/init.py +53 -0
- gobby/cli/install.py +614 -0
- gobby/cli/installers/__init__.py +37 -0
- gobby/cli/installers/antigravity.py +65 -0
- gobby/cli/installers/claude.py +363 -0
- gobby/cli/installers/codex.py +192 -0
- gobby/cli/installers/gemini.py +294 -0
- gobby/cli/installers/git_hooks.py +377 -0
- gobby/cli/installers/shared.py +737 -0
- gobby/cli/linear.py +250 -0
- gobby/cli/mcp.py +30 -0
- gobby/cli/mcp_proxy.py +698 -0
- gobby/cli/memory.py +304 -0
- gobby/cli/merge.py +384 -0
- gobby/cli/projects.py +79 -0
- gobby/cli/sessions.py +622 -0
- gobby/cli/tasks/__init__.py +30 -0
- gobby/cli/tasks/_utils.py +658 -0
- gobby/cli/tasks/ai.py +1025 -0
- gobby/cli/tasks/commits.py +169 -0
- gobby/cli/tasks/crud.py +685 -0
- gobby/cli/tasks/deps.py +135 -0
- gobby/cli/tasks/labels.py +63 -0
- gobby/cli/tasks/main.py +273 -0
- gobby/cli/tasks/search.py +178 -0
- gobby/cli/tui.py +34 -0
- gobby/cli/utils.py +513 -0
- gobby/cli/workflows.py +927 -0
- gobby/cli/worktrees.py +481 -0
- gobby/config/__init__.py +129 -0
- gobby/config/app.py +551 -0
- gobby/config/extensions.py +167 -0
- gobby/config/features.py +472 -0
- gobby/config/llm_providers.py +98 -0
- gobby/config/logging.py +66 -0
- gobby/config/mcp.py +346 -0
- gobby/config/persistence.py +247 -0
- gobby/config/servers.py +141 -0
- gobby/config/sessions.py +250 -0
- gobby/config/tasks.py +784 -0
- gobby/hooks/__init__.py +104 -0
- gobby/hooks/artifact_capture.py +213 -0
- gobby/hooks/broadcaster.py +243 -0
- gobby/hooks/event_handlers.py +723 -0
- gobby/hooks/events.py +218 -0
- gobby/hooks/git.py +169 -0
- gobby/hooks/health_monitor.py +171 -0
- gobby/hooks/hook_manager.py +856 -0
- gobby/hooks/hook_types.py +575 -0
- gobby/hooks/plugins.py +813 -0
- gobby/hooks/session_coordinator.py +396 -0
- gobby/hooks/verification_runner.py +268 -0
- gobby/hooks/webhooks.py +339 -0
- gobby/install/claude/commands/gobby/bug.md +51 -0
- gobby/install/claude/commands/gobby/chore.md +51 -0
- gobby/install/claude/commands/gobby/epic.md +52 -0
- gobby/install/claude/commands/gobby/eval.md +235 -0
- gobby/install/claude/commands/gobby/feat.md +49 -0
- gobby/install/claude/commands/gobby/nit.md +52 -0
- gobby/install/claude/commands/gobby/ref.md +52 -0
- gobby/install/claude/hooks/HOOK_SCHEMAS.md +632 -0
- gobby/install/claude/hooks/hook_dispatcher.py +364 -0
- gobby/install/claude/hooks/validate_settings.py +102 -0
- gobby/install/claude/hooks-template.json +118 -0
- gobby/install/codex/hooks/hook_dispatcher.py +153 -0
- gobby/install/codex/prompts/forget.md +7 -0
- gobby/install/codex/prompts/memories.md +7 -0
- gobby/install/codex/prompts/recall.md +7 -0
- gobby/install/codex/prompts/remember.md +13 -0
- gobby/install/gemini/hooks/hook_dispatcher.py +268 -0
- gobby/install/gemini/hooks-template.json +138 -0
- gobby/install/shared/plugins/code_guardian.py +456 -0
- gobby/install/shared/plugins/example_notify.py +331 -0
- gobby/integrations/__init__.py +10 -0
- gobby/integrations/github.py +145 -0
- gobby/integrations/linear.py +145 -0
- gobby/llm/__init__.py +40 -0
- gobby/llm/base.py +120 -0
- gobby/llm/claude.py +578 -0
- gobby/llm/claude_executor.py +503 -0
- gobby/llm/codex.py +322 -0
- gobby/llm/codex_executor.py +513 -0
- gobby/llm/executor.py +316 -0
- gobby/llm/factory.py +34 -0
- gobby/llm/gemini.py +258 -0
- gobby/llm/gemini_executor.py +339 -0
- gobby/llm/litellm.py +287 -0
- gobby/llm/litellm_executor.py +303 -0
- gobby/llm/resolver.py +499 -0
- gobby/llm/service.py +236 -0
- gobby/mcp_proxy/__init__.py +29 -0
- gobby/mcp_proxy/actions.py +175 -0
- gobby/mcp_proxy/daemon_control.py +198 -0
- gobby/mcp_proxy/importer.py +436 -0
- gobby/mcp_proxy/lazy.py +325 -0
- gobby/mcp_proxy/manager.py +798 -0
- gobby/mcp_proxy/metrics.py +609 -0
- gobby/mcp_proxy/models.py +139 -0
- gobby/mcp_proxy/registries.py +215 -0
- gobby/mcp_proxy/schema_hash.py +381 -0
- gobby/mcp_proxy/semantic_search.py +706 -0
- gobby/mcp_proxy/server.py +549 -0
- gobby/mcp_proxy/services/__init__.py +0 -0
- gobby/mcp_proxy/services/fallback.py +306 -0
- gobby/mcp_proxy/services/recommendation.py +224 -0
- gobby/mcp_proxy/services/server_mgmt.py +214 -0
- gobby/mcp_proxy/services/system.py +72 -0
- gobby/mcp_proxy/services/tool_filter.py +231 -0
- gobby/mcp_proxy/services/tool_proxy.py +309 -0
- gobby/mcp_proxy/stdio.py +565 -0
- gobby/mcp_proxy/tools/__init__.py +27 -0
- gobby/mcp_proxy/tools/agents.py +1103 -0
- gobby/mcp_proxy/tools/artifacts.py +207 -0
- gobby/mcp_proxy/tools/hub.py +335 -0
- gobby/mcp_proxy/tools/internal.py +337 -0
- gobby/mcp_proxy/tools/memory.py +543 -0
- gobby/mcp_proxy/tools/merge.py +422 -0
- gobby/mcp_proxy/tools/metrics.py +283 -0
- gobby/mcp_proxy/tools/orchestration/__init__.py +23 -0
- gobby/mcp_proxy/tools/orchestration/cleanup.py +619 -0
- gobby/mcp_proxy/tools/orchestration/monitor.py +380 -0
- gobby/mcp_proxy/tools/orchestration/orchestrate.py +746 -0
- gobby/mcp_proxy/tools/orchestration/review.py +736 -0
- gobby/mcp_proxy/tools/orchestration/utils.py +16 -0
- gobby/mcp_proxy/tools/session_messages.py +1056 -0
- gobby/mcp_proxy/tools/task_dependencies.py +219 -0
- gobby/mcp_proxy/tools/task_expansion.py +591 -0
- gobby/mcp_proxy/tools/task_github.py +393 -0
- gobby/mcp_proxy/tools/task_linear.py +379 -0
- gobby/mcp_proxy/tools/task_orchestration.py +77 -0
- gobby/mcp_proxy/tools/task_readiness.py +522 -0
- gobby/mcp_proxy/tools/task_sync.py +351 -0
- gobby/mcp_proxy/tools/task_validation.py +843 -0
- gobby/mcp_proxy/tools/tasks/__init__.py +25 -0
- gobby/mcp_proxy/tools/tasks/_context.py +112 -0
- gobby/mcp_proxy/tools/tasks/_crud.py +516 -0
- gobby/mcp_proxy/tools/tasks/_factory.py +176 -0
- gobby/mcp_proxy/tools/tasks/_helpers.py +129 -0
- gobby/mcp_proxy/tools/tasks/_lifecycle.py +517 -0
- gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +301 -0
- gobby/mcp_proxy/tools/tasks/_resolution.py +55 -0
- gobby/mcp_proxy/tools/tasks/_search.py +215 -0
- gobby/mcp_proxy/tools/tasks/_session.py +125 -0
- gobby/mcp_proxy/tools/workflows.py +973 -0
- gobby/mcp_proxy/tools/worktrees.py +1264 -0
- gobby/mcp_proxy/transports/__init__.py +0 -0
- gobby/mcp_proxy/transports/base.py +95 -0
- gobby/mcp_proxy/transports/factory.py +44 -0
- gobby/mcp_proxy/transports/http.py +139 -0
- gobby/mcp_proxy/transports/stdio.py +213 -0
- gobby/mcp_proxy/transports/websocket.py +136 -0
- gobby/memory/backends/__init__.py +116 -0
- gobby/memory/backends/mem0.py +408 -0
- gobby/memory/backends/memu.py +485 -0
- gobby/memory/backends/null.py +111 -0
- gobby/memory/backends/openmemory.py +537 -0
- gobby/memory/backends/sqlite.py +304 -0
- gobby/memory/context.py +87 -0
- gobby/memory/manager.py +1001 -0
- gobby/memory/protocol.py +451 -0
- gobby/memory/search/__init__.py +66 -0
- gobby/memory/search/text.py +127 -0
- gobby/memory/viz.py +258 -0
- gobby/prompts/__init__.py +13 -0
- gobby/prompts/defaults/expansion/system.md +119 -0
- gobby/prompts/defaults/expansion/user.md +48 -0
- gobby/prompts/defaults/external_validation/agent.md +72 -0
- gobby/prompts/defaults/external_validation/external.md +63 -0
- gobby/prompts/defaults/external_validation/spawn.md +83 -0
- gobby/prompts/defaults/external_validation/system.md +6 -0
- gobby/prompts/defaults/features/import_mcp.md +22 -0
- gobby/prompts/defaults/features/import_mcp_github.md +17 -0
- gobby/prompts/defaults/features/import_mcp_search.md +16 -0
- gobby/prompts/defaults/features/recommend_tools.md +32 -0
- gobby/prompts/defaults/features/recommend_tools_hybrid.md +35 -0
- gobby/prompts/defaults/features/recommend_tools_llm.md +30 -0
- gobby/prompts/defaults/features/server_description.md +20 -0
- gobby/prompts/defaults/features/server_description_system.md +6 -0
- gobby/prompts/defaults/features/task_description.md +31 -0
- gobby/prompts/defaults/features/task_description_system.md +6 -0
- gobby/prompts/defaults/features/tool_summary.md +17 -0
- gobby/prompts/defaults/features/tool_summary_system.md +6 -0
- gobby/prompts/defaults/research/step.md +58 -0
- gobby/prompts/defaults/validation/criteria.md +47 -0
- gobby/prompts/defaults/validation/validate.md +38 -0
- gobby/prompts/loader.py +346 -0
- gobby/prompts/models.py +113 -0
- gobby/py.typed +0 -0
- gobby/runner.py +488 -0
- gobby/search/__init__.py +23 -0
- gobby/search/protocol.py +104 -0
- gobby/search/tfidf.py +232 -0
- gobby/servers/__init__.py +7 -0
- gobby/servers/http.py +636 -0
- gobby/servers/models.py +31 -0
- gobby/servers/routes/__init__.py +23 -0
- gobby/servers/routes/admin.py +416 -0
- gobby/servers/routes/dependencies.py +118 -0
- gobby/servers/routes/mcp/__init__.py +24 -0
- gobby/servers/routes/mcp/hooks.py +135 -0
- gobby/servers/routes/mcp/plugins.py +121 -0
- gobby/servers/routes/mcp/tools.py +1337 -0
- gobby/servers/routes/mcp/webhooks.py +159 -0
- gobby/servers/routes/sessions.py +582 -0
- gobby/servers/websocket.py +766 -0
- gobby/sessions/__init__.py +13 -0
- gobby/sessions/analyzer.py +322 -0
- gobby/sessions/lifecycle.py +240 -0
- gobby/sessions/manager.py +563 -0
- gobby/sessions/processor.py +225 -0
- gobby/sessions/summary.py +532 -0
- gobby/sessions/transcripts/__init__.py +41 -0
- gobby/sessions/transcripts/base.py +125 -0
- gobby/sessions/transcripts/claude.py +386 -0
- gobby/sessions/transcripts/codex.py +143 -0
- gobby/sessions/transcripts/gemini.py +195 -0
- gobby/storage/__init__.py +21 -0
- gobby/storage/agents.py +409 -0
- gobby/storage/artifact_classifier.py +341 -0
- gobby/storage/artifacts.py +285 -0
- gobby/storage/compaction.py +67 -0
- gobby/storage/database.py +357 -0
- gobby/storage/inter_session_messages.py +194 -0
- gobby/storage/mcp.py +680 -0
- gobby/storage/memories.py +562 -0
- gobby/storage/merge_resolutions.py +550 -0
- gobby/storage/migrations.py +860 -0
- gobby/storage/migrations_legacy.py +1359 -0
- gobby/storage/projects.py +166 -0
- gobby/storage/session_messages.py +251 -0
- gobby/storage/session_tasks.py +97 -0
- gobby/storage/sessions.py +817 -0
- gobby/storage/task_dependencies.py +223 -0
- gobby/storage/tasks/__init__.py +42 -0
- gobby/storage/tasks/_aggregates.py +180 -0
- gobby/storage/tasks/_crud.py +449 -0
- gobby/storage/tasks/_id.py +104 -0
- gobby/storage/tasks/_lifecycle.py +311 -0
- gobby/storage/tasks/_manager.py +889 -0
- gobby/storage/tasks/_models.py +300 -0
- gobby/storage/tasks/_ordering.py +119 -0
- gobby/storage/tasks/_path_cache.py +110 -0
- gobby/storage/tasks/_queries.py +343 -0
- gobby/storage/tasks/_search.py +143 -0
- gobby/storage/workflow_audit.py +393 -0
- gobby/storage/worktrees.py +547 -0
- gobby/sync/__init__.py +29 -0
- gobby/sync/github.py +333 -0
- gobby/sync/linear.py +304 -0
- gobby/sync/memories.py +284 -0
- gobby/sync/tasks.py +641 -0
- gobby/tasks/__init__.py +8 -0
- gobby/tasks/build_verification.py +193 -0
- gobby/tasks/commits.py +633 -0
- gobby/tasks/context.py +747 -0
- gobby/tasks/criteria.py +342 -0
- gobby/tasks/enhanced_validator.py +226 -0
- gobby/tasks/escalation.py +263 -0
- gobby/tasks/expansion.py +626 -0
- gobby/tasks/external_validator.py +764 -0
- gobby/tasks/issue_extraction.py +171 -0
- gobby/tasks/prompts/expand.py +327 -0
- gobby/tasks/research.py +421 -0
- gobby/tasks/tdd.py +352 -0
- gobby/tasks/tree_builder.py +263 -0
- gobby/tasks/validation.py +712 -0
- gobby/tasks/validation_history.py +357 -0
- gobby/tasks/validation_models.py +89 -0
- gobby/tools/__init__.py +0 -0
- gobby/tools/summarizer.py +170 -0
- gobby/tui/__init__.py +5 -0
- gobby/tui/api_client.py +281 -0
- gobby/tui/app.py +327 -0
- gobby/tui/screens/__init__.py +25 -0
- gobby/tui/screens/agents.py +333 -0
- gobby/tui/screens/chat.py +450 -0
- gobby/tui/screens/dashboard.py +377 -0
- gobby/tui/screens/memory.py +305 -0
- gobby/tui/screens/metrics.py +231 -0
- gobby/tui/screens/orchestrator.py +904 -0
- gobby/tui/screens/sessions.py +412 -0
- gobby/tui/screens/tasks.py +442 -0
- gobby/tui/screens/workflows.py +289 -0
- gobby/tui/screens/worktrees.py +174 -0
- gobby/tui/widgets/__init__.py +21 -0
- gobby/tui/widgets/chat.py +210 -0
- gobby/tui/widgets/conductor.py +104 -0
- gobby/tui/widgets/menu.py +132 -0
- gobby/tui/widgets/message_panel.py +160 -0
- gobby/tui/widgets/review_gate.py +224 -0
- gobby/tui/widgets/task_tree.py +99 -0
- gobby/tui/widgets/token_budget.py +166 -0
- gobby/tui/ws_client.py +258 -0
- gobby/utils/__init__.py +3 -0
- gobby/utils/daemon_client.py +235 -0
- gobby/utils/git.py +222 -0
- gobby/utils/id.py +38 -0
- gobby/utils/json_helpers.py +161 -0
- gobby/utils/logging.py +376 -0
- gobby/utils/machine_id.py +135 -0
- gobby/utils/metrics.py +589 -0
- gobby/utils/project_context.py +182 -0
- gobby/utils/project_init.py +263 -0
- gobby/utils/status.py +256 -0
- gobby/utils/validation.py +80 -0
- gobby/utils/version.py +23 -0
- gobby/workflows/__init__.py +4 -0
- gobby/workflows/actions.py +1310 -0
- gobby/workflows/approval_flow.py +138 -0
- gobby/workflows/artifact_actions.py +103 -0
- gobby/workflows/audit_helpers.py +110 -0
- gobby/workflows/autonomous_actions.py +286 -0
- gobby/workflows/context_actions.py +394 -0
- gobby/workflows/definitions.py +130 -0
- gobby/workflows/detection_helpers.py +208 -0
- gobby/workflows/engine.py +485 -0
- gobby/workflows/evaluator.py +669 -0
- gobby/workflows/git_utils.py +96 -0
- gobby/workflows/hooks.py +169 -0
- gobby/workflows/lifecycle_evaluator.py +613 -0
- gobby/workflows/llm_actions.py +70 -0
- gobby/workflows/loader.py +333 -0
- gobby/workflows/mcp_actions.py +60 -0
- gobby/workflows/memory_actions.py +272 -0
- gobby/workflows/premature_stop.py +164 -0
- gobby/workflows/session_actions.py +139 -0
- gobby/workflows/state_actions.py +123 -0
- gobby/workflows/state_manager.py +104 -0
- gobby/workflows/stop_signal_actions.py +163 -0
- gobby/workflows/summary_actions.py +344 -0
- gobby/workflows/task_actions.py +249 -0
- gobby/workflows/task_enforcement_actions.py +901 -0
- gobby/workflows/templates.py +52 -0
- gobby/workflows/todo_actions.py +84 -0
- gobby/workflows/webhook.py +223 -0
- gobby/workflows/webhook_executor.py +399 -0
- gobby/worktrees/__init__.py +5 -0
- gobby/worktrees/git.py +690 -0
- gobby/worktrees/merge/__init__.py +20 -0
- gobby/worktrees/merge/conflict_parser.py +177 -0
- gobby/worktrees/merge/resolver.py +485 -0
- gobby-0.2.5.dist-info/METADATA +351 -0
- gobby-0.2.5.dist-info/RECORD +383 -0
- gobby-0.2.5.dist-info/WHEEL +5 -0
- gobby-0.2.5.dist-info/entry_points.txt +2 -0
- gobby-0.2.5.dist-info/licenses/LICENSE.md +193 -0
- gobby-0.2.5.dist-info/top_level.txt +1 -0
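The per-file line counts above can be reproduced locally from the published wheel itself. A minimal sketch using only the Python standard library; the wheel filename and the `pip download` step are assumptions about how you obtain the file, and no gobby APIs are used:

```python
# Illustrative only: enumerate files and approximate line counts inside the wheel.
# Assumes the wheel was fetched locally first, e.g. `pip download gobby==0.2.5 --no-deps`.
import zipfile

WHEEL = "gobby-0.2.5-py3-none-any.whl"  # filename of the release listed above

with zipfile.ZipFile(WHEEL) as wheel:
    for name in sorted(wheel.namelist()):
        if name.endswith((".py", ".md", ".json", ".txt")):
            text = wheel.read(name).decode("utf-8", errors="replace")
            print(f"{name}: {text.count(chr(10))} lines")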
@@ -0,0 +1,344 @@
"""Summary generation workflow actions.

Extracted from actions.py as part of strangler fig decomposition.
These functions handle session summary generation, title synthesis, and handoff creation.
"""

from __future__ import annotations

import json
import logging
from pathlib import Path
from typing import Any, Literal

from gobby.workflows.git_utils import get_file_changes, get_git_status

logger = logging.getLogger(__name__)


def format_turns_for_llm(turns: list[dict[str, Any]]) -> str:
    """Format transcript turns for LLM analysis.

    Args:
        turns: List of transcript turn dicts

    Returns:
        Formatted string with turn summaries
    """
    formatted: list[str] = []
    for i, turn in enumerate(turns):
        message = turn.get("message", {})
        role = message.get("role", "unknown")
        content = message.get("content", "")

        # Assistant messages have content as array of blocks
        if isinstance(content, list):
            text_parts: list[str] = []
            for block in content:
                if isinstance(block, dict):
                    if block.get("type") == "text":
                        text_parts.append(block.get("text", ""))
                    elif block.get("type") == "thinking":
                        text_parts.append(f"[Thinking: {block.get('thinking', '')}]")
                    elif block.get("type") == "tool_use":
                        text_parts.append(f"[Tool: {block.get('name', 'unknown')}]")
            content = " ".join(text_parts)

        formatted.append(f"[Turn {i + 1} - {role}]: {content}")

    return "\n\n".join(formatted)


def extract_todowrite_state(turns: list[dict[str, Any]]) -> str:
    """Extract the last TodoWrite tool call's todos list from transcript.

    Scans turns in reverse to find the most recent TodoWrite tool call
    and formats it as a markdown checklist.

    Args:
        turns: List of transcript turns

    Returns:
        Formatted markdown string with todo list, or empty string if not found
    """
    for turn in reversed(turns):
        message = turn.get("message", {})
        content = message.get("content", [])

        if isinstance(content, list):
            for block in content:
                if isinstance(block, dict) and block.get("type") == "tool_use":
                    if block.get("name") == "TodoWrite":
                        tool_input = block.get("input", {})
                        todos = tool_input.get("todos", [])

                        if not todos:
                            return ""

                        # Format as markdown checklist
                        lines: list[str] = []
                        for todo in todos:
                            content_text = todo.get("content", "")
                            status = todo.get("status", "pending")

                            # Map status to checkbox style
                            if status == "completed":
                                checkbox = "[x]"
                            elif status == "in_progress":
                                checkbox = "[>]"
                            else:
                                checkbox = "[ ]"

                            lines.append(f"- {checkbox} {content_text}")

                        return "\n".join(lines)

    return ""


async def synthesize_title(
    session_manager: Any,
    session_id: str,
    llm_service: Any,
    transcript_processor: Any,
    template_engine: Any,
    template: str | None = None,
    prompt: str | None = None,
) -> dict[str, Any] | None:
    """Synthesize and set a session title.

    Args:
        session_manager: The session manager instance
        session_id: Current session ID
        llm_service: LLM service instance
        transcript_processor: Transcript processor instance
        template_engine: Template engine for rendering
        template: Optional prompt template
        prompt: Optional user prompt to generate title from (preferred over transcript)

    Returns:
        Dict with title_synthesized or error
    """
    if not llm_service:
        return {"error": "Missing LLM service"}

    current_session = session_manager.get(session_id)
    if not current_session:
        return {"error": "Session not found"}

    try:
        # If prompt provided directly, use it (preferred path)
        if prompt:
            llm_prompt = (
                "Create a short title (3-5 words) for this coding session based on "
                "the user's first message. Output ONLY the title, no quotes or explanation.\n\n"
                f"User message: {prompt}"
            )
        else:
            # Fall back to reading transcript
            transcript_path = getattr(current_session, "jsonl_path", None)
            if not transcript_path:
                return {"error": "No transcript path and no prompt provided"}

            turns = []
            path = Path(transcript_path)
            if path.exists():
                with open(path, encoding="utf-8") as f:
                    for i, line in enumerate(f):
                        if i >= 20:
                            break
                        if line.strip():
                            turns.append(json.loads(line))

            if not turns:
                return {"error": "Empty transcript"}

            formatted_turns = format_turns_for_llm(turns)

            if not template:
                template = (
                    "Create a short, concise title (3-5 words) for this coding session "
                    "based on the transcript.\n\nTranscript:\n{{ transcript }}"
                )

            llm_prompt = template_engine.render(template, {"transcript": formatted_turns})

        provider = llm_service.get_default_provider()
        title = await provider.generate_text(llm_prompt)

        # Clean title (remove quotes, etc)
        title = title.strip().strip('"').strip("'")

        session_manager.update_title(session_id, title)
        return {"title_synthesized": title}

    except Exception as e:
        logger.error(f"synthesize_title: Failed: {e}")
        return {"error": str(e)}


async def generate_summary(
    session_manager: Any,
    session_id: str,
    llm_service: Any,
    transcript_processor: Any,
    template: str | None = None,
    previous_summary: str | None = None,
    mode: Literal["clear", "compact"] = "clear",
) -> dict[str, Any] | None:
    """Generate a session summary using LLM and store it in the session record.

    Args:
        session_manager: The session manager instance
        session_id: Current session ID
        llm_service: LLM service instance
        transcript_processor: Transcript processor instance
        template: Optional prompt template
        previous_summary: Previous summary_markdown for cumulative compression (compact mode)
        mode: "clear" or "compact" - passed to LLM context to control summarization density

    Returns:
        Dict with summary_generated and summary_length, or error

    Raises:
        ValueError: If mode is not "clear" or "compact"
    """
    # Validate mode parameter
    valid_modes = {"clear", "compact"}
    if mode not in valid_modes:
        raise ValueError(f"Invalid mode '{mode}'. Must be one of: {', '.join(sorted(valid_modes))}")

    if not llm_service or not transcript_processor:
        logger.warning("generate_summary: Missing LLM service or transcript processor")
        return {"error": "Missing services"}

    current_session = session_manager.get(session_id)
    if not current_session:
        return {"error": "Session not found"}

    transcript_path = getattr(current_session, "jsonl_path", None)
    if not transcript_path:
        logger.warning(f"generate_summary: No transcript path for session {session_id}")
        return {"error": "No transcript path"}

    if not template:
        template = (
            "Summarize this session, focusing on what was accomplished, "
            "key decisions, and what is left to do.\n\n"
            "Transcript:\n{transcript_summary}"
        )

    # 1. Process Transcript
    try:
        transcript_file = Path(transcript_path)
        if not transcript_file.exists():
            logger.warning(f"Transcript file not found: {transcript_path}")
            return {"error": "Transcript not found"}

        turns = []
        with open(transcript_file) as f:
            for line in f:
                if line.strip():
                    turns.append(json.loads(line))

        # Turn extraction is deliberately mode-agnostic: we always extract the most
        # recent turns since the last /clear and let the prompt control summarization
        # density. The mode parameter is passed to the LLM context where the template
        # can adjust output format (e.g., compact mode may instruct denser summaries).
        recent_turns = transcript_processor.extract_turns_since_clear(turns, max_turns=50)

        # Format turns for LLM
        transcript_summary = format_turns_for_llm(recent_turns)
    except Exception as e:
        logger.error(f"Failed to process transcript: {e}")
        return {"error": str(e)}

    # 2. Gather context variables for template
    last_messages = transcript_processor.extract_last_messages(recent_turns, num_pairs=2)
    last_messages_str = format_turns_for_llm(last_messages) if last_messages else ""

    # Get git status and file changes
    git_status = get_git_status()
    file_changes = get_file_changes()

    # Extract TodoWrite state from transcript
    todo_list = extract_todowrite_state(recent_turns)

    # 3. Call LLM
    try:
        llm_context = {
            "turns": recent_turns,
            "transcript_summary": transcript_summary,
            "session": current_session,
            "last_messages": last_messages_str,
            "git_status": git_status,
            "file_changes": file_changes,
            "todo_list": f"## Agent's TODO List\n{todo_list}" if todo_list else "",
            "previous_summary": previous_summary or "",
            "mode": mode,
        }
        provider = llm_service.get_default_provider()
        summary_content = await provider.generate_summary(
            context=llm_context,
            prompt_template=template,
        )
    except Exception as e:
        logger.error(f"LLM generation failed: {e}")
        return {"error": f"LLM error: {e}"}

    # 4. Save to session
    session_manager.update_summary(session_id, summary_markdown=summary_content)

    logger.info(f"Generated summary for session {session_id} (mode={mode})")
    return {"summary_generated": True, "summary_length": len(summary_content)}


async def generate_handoff(
    session_manager: Any,
    session_id: str,
    llm_service: Any,
    transcript_processor: Any,
    template: str | None = None,
    previous_summary: str | None = None,
    mode: Literal["clear", "compact"] = "clear",
) -> dict[str, Any] | None:
    """Generate a handoff record by summarizing the session.

    This is a convenience action that combines generate_summary + mark status.

    Args:
        session_manager: The session manager instance
        session_id: Current session ID
        llm_service: LLM service instance
        transcript_processor: Transcript processor instance
        template: Optional prompt template
        previous_summary: Previous summary for cumulative compression (compact mode)
        mode: "clear" or "compact"

    Returns:
        Dict with handoff_created and summary_length, or error

    Raises:
        ValueError: If mode is not "clear" or "compact" (via generate_summary)
    """
    # Reuse generate_summary logic
    summary_result = await generate_summary(
        session_manager=session_manager,
        session_id=session_id,
        llm_service=llm_service,
        transcript_processor=transcript_processor,
        template=template,
        previous_summary=previous_summary,
        mode=mode,
    )

    if summary_result and "error" in summary_result:
        return summary_result

    # Mark Session Status
    session_manager.update_status(session_id, "handoff_ready")

    if not summary_result:
        return {"error": "Failed to generate summary"}

    return {"handoff_created": True, "summary_length": summary_result.get("summary_length", 0)}
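The two helpers at the top of this hunk are pure functions over the transcript-turn shape the code parses (a `message` dict with a `role` and a `content` list of typed blocks), so they can be exercised in isolation. A small sketch, assuming the module path `gobby.workflows.summary_actions` (inferred from the `+344` entry in the file listing above) and an invented turn:

```python
# Illustrative only: the turn below is hand-built for the example, not a real transcript.
from gobby.workflows.summary_actions import (  # module path assumed from the listing
    extract_todowrite_state,
    format_turns_for_llm,
)

turns = [
    {
        "message": {
            "role": "assistant",
            "content": [
                {"type": "thinking", "thinking": "Plan the refactor first."},
                {"type": "text", "text": "Splitting actions.py into smaller modules."},
                {
                    "type": "tool_use",
                    "name": "TodoWrite",
                    "input": {
                        "todos": [
                            {"content": "Extract summary actions", "status": "completed"},
                            {"content": "Extract task actions", "status": "in_progress"},
                        ]
                    },
                },
            ],
        }
    }
]

print(format_turns_for_llm(turns))
# [Turn 1 - assistant]: [Thinking: Plan the refactor first.] Splitting actions.py into smaller modules. [Tool: TodoWrite]
print(extract_todowrite_state(turns))
# - [x] Extract summary actions
# - [>] Extract task actions
```

The async actions (`synthesize_title`, `generate_summary`, `generate_handoff`) additionally expect the session manager, LLM service, and transcript processor objects that the daemon wires in; those are not constructed in this diff, so they are not mocked here.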
@@ -0,0 +1,249 @@
"""Workflow-Task integration module.

Provides functions for integrating the task system with the workflow engine:
- persist_decomposed_tasks(): Create tasks from workflow decomposition with ID mapping
- update_task_from_workflow(): Update task fields from workflow state
- get_workflow_tasks(): Retrieve tasks for a workflow state
"""

import logging
from typing import Any

from gobby.storage.database import DatabaseProtocol
from gobby.storage.tasks import LocalTaskManager, Task

logger = logging.getLogger(__name__)


def persist_decomposed_tasks(
    db: DatabaseProtocol,
    project_id: str,
    tasks_data: list[dict[str, Any]],
    workflow_name: str,
    parent_task_id: str | None = None,
    created_in_session_id: str | None = None,
) -> dict[str, str]:
    """Persist a list of decomposed tasks to the database with ID mapping.

    Takes task data from workflow decomposition (e.g., from LLM output) and creates
    persistent tasks in the database. Returns a mapping from original task references
    (e.g., "1", "task_1") to the generated database IDs.

    Args:
        db: LocalDatabase instance
        project_id: Project ID to create tasks in
        tasks_data: List of task dicts from decomposition, each with:
            - id (str/int): Original reference ID (optional, uses index if missing)
            - title or description (str): Task title (required)
            - verification (str): How to verify completion (optional)
            - priority (int): Task priority 1-3 (optional, default 2)
            - labels (list[str]): Task labels (optional)
        workflow_name: Name of the workflow these tasks belong to
        parent_task_id: Optional parent task ID for all created tasks
        created_in_session_id: Optional session ID where task was created

    Returns:
        Dict mapping original task references to database task UUIDs.
        Example: {"1": "550e8400-e29b-41d4-a716-446655440000", "2": "6ba7b810-9dad-11d1-80b4-00c04fd430c8"}

    Raises:
        ValueError: If no tasks provided or tasks_data is invalid
    """
    if not tasks_data:
        raise ValueError("No tasks provided for persistence")

    task_manager = LocalTaskManager(db)
    id_mapping: dict[str, str] = {}

    for index, task_data in enumerate(tasks_data):
        # Get original reference ID (could be int from LLM JSON)
        original_id = str(task_data.get("id", index + 1))

        # Get title - support both 'title' and 'description' keys
        title = task_data.get("title") or task_data.get("description")
        if not title:
            logger.warning(f"Skipping task {original_id}: no title or description")
            continue

        # Extract other fields
        verification = task_data.get("verification")
        priority = task_data.get("priority", 2)
        labels = task_data.get("labels", [])
        description = task_data.get("description")

        # Don't use description as both title and description
        if description == title:
            description = None

        try:
            task = task_manager.create_task(
                project_id=project_id,
                title=title,
                description=description,
                priority=priority,
                labels=labels,
                parent_task_id=parent_task_id,
                created_in_session_id=created_in_session_id,
                workflow_name=workflow_name,
                verification=verification,
                sequence_order=index,
            )
            id_mapping[original_id] = task.id
            logger.debug(f"Created task {task.id} from decomposition ref {original_id}")
        except Exception as e:
            logger.error(f"Failed to create task for ref {original_id}: {e}")
            continue

    logger.info(
        f"Persisted {len(id_mapping)} tasks for workflow '{workflow_name}' in project {project_id}"
    )
    return id_mapping


def update_task_from_workflow(
    db: DatabaseProtocol,
    task_id: str,
    status: str | None = None,
    verification: str | None = None,
    validation_status: str | None = None,
    validation_feedback: str | None = None,
) -> Task | None:
    """Update a task based on workflow state changes.

    Called when workflow transitions or verifications occur to update the
    corresponding task record.

    Args:
        db: LocalDatabase instance
        task_id: ID of the task to update
        status: New status ('open', 'in_progress', 'closed')
        verification: Updated verification instructions/result
        validation_status: Validation status ('pending', 'valid', 'invalid')
        validation_feedback: Feedback from validation

    Returns:
        Updated Task object, or None if task not found
    """
    task_manager = LocalTaskManager(db)

    try:
        # Build update kwargs only for provided values
        update_kwargs: dict[str, Any] = {}
        if status is not None:
            update_kwargs["status"] = status
        if verification is not None:
            update_kwargs["verification"] = verification
        if validation_status is not None:
            update_kwargs["validation_status"] = validation_status
        if validation_feedback is not None:
            update_kwargs["validation_feedback"] = validation_feedback

        if not update_kwargs:
            # No updates to apply, just return current task
            return task_manager.get_task(task_id)

        task = task_manager.update_task(task_id, **update_kwargs)
        logger.debug(f"Updated task {task_id} from workflow: {list(update_kwargs.keys())}")
        return task

    except ValueError as e:
        logger.warning(f"Task {task_id} not found for workflow update: {e}")
        return None
    except Exception as e:
        logger.error(f"Failed to update task {task_id} from workflow: {e}")
        return None


def get_workflow_tasks(
    db: DatabaseProtocol,
    workflow_name: str,
    project_id: str | None = None,
    include_closed: bool = False,
) -> list[Task]:
    """Retrieve all tasks associated with a workflow state.

    Returns tasks ordered by sequence_order for workflows that use ordered task
    execution (like plan-to-tasks).

    Args:
        db: LocalDatabase instance
        workflow_name: Name of the workflow to get tasks for
        project_id: Optional project ID filter
        include_closed: If True, include closed tasks; otherwise only open/in_progress

    Returns:
        List of Task objects ordered by sequence_order, then created_at
    """
    task_manager = LocalTaskManager(db)

    # Determine status filter
    status = None if include_closed else None  # We'll handle in the method

    tasks = task_manager.list_workflow_tasks(
        workflow_name=workflow_name,
        project_id=project_id,
        status=status,
    )

    # Filter out closed if not including them
    if not include_closed:
        tasks = [t for t in tasks if t.status != "closed"]

    logger.debug(f"Retrieved {len(tasks)} tasks for workflow '{workflow_name}'")
    return tasks


def get_next_workflow_task(
    db: DatabaseProtocol,
    workflow_name: str,
    project_id: str | None = None,
) -> Task | None:
    """Get the next task to work on for a workflow.

    Returns the first open task by sequence_order that hasn't been started.
    Useful for workflows that execute tasks sequentially.

    Args:
        db: LocalDatabase instance
        workflow_name: Name of the workflow
        project_id: Optional project ID filter

    Returns:
        Next Task to work on, or None if all tasks are complete
    """
    task_manager = LocalTaskManager(db)

    tasks = task_manager.list_workflow_tasks(
        workflow_name=workflow_name,
        project_id=project_id,
        status="open",
        limit=1,
    )

    if tasks:
        return tasks[0]
    return None


def mark_workflow_task_complete(
    db: DatabaseProtocol,
    task_id: str,
    verification_result: str | None = None,
) -> Task | None:
    """Mark a workflow task as complete with optional verification result.

    Args:
        db: LocalDatabase instance
        task_id: ID of the task to complete
        verification_result: Optional result/notes from verification

    Returns:
        Updated Task object, or None if task not found
    """
    return update_task_from_workflow(
        db=db,
        task_id=task_id,
        status="closed",
        verification=verification_result,
        validation_status="valid",
    )
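The intended call sequence for these helpers follows directly from their signatures: persist decomposed tasks, then walk them in sequence order, closing each one as it is verified. A sketch under stated assumptions; the module path `gobby.workflows.task_actions` is inferred from the `+249` entry in the listing, the database handle and decomposition data are invented stand-ins, and only the workflow name "plan-to-tasks" mentioned in the docstrings is taken from the source:

```python
# Illustrative call sequence only; not a verified gobby recipe.
from gobby.workflows.task_actions import (  # module path assumed from the listing
    get_next_workflow_task,
    mark_workflow_task_complete,
    persist_decomposed_tasks,
)

db = ...  # assumption: a DatabaseProtocol-compatible handle from gobby's storage layer

# Example decomposition output, shaped per the persist_decomposed_tasks docstring.
decomposed = [
    {"id": 1, "title": "Write migration", "verification": "pytest tests/storage", "priority": 1},
    {"id": 2, "description": "Update docs", "labels": ["docs"]},
]

id_mapping = persist_decomposed_tasks(
    db=db,
    project_id="proj-123",        # example value
    tasks_data=decomposed,
    workflow_name="plan-to-tasks",
)
# id_mapping maps the original refs "1" and "2" to generated task UUIDs.

# Work through open tasks in sequence_order until none remain.
while (task := get_next_workflow_task(db, "plan-to-tasks", project_id="proj-123")) is not None:
    # ... execute the task ...
    mark_workflow_task_complete(db, task.id, verification_result="verified by workflow")
```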