draft-board 0.1.0-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/app/backend/.env.example +9 -0
- package/app/backend/.smartkanban/evidence/8b383839-cbec-45af-86ee-c7708d075cbe/bddf2ed5-2e21-4d46-a62b-10b87f1642a6_patch.txt +195 -0
- package/app/backend/.smartkanban/evidence/8b383839-cbec-45af-86ee-c7708d075cbe/bddf2ed5-2e21-4d46-a62b-10b87f1642a6_stat.txt +6 -0
- package/app/backend/CURL_EXAMPLES.md +335 -0
- package/app/backend/ENV_SETUP.md +65 -0
- package/app/backend/alembic/env.py +71 -0
- package/app/backend/alembic/script.py.mako +28 -0
- package/app/backend/alembic/versions/001_initial_schema.py +104 -0
- package/app/backend/alembic/versions/002_add_jobs_table.py +52 -0
- package/app/backend/alembic/versions/003_add_workspace_table.py +48 -0
- package/app/backend/alembic/versions/004_add_evidence_table.py +56 -0
- package/app/backend/alembic/versions/005_add_verification_commands.py +32 -0
- package/app/backend/alembic/versions/006_add_planner_lock_table.py +39 -0
- package/app/backend/alembic/versions/007_add_revision_review_tables.py +126 -0
- package/app/backend/alembic/versions/008_add_revision_idempotency_and_traceability.py +52 -0
- package/app/backend/alembic/versions/009_add_job_health_fields.py +46 -0
- package/app/backend/alembic/versions/010_add_review_comment_line_content.py +36 -0
- package/app/backend/alembic/versions/011_add_analysis_cache.py +47 -0
- package/app/backend/alembic/versions/012_add_boards_table.py +102 -0
- package/app/backend/alembic/versions/013_add_ticket_blocking.py +45 -0
- package/app/backend/alembic/versions/014_add_agent_sessions.py +220 -0
- package/app/backend/alembic/versions/015_add_ticket_sort_order.py +33 -0
- package/app/backend/alembic/versions/03220f0b93ae_add_pr_fields_to_ticket.py +49 -0
- package/app/backend/alembic/versions/0c2d89fff3b1_seed_board_configs_from_yaml.py +206 -0
- package/app/backend/alembic/versions/3348e5cf54c1_add_merge_checklist_table.py +67 -0
- package/app/backend/alembic/versions/357c780ee445_add_goal_status.py +34 -0
- package/app/backend/alembic/versions/553340b7e26c_add_autonomy_fields_to_goal.py +65 -0
- package/app/backend/alembic/versions/774dc335c679_merge_migration_heads.py +23 -0
- package/app/backend/alembic/versions/7b307e847cbd_merge_heads.py +23 -0
- package/app/backend/alembic/versions/82ecd978cc70_add_missing_indexes.py +48 -0
- package/app/backend/alembic/versions/8ef5054dc280_add_normalized_log_entries.py +173 -0
- package/app/backend/alembic/versions/8f3e2bd8ea3b_merge_migration_heads.py +23 -0
- package/app/backend/alembic/versions/9d17f0698d3b_add_config_column_to_boards_table.py +30 -0
- package/app/backend/alembic/versions/add_agent_conversation_history.py +72 -0
- package/app/backend/alembic/versions/add_job_variant.py +34 -0
- package/app/backend/alembic/versions/add_performance_indexes.py +95 -0
- package/app/backend/alembic/versions/add_repos_and_board_repos.py +174 -0
- package/app/backend/alembic/versions/add_session_id_to_jobs.py +27 -0
- package/app/backend/alembic/versions/add_sqlite_backend_tables.py +104 -0
- package/app/backend/alembic/versions/b10fb0b62240_add_diff_content_to_revisions.py +34 -0
- package/app/backend/alembic.ini +89 -0
- package/app/backend/app/__init__.py +3 -0
- package/app/backend/app/data_dir.py +85 -0
- package/app/backend/app/database.py +70 -0
- package/app/backend/app/database_sync.py +64 -0
- package/app/backend/app/dependencies/__init__.py +5 -0
- package/app/backend/app/dependencies/auth.py +80 -0
- package/app/backend/app/dependencies.py +43 -0
- package/app/backend/app/exceptions.py +178 -0
- package/app/backend/app/executors/__init__.py +1 -0
- package/app/backend/app/executors/adapters/__init__.py +1 -0
- package/app/backend/app/executors/adapters/aider.py +152 -0
- package/app/backend/app/executors/adapters/amazon_q.py +103 -0
- package/app/backend/app/executors/adapters/amp.py +123 -0
- package/app/backend/app/executors/adapters/claude.py +177 -0
- package/app/backend/app/executors/adapters/cline.py +127 -0
- package/app/backend/app/executors/adapters/codex.py +167 -0
- package/app/backend/app/executors/adapters/copilot.py +202 -0
- package/app/backend/app/executors/adapters/cursor.py +87 -0
- package/app/backend/app/executors/adapters/droid.py +123 -0
- package/app/backend/app/executors/adapters/gemini.py +132 -0
- package/app/backend/app/executors/adapters/goose.py +131 -0
- package/app/backend/app/executors/adapters/opencode.py +123 -0
- package/app/backend/app/executors/adapters/qwen.py +123 -0
- package/app/backend/app/executors/plugins/__init__.py +1 -0
- package/app/backend/app/executors/registry.py +202 -0
- package/app/backend/app/executors/spec.py +226 -0
- package/app/backend/app/main.py +486 -0
- package/app/backend/app/middleware/__init__.py +13 -0
- package/app/backend/app/middleware/idempotency.py +426 -0
- package/app/backend/app/middleware/rate_limit.py +312 -0
- package/app/backend/app/middleware/security_headers.py +43 -0
- package/app/backend/app/middleware/timeout.py +37 -0
- package/app/backend/app/models/__init__.py +56 -0
- package/app/backend/app/models/agent_conversation_history.py +56 -0
- package/app/backend/app/models/agent_session.py +127 -0
- package/app/backend/app/models/analysis_cache.py +49 -0
- package/app/backend/app/models/base.py +9 -0
- package/app/backend/app/models/board.py +79 -0
- package/app/backend/app/models/board_repo.py +68 -0
- package/app/backend/app/models/cost_budget.py +42 -0
- package/app/backend/app/models/enums.py +40 -0
- package/app/backend/app/models/evidence.py +132 -0
- package/app/backend/app/models/goal.py +102 -0
- package/app/backend/app/models/idempotency_entry.py +30 -0
- package/app/backend/app/models/job.py +163 -0
- package/app/backend/app/models/job_queue.py +39 -0
- package/app/backend/app/models/kv_store.py +28 -0
- package/app/backend/app/models/merge_checklist.py +87 -0
- package/app/backend/app/models/normalized_log.py +100 -0
- package/app/backend/app/models/planner_lock.py +43 -0
- package/app/backend/app/models/rate_limit_entry.py +25 -0
- package/app/backend/app/models/repo.py +66 -0
- package/app/backend/app/models/review_comment.py +91 -0
- package/app/backend/app/models/review_summary.py +69 -0
- package/app/backend/app/models/revision.py +130 -0
- package/app/backend/app/models/ticket.py +223 -0
- package/app/backend/app/models/ticket_event.py +83 -0
- package/app/backend/app/models/user.py +47 -0
- package/app/backend/app/models/workspace.py +71 -0
- package/app/backend/app/redis_client.py +119 -0
- package/app/backend/app/routers/__init__.py +29 -0
- package/app/backend/app/routers/agents.py +296 -0
- package/app/backend/app/routers/auth.py +94 -0
- package/app/backend/app/routers/board.py +885 -0
- package/app/backend/app/routers/dashboard.py +351 -0
- package/app/backend/app/routers/debug.py +528 -0
- package/app/backend/app/routers/evidence.py +96 -0
- package/app/backend/app/routers/executors.py +324 -0
- package/app/backend/app/routers/goals.py +574 -0
- package/app/backend/app/routers/jobs.py +448 -0
- package/app/backend/app/routers/maintenance.py +172 -0
- package/app/backend/app/routers/merge.py +360 -0
- package/app/backend/app/routers/planner.py +537 -0
- package/app/backend/app/routers/pull_requests.py +382 -0
- package/app/backend/app/routers/repos.py +263 -0
- package/app/backend/app/routers/revisions.py +939 -0
- package/app/backend/app/routers/settings.py +267 -0
- package/app/backend/app/routers/tickets.py +2003 -0
- package/app/backend/app/routers/webhooks.py +143 -0
- package/app/backend/app/routers/websocket.py +249 -0
- package/app/backend/app/schemas/__init__.py +109 -0
- package/app/backend/app/schemas/board.py +87 -0
- package/app/backend/app/schemas/common.py +33 -0
- package/app/backend/app/schemas/evidence.py +87 -0
- package/app/backend/app/schemas/goal.py +90 -0
- package/app/backend/app/schemas/job.py +97 -0
- package/app/backend/app/schemas/merge.py +139 -0
- package/app/backend/app/schemas/planner.py +500 -0
- package/app/backend/app/schemas/repo.py +187 -0
- package/app/backend/app/schemas/review.py +137 -0
- package/app/backend/app/schemas/revision.py +114 -0
- package/app/backend/app/schemas/ticket.py +238 -0
- package/app/backend/app/schemas/ticket_event.py +72 -0
- package/app/backend/app/schemas/workspace.py +19 -0
- package/app/backend/app/services/__init__.py +31 -0
- package/app/backend/app/services/agent_memory_service.py +223 -0
- package/app/backend/app/services/agent_registry.py +346 -0
- package/app/backend/app/services/agent_session_manager.py +318 -0
- package/app/backend/app/services/agent_session_service.py +219 -0
- package/app/backend/app/services/agent_tools.py +379 -0
- package/app/backend/app/services/auth_service.py +98 -0
- package/app/backend/app/services/autonomy_service.py +380 -0
- package/app/backend/app/services/board_repo_service.py +201 -0
- package/app/backend/app/services/board_service.py +326 -0
- package/app/backend/app/services/cleanup_service.py +1085 -0
- package/app/backend/app/services/config_service.py +908 -0
- package/app/backend/app/services/context_gatherer.py +557 -0
- package/app/backend/app/services/cost_tracking_service.py +293 -0
- package/app/backend/app/services/cursor_log_normalizer.py +536 -0
- package/app/backend/app/services/delivery_pipeline.py +440 -0
- package/app/backend/app/services/executor_service.py +634 -0
- package/app/backend/app/services/git_host/__init__.py +11 -0
- package/app/backend/app/services/git_host/factory.py +87 -0
- package/app/backend/app/services/git_host/github.py +270 -0
- package/app/backend/app/services/git_host/gitlab.py +194 -0
- package/app/backend/app/services/git_host/protocol.py +75 -0
- package/app/backend/app/services/git_merge_simple.py +346 -0
- package/app/backend/app/services/git_ops.py +384 -0
- package/app/backend/app/services/github_service.py +233 -0
- package/app/backend/app/services/goal_service.py +113 -0
- package/app/backend/app/services/job_service.py +423 -0
- package/app/backend/app/services/job_watchdog_service.py +424 -0
- package/app/backend/app/services/langchain_adapter.py +122 -0
- package/app/backend/app/services/llm_provider_clients.py +351 -0
- package/app/backend/app/services/llm_service.py +285 -0
- package/app/backend/app/services/log_normalizer.py +342 -0
- package/app/backend/app/services/log_stream_service.py +276 -0
- package/app/backend/app/services/merge_checklist_service.py +264 -0
- package/app/backend/app/services/merge_service.py +784 -0
- package/app/backend/app/services/orchestrator_log.py +84 -0
- package/app/backend/app/services/planner_service.py +1662 -0
- package/app/backend/app/services/planner_tick_sync.py +1040 -0
- package/app/backend/app/services/queued_message_service.py +156 -0
- package/app/backend/app/services/reliability_wrapper.py +389 -0
- package/app/backend/app/services/repo_discovery_service.py +318 -0
- package/app/backend/app/services/review_service.py +334 -0
- package/app/backend/app/services/revision_service.py +389 -0
- package/app/backend/app/services/safe_autopilot.py +510 -0
- package/app/backend/app/services/sqlite_worker.py +372 -0
- package/app/backend/app/services/task_dispatch.py +135 -0
- package/app/backend/app/services/ticket_generation_service.py +1781 -0
- package/app/backend/app/services/ticket_service.py +486 -0
- package/app/backend/app/services/udar_planner_service.py +1007 -0
- package/app/backend/app/services/webhook_service.py +126 -0
- package/app/backend/app/services/workspace_service.py +465 -0
- package/app/backend/app/services/worktree_file_service.py +92 -0
- package/app/backend/app/services/worktree_validator.py +213 -0
- package/app/backend/app/sqlite_kv.py +278 -0
- package/app/backend/app/state_machine.py +128 -0
- package/app/backend/app/templates/__init__.py +5 -0
- package/app/backend/app/templates/registry.py +243 -0
- package/app/backend/app/utils/__init__.py +5 -0
- package/app/backend/app/utils/artifact_reader.py +87 -0
- package/app/backend/app/utils/circuit_breaker.py +229 -0
- package/app/backend/app/utils/db_retry.py +136 -0
- package/app/backend/app/utils/ignored_fields.py +123 -0
- package/app/backend/app/utils/validators.py +54 -0
- package/app/backend/app/websocket/__init__.py +5 -0
- package/app/backend/app/websocket/manager.py +179 -0
- package/app/backend/app/websocket/state_tracker.py +113 -0
- package/app/backend/app/worker.py +3190 -0
- package/app/backend/calculator_tickets.json +40 -0
- package/app/backend/canary_tests.sh +591 -0
- package/app/backend/celerybeat-schedule +0 -0
- package/app/backend/celerybeat-schedule-shm +0 -0
- package/app/backend/celerybeat-schedule-wal +0 -0
- package/app/backend/logs/.gitkeep +3 -0
- package/app/backend/multiplication_division_implementation_tickets.json +55 -0
- package/app/backend/multiplication_division_tickets.json +42 -0
- package/app/backend/pyproject.toml +45 -0
- package/app/backend/requirements-dev.txt +8 -0
- package/app/backend/requirements.txt +20 -0
- package/app/backend/run.sh +30 -0
- package/app/backend/run_with_logs.sh +10 -0
- package/app/backend/scientific_calculator_tickets.json +40 -0
- package/app/backend/scripts/extract_openapi.py +21 -0
- package/app/backend/scripts/seed_demo.py +187 -0
- package/app/backend/setup_demo_review.py +302 -0
- package/app/backend/test_actual_parse.py +41 -0
- package/app/backend/test_agent_streaming.py +61 -0
- package/app/backend/test_parse.py +51 -0
- package/app/backend/test_streaming.py +51 -0
- package/app/backend/test_subprocess_streaming.py +50 -0
- package/app/backend/tests/__init__.py +1 -0
- package/app/backend/tests/conftest.py +46 -0
- package/app/backend/tests/test_auth.py +341 -0
- package/app/backend/tests/test_autonomy_service.py +391 -0
- package/app/backend/tests/test_cleanup_service_safety.py +417 -0
- package/app/backend/tests/test_middleware.py +279 -0
- package/app/backend/tests/test_planner_providers.py +290 -0
- package/app/backend/tests/test_planner_unblock.py +183 -0
- package/app/backend/tests/test_revision_invariants.py +618 -0
- package/app/backend/tests/test_sqlite_kv.py +290 -0
- package/app/backend/tests/test_sqlite_worker.py +353 -0
- package/app/backend/tests/test_task_dispatch.py +100 -0
- package/app/backend/tests/test_ticket_validation.py +304 -0
- package/app/backend/tests/test_udar_agent.py +693 -0
- package/app/backend/tests/test_webhook_service.py +184 -0
- package/app/backend/tickets_output.json +59 -0
- package/app/backend/user_management_tickets.json +50 -0
- package/app/backend/uvicorn.log +0 -0
- package/app/draft.yaml +313 -0
- package/app/frontend/dist/assets/index-LcjCczu5.js +155 -0
- package/app/frontend/dist/assets/index-_FP_279e.css +1 -0
- package/app/frontend/dist/index.html +14 -0
- package/app/frontend/dist/vite.svg +1 -0
- package/app/frontend/package.json +101 -0
- package/bin/cli.js +527 -0
- package/package.json +37 -0
|
@@ -0,0 +1,1662 @@
|
|
|
1
|
+
"""Planner service for automated workflow decisions.
|
|
2
|
+
|
|
3
|
+
The planner runs in "tick" mode - each tick evaluates the board state
|
|
4
|
+
and takes actions to move work forward:
|
|
5
|
+
|
|
6
|
+
1. Pick next ticket (deterministic):
|
|
7
|
+
- If no EXECUTING or VERIFYING ticket exists, pick highest priority PLANNED ticket
|
|
8
|
+
- Enqueue execute job for the selected ticket
|
|
9
|
+
|
|
10
|
+
2. Handle blocked tickets (LLM-powered):
|
|
11
|
+
- For BLOCKED tickets without follow-ups, generate follow-up proposals
|
|
12
|
+
- Auto-create follow-up tickets in PROPOSED state
|
|
13
|
+
- Respects caps: max_followups_per_ticket and max_followups_per_tick
|
|
14
|
+
- Skips certain blocker reasons (e.g., "no changes produced")
|
|
15
|
+
|
|
16
|
+
3. Generate reflections (LLM-powered):
|
|
17
|
+
- For DONE tickets without reflections, generate summary comments
|
|
18
|
+
- Create TicketEvent with the reflection (never modifies ticket text)
|
|
19
|
+
|
|
20
|
+
PERMISSIONS (what the planner CAN and CANNOT do):
|
|
21
|
+
CAN:
|
|
22
|
+
- Create tickets in PROPOSED state only (follow-ups)
|
|
23
|
+
- Enqueue EXECUTE jobs for PLANNED tickets
|
|
24
|
+
- Add COMMENT events (reflections, action logs)
|
|
25
|
+
CANNOT:
|
|
26
|
+
- Transition tickets between states
|
|
27
|
+
- Delete anything
|
|
28
|
+
- Modify ticket title/description
|
|
29
|
+
- Create tickets in any state other than PROPOSED
|
|
30
|
+
|
|
31
|
+
CONCURRENCY SAFETY:
|
|
32
|
+
- Uses a lock row in planner_locks table
|
|
33
|
+
- Only one tick can run at a time
|
|
34
|
+
- Celery jobs are enqueued AFTER DB commit
|
|
35
|
+
|
|
36
|
+
NOTE: For ticket generation from goals, use TicketGenerationService instead.
|
|
37
|
+
This service focuses on the tick-based autopilot workflow.
|
|
38
|
+
"""
|
|
39
|
+
|
|
40
|
+
import json
|
|
41
|
+
import logging
|
|
42
|
+
import uuid
|
|
43
|
+
from dataclasses import dataclass
|
|
44
|
+
from datetime import UTC, datetime, timedelta
|
|
45
|
+
from typing import TYPE_CHECKING
|
|
46
|
+
|
|
47
|
+
from sqlalchemy import and_, delete, select, update
|
|
48
|
+
from sqlalchemy.exc import IntegrityError
|
|
49
|
+
from sqlalchemy.ext.asyncio import AsyncSession
|
|
50
|
+
from sqlalchemy.orm import selectinload
|
|
51
|
+
|
|
52
|
+
from app.models.job import Job, JobKind, JobStatus
|
|
53
|
+
from app.models.planner_lock import PlannerLock
|
|
54
|
+
from app.models.ticket import Ticket
|
|
55
|
+
from app.models.ticket_event import TicketEvent
|
|
56
|
+
|
|
57
|
+
# Deferred import to avoid circular dependency with async database
|
|
58
|
+
# from app.services.orchestrator_log import add_orchestrator_log # imported inside methods
|
|
59
|
+
from app.schemas.planner import PlannerAction, PlannerActionType, PlannerTickResponse
|
|
60
|
+
from app.services.config_service import PlannerConfig
|
|
61
|
+
from app.services.llm_service import LLMService
|
|
62
|
+
from app.state_machine import ActorType, EventType, TicketState
|
|
63
|
+
|
|
64
|
+
if TYPE_CHECKING:
|
|
65
|
+
pass
|
|
66
|
+
|
|
67
|
+
logger = logging.getLogger(__name__)
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
# Event type for planner reflections (using COMMENT).
# Reflections are recorded as ordinary COMMENT ticket events; the marker below
# is presumably stored in the event payload so already-reflected tickets can be
# detected — confirm against _generate_reflections.
REFLECTION_EVENT_TYPE = EventType.COMMENT.value
REFLECTION_MARKER = "planner_reflection"

# Payload marker for follow-up link (tags the event that records a
# planner-created follow-up ticket).
FOLLOWUP_MARKER = "planner_followup_created"

# Lock settings for the single-tick concurrency guard (see _acquire_lock).
PLANNER_LOCK_KEY = "planner_tick"  # well-known row key in planner_locks
LOCK_STALE_MINUTES = 10  # Consider lock stale after this many minutes
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class PlannerLockError(Exception):
    """Raised when the planner lock cannot be acquired.

    Signals that another planner tick is already in progress (or a
    non-stale lock row exists); callers should retry later rather than
    run a concurrent tick.
    """
    # NOTE: the original had a redundant `pass` after the docstring; the
    # docstring alone is a sufficient class body, so it was removed.
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
@dataclass
class FollowUpProposal:
    """Proposed follow-up ticket for a blocked ticket.

    Produced by the LLM-powered blocked-ticket handling; follow-ups are
    created in PROPOSED state only (per the planner's permission model).
    """

    # Title of the follow-up ticket to be created.
    title: str
    # Description body of the follow-up ticket.
    description: str
    # Verification steps for the follow-up — presumably command strings or
    # acceptance criteria copied onto the new ticket; confirm with the
    # code that consumes proposals.
    verification: list[str]
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
@dataclass
class ReflectionSummary:
    """Generated reflection summary for a completed ticket.

    Recorded as a COMMENT TicketEvent; the planner never modifies the
    ticket's own title or description.
    """

    # LLM-generated summary text for the DONE ticket.
    summary: str
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
class PlannerService:
|
|
105
|
+
"""Service for automated workflow planning decisions.
|
|
106
|
+
|
|
107
|
+
The planner operates deterministically for ticket selection and uses
|
|
108
|
+
LLM only for generating follow-up proposals and reflections.
|
|
109
|
+
|
|
110
|
+
Thread safety:
|
|
111
|
+
Uses a database lock row to ensure only one tick runs at a time.
|
|
112
|
+
This prevents race conditions where two concurrent ticks might
|
|
113
|
+
both see "no executing ticket" and both enqueue jobs.
|
|
114
|
+
|
|
115
|
+
For ticket generation from goals, use TicketGenerationService instead.
|
|
116
|
+
"""
|
|
117
|
+
|
|
118
|
+
def __init__(
    self,
    db: AsyncSession,
    config: PlannerConfig | None = None,
    llm_service: LLMService | None = None,
):
    """Initialize the planner service.

    Args:
        db: Async database session used for all planner reads/writes.
        config: Planner configuration. If None, a default PlannerConfig
            is constructed.
        llm_service: LLM service instance. If None, one is created from
            the resolved config.
    """
    self.db = db
    # Unique identity for this tick instance; used as the owner_id on the
    # planner lock row so only this instance can release its own lock.
    self._lock_owner_id = str(uuid.uuid4())

    # Resolve defaults explicitly against None (a falsy-but-valid config
    # object must not be replaced).
    self.config = PlannerConfig() if config is None else config
    self.llm_service = (
        LLMService(self.config) if llm_service is None else llm_service
    )
|
|
143
|
+
|
|
144
|
+
async def tick(self, force_execute: bool = False) -> PlannerTickResponse:
    """Run one decision cycle of the planner.

    Evaluates the current board state and takes appropriate actions:
    1. Pick and execute next planned ticket (if no active execution)
    2. Generate follow-ups for blocked tickets (with caps)
    3. Generate reflections for done tickets

    Args:
        force_execute: If True, queue planned tickets regardless of config.
            Used by /planner/start to explicitly trigger execution.

    Thread safety:
        Acquires a database lock before processing. If another tick
        is already running, raises PlannerLockError.

    Returns:
        PlannerTickResponse with actions taken and summary.

    Raises:
        PlannerLockError: If lock cannot be acquired (another tick is running).

    INVARIANT: Lock acquisition MUST be the first DB operation in this method.
    The lock acquire may rollback on IntegrityError, which would wipe any
    previously staged changes. Do not add DB writes before _acquire_lock().
    """
    # Local import to avoid circular dependency with async database at module load
    from app.services.orchestrator_log import add_orchestrator_log

    actions: list[PlannerAction] = []
    jobs_to_enqueue: list[str] = []  # Job IDs to enqueue AFTER commit

    # INVARIANT: This MUST be the first DB operation. See docstring.
    # Lock acquisition may rollback on IntegrityError.
    await self._acquire_lock()
    add_orchestrator_log(
        "INFO", "Planner tick started", {"owner": self._lock_owner_id}
    )

    try:
        # 1. Queue planned tickets for execution.
        # force_execute bypasses config AND queues ALL planned tickets (used by /planner/start)
        # Normal auto_execute only queues one at a time
        if force_execute:
            # Queue ALL planned tickets when user explicitly starts autopilot
            add_orchestrator_log(
                "INFO", "Force execute: queueing all planned tickets"
            )
            execute_results = await self._queue_all_planned_tickets()
            for action, job_id in execute_results:
                actions.append(action)
                if job_id:
                    # Enqueue is deferred until after commit (see below).
                    jobs_to_enqueue.append(job_id)
                    add_orchestrator_log(
                        "INFO",
                        f"Queued ticket for execution: {action.ticket_title}",
                        {"ticket_id": action.ticket_id, "job_id": job_id},
                    )
            if not execute_results:
                add_orchestrator_log("INFO", "No planned tickets to queue")
        elif self.config.features.auto_execute:
            # Normal periodic tick: queue one at a time
            if not await self._has_active_execution():
                add_orchestrator_log(
                    "INFO", "No active execution, checking for planned tickets"
                )
                execute_results = await self._pick_and_execute_next()
                for action, job_id in execute_results:
                    actions.append(action)
                    if job_id:
                        jobs_to_enqueue.append(job_id)
                        add_orchestrator_log(
                            "INFO",
                            f"Queued ticket for execution: {action.ticket_title}",
                            {"ticket_id": action.ticket_id, "job_id": job_id},
                        )
                if not execute_results:
                    # No planned tickets to queue
                    logger.debug("No planned tickets to queue")
                    add_orchestrator_log("INFO", "No planned tickets to queue")
            else:
                # Log that we skipped due to active execution
                add_orchestrator_log(
                    "INFO",
                    "Skipped: Active execution in progress",
                    {"reason": "executing or verifying ticket exists"},
                )
                actions.append(
                    PlannerAction(
                        action_type=PlannerActionType.SKIPPED,
                        ticket_id="",
                        ticket_title=None,
                        details={
                            "reason": "Active execution in progress (executing or verifying ticket exists)"
                        },
                    )
                )

        # 2. Unblock tickets whose blockers are now done
        unblock_actions = await self._unblock_ready_tickets()
        actions.extend(unblock_actions)
        if unblock_actions:
            for ua in unblock_actions:
                add_orchestrator_log(
                    "INFO",
                    f"Unblocked ticket: '{ua.ticket_title}'",
                    {
                        "ticket_id": ua.ticket_id,
                        "blocker_ticket_id": ua.details.get("blocker_ticket_id"),
                        "blocker_title": ua.details.get("blocker_title"),
                    },
                )

        # 3. Handle blocked tickets (LLM-powered, with caps)
        if self.config.features.propose_followups:
            followup_actions = await self._handle_blocked_tickets()
            actions.extend(followup_actions)

        # 4. Generate reflections (LLM-powered)
        if self.config.features.generate_reflections:
            reflection_actions = await self._generate_reflections()
            actions.extend(reflection_actions)

        # 5. UDAR incremental replanning (Phase 3)
        if (
            self.config.udar.enabled
            and self.config.udar.enable_incremental_replanning
        ):
            add_orchestrator_log(
                "INFO",
                "UDAR incremental replanning enabled, checking for completed tickets",
            )
            replan_actions = await self._udar_incremental_replan()
            actions.extend(replan_actions)
            if replan_actions:
                add_orchestrator_log(
                    "INFO",
                    f"UDAR generated {len(replan_actions)} follow-up proposals",
                    {"count": len(replan_actions)},
                )

        # 6. Auto-merge DONE tickets with autonomy enabled
        auto_merge_actions = await self._auto_merge_done_tickets()
        actions.extend(auto_merge_actions)
        if auto_merge_actions:
            add_orchestrator_log(
                "INFO",
                f"Auto-merged {len(auto_merge_actions)} ticket(s)",
                {"count": len(auto_merge_actions)},
            )

        # Commit all DB changes BEFORE enqueueing Celery jobs
        await self.db.commit()
        add_orchestrator_log("DEBUG", "DB changes committed")

    finally:
        # Always release lock, even on error.
        # NOTE(review): _release_lock commits the session; if an exception
        # occurred above before the main commit, any staged-but-uncommitted
        # changes would be committed here too — confirm this is intended.
        await self._release_lock()

    # Enqueue Celery jobs AFTER commit (prevents stale DB state).
    # NOTE(review): if the try block raised, this code never runs, so no
    # jobs are enqueued for a failed tick.
    for job_id in jobs_to_enqueue:
        await self._enqueue_celery_job(job_id)
        add_orchestrator_log(
            "INFO", f"Celery task enqueued for job {job_id[:8]}..."
        )

    # Generate summary
    summary = self._generate_summary(actions)
    add_orchestrator_log(
        "INFO",
        f"Planner tick completed: {summary}",
        {"actions_count": len(actions), "jobs_enqueued": len(jobs_to_enqueue)},
    )

    return PlannerTickResponse(
        actions=actions,
        summary=summary,
    )
|
|
322
|
+
|
|
323
|
+
# =========================================================================
|
|
324
|
+
# LOCK MANAGEMENT
|
|
325
|
+
# =========================================================================
|
|
326
|
+
|
|
327
|
+
async def _acquire_lock(self) -> None:
    """Acquire the planner lock atomically.

    Uses UPDATE-then-INSERT pattern to prevent race conditions:
    1. Try to UPDATE an existing stale lock (atomic claim)
    2. If no rows updated, try INSERT (no lock exists)
    3. If INSERT fails with IntegrityError, lock is held by another tick

    This prevents the race where two requests both see a stale lock
    and both try to claim it.

    Side effects:
        On IntegrityError the session is rolled back, discarding any
        previously staged (uncommitted) changes — which is why tick()
        requires this to be the first DB operation.

    Raises:
        PlannerLockError: If lock cannot be acquired.
    """
    # A lock older than LOCK_STALE_MINUTES is treated as abandoned
    # (e.g. a crashed tick) and may be claimed.
    stale_threshold = datetime.now(UTC) - timedelta(minutes=LOCK_STALE_MINUTES)
    now = datetime.now(UTC)

    # STEP 1: Try to atomically claim a stale lock via UPDATE
    # This is safe because UPDATE with WHERE is atomic - only one request wins
    update_result = await self.db.execute(
        update(PlannerLock)
        .where(
            and_(
                PlannerLock.lock_key == PLANNER_LOCK_KEY,
                PlannerLock.acquired_at < stale_threshold,
            )
        )
        .values(
            owner_id=self._lock_owner_id,
            acquired_at=now,
        )
    )

    if update_result.rowcount > 0:
        # Successfully claimed a stale lock - flush to ensure visibility
        await self.db.flush()
        logger.debug(
            f"Acquired planner lock by claiming stale (owner={self._lock_owner_id})"
        )
        return

    # STEP 2: No stale lock to claim, try INSERT (no lock exists yet)
    lock = PlannerLock(
        lock_key=PLANNER_LOCK_KEY,
        owner_id=self._lock_owner_id,
        acquired_at=now,
    )
    self.db.add(lock)

    try:
        # Flush (not commit) so a unique-key conflict surfaces immediately.
        await self.db.flush()
        logger.debug(
            f"Acquired planner lock via insert (owner={self._lock_owner_id})"
        )
    except IntegrityError:
        await self.db.rollback()
        # Lock already held by another tick (and it's not stale)
        existing = await self.db.execute(
            select(PlannerLock).where(PlannerLock.lock_key == PLANNER_LOCK_KEY)
        )
        existing_lock = existing.scalar_one_or_none()
        if existing_lock:
            raise PlannerLockError(
                f"Planner tick already in progress (started at {existing_lock.acquired_at}, "
                f"owner={existing_lock.owner_id})"
            )
        # Holder disappeared between the rollback and the re-read; still
        # report failure rather than retrying here.
        raise PlannerLockError("Failed to acquire planner lock")
|
|
394
|
+
|
|
395
|
+
async def _release_lock(self) -> None:
    """Release the planner lock owned by this tick instance.

    Deletes only the lock row whose owner_id matches this instance, so a
    lock claimed by a newer tick (after ours went stale) is left intact.
    Failures are logged and swallowed — release is best-effort.
    """
    owned_lock = and_(
        PlannerLock.lock_key == PLANNER_LOCK_KEY,
        PlannerLock.owner_id == self._lock_owner_id,
    )
    try:
        await self.db.execute(delete(PlannerLock).where(owned_lock))
        await self.db.commit()
        logger.debug(f"Released planner lock (owner={self._lock_owner_id})")
    except Exception as e:
        logger.warning(f"Failed to release planner lock: {e}")
|
|
410
|
+
|
|
411
|
+
# =========================================================================
|
|
412
|
+
# EXECUTOR GATE
|
|
413
|
+
# =========================================================================
|
|
414
|
+
|
|
415
|
+
async def _has_active_execution(self) -> bool:
|
|
416
|
+
"""Check if there's an active execution (hard gate for running only).
|
|
417
|
+
|
|
418
|
+
Returns True if ANY of:
|
|
419
|
+
- A ticket is in EXECUTING state
|
|
420
|
+
- A ticket is in VERIFYING state
|
|
421
|
+
|
|
422
|
+
Note: We DON'T block on QUEUED jobs - this allows the planner to
|
|
423
|
+
queue all planned tickets, while still respecting that only one
|
|
424
|
+
ticket should be actively executing at a time.
|
|
425
|
+
"""
|
|
426
|
+
# Check for executing or verifying tickets
|
|
427
|
+
active_ticket_result = await self.db.execute(
|
|
428
|
+
select(Ticket.id)
|
|
429
|
+
.where(
|
|
430
|
+
Ticket.state.in_(
|
|
431
|
+
[
|
|
432
|
+
TicketState.EXECUTING.value,
|
|
433
|
+
TicketState.VERIFYING.value,
|
|
434
|
+
]
|
|
435
|
+
)
|
|
436
|
+
)
|
|
437
|
+
.limit(1)
|
|
438
|
+
)
|
|
439
|
+
if active_ticket_result.scalar_one_or_none():
|
|
440
|
+
logger.debug("Active execution gate: ticket in executing/verifying state")
|
|
441
|
+
return True
|
|
442
|
+
|
|
443
|
+
# Check for RUNNING execute jobs only (not queued)
|
|
444
|
+
running_job_result = await self.db.execute(
|
|
445
|
+
select(Job.id)
|
|
446
|
+
.where(
|
|
447
|
+
and_(
|
|
448
|
+
Job.kind == JobKind.EXECUTE.value,
|
|
449
|
+
Job.status == JobStatus.RUNNING.value,
|
|
450
|
+
)
|
|
451
|
+
)
|
|
452
|
+
.limit(1)
|
|
453
|
+
)
|
|
454
|
+
if running_job_result.scalar_one_or_none():
|
|
455
|
+
logger.debug("Active execution gate: execute job running")
|
|
456
|
+
return True
|
|
457
|
+
|
|
458
|
+
return False
|
|
459
|
+
|
|
460
|
+
# =========================================================================
|
|
461
|
+
# PICK AND EXECUTE
|
|
462
|
+
# =========================================================================
|
|
463
|
+
|
|
464
|
+
async def _pick_and_execute_next(self) -> list[tuple[PlannerAction, str | None]]:
|
|
465
|
+
"""Queue the SINGLE highest-priority planned ticket for execution.
|
|
466
|
+
|
|
467
|
+
Policy:
|
|
468
|
+
- Only one ticket can be actively executing at a time (enforced by _has_active_execution)
|
|
469
|
+
- Only ONE ticket is queued at a time - no new jobs if any QUEUED execute jobs exist
|
|
470
|
+
- This ensures Celery always executes highest priority tickets first
|
|
471
|
+
- Tickets blocked by incomplete dependencies are moved to BLOCKED state
|
|
472
|
+
|
|
473
|
+
NOTE: This only creates job rows. Celery enqueueing happens AFTER commit.
|
|
474
|
+
|
|
475
|
+
Returns:
|
|
476
|
+
List of tuples (PlannerAction, job_id) - will have 0 or 1 elements.
|
|
477
|
+
"""
|
|
478
|
+
results: list[tuple[PlannerAction, str | None]] = []
|
|
479
|
+
|
|
480
|
+
# Check if there are ANY queued or running execute jobs
|
|
481
|
+
# If so, don't queue anything new - wait for the current job to complete
|
|
482
|
+
active_job_result = await self.db.execute(
|
|
483
|
+
select(Job.id)
|
|
484
|
+
.where(
|
|
485
|
+
and_(
|
|
486
|
+
Job.kind == JobKind.EXECUTE.value,
|
|
487
|
+
Job.status.in_([JobStatus.QUEUED.value, JobStatus.RUNNING.value]),
|
|
488
|
+
)
|
|
489
|
+
)
|
|
490
|
+
.limit(1)
|
|
491
|
+
)
|
|
492
|
+
if active_job_result.scalar_one_or_none():
|
|
493
|
+
logger.debug(
|
|
494
|
+
"Execute job already queued or running, not queuing new tickets"
|
|
495
|
+
)
|
|
496
|
+
return results
|
|
497
|
+
|
|
498
|
+
# Find planned tickets ordered by priority
|
|
499
|
+
# We need to check multiple in case the first ones are blocked
|
|
500
|
+
planned_result = await self.db.execute(
|
|
501
|
+
select(Ticket)
|
|
502
|
+
.where(Ticket.state == TicketState.PLANNED.value)
|
|
503
|
+
.options(selectinload(Ticket.blocked_by)) # Load blocker relationship
|
|
504
|
+
.order_by(
|
|
505
|
+
Ticket.priority.desc().nulls_last(),
|
|
506
|
+
Ticket.created_at.asc(),
|
|
507
|
+
)
|
|
508
|
+
.limit(10) # Check up to 10 tickets
|
|
509
|
+
)
|
|
510
|
+
planned_tickets = list(planned_result.scalars().all())
|
|
511
|
+
|
|
512
|
+
if not planned_tickets:
|
|
513
|
+
logger.info("No planned tickets to queue")
|
|
514
|
+
return results
|
|
515
|
+
|
|
516
|
+
# Find the first ticket that is NOT blocked by an incomplete dependency
|
|
517
|
+
selected_ticket = None
|
|
518
|
+
for ticket in planned_tickets:
|
|
519
|
+
if ticket.blocked_by_ticket_id:
|
|
520
|
+
# Check if the blocker is done
|
|
521
|
+
if (
|
|
522
|
+
ticket.blocked_by
|
|
523
|
+
and ticket.blocked_by.state == TicketState.DONE.value
|
|
524
|
+
):
|
|
525
|
+
# Blocker is done, this ticket can be executed
|
|
526
|
+
selected_ticket = ticket
|
|
527
|
+
logger.info(
|
|
528
|
+
f"Ticket {ticket.id} was blocked by {ticket.blocked_by_ticket_id} "
|
|
529
|
+
f"but blocker is now DONE, can proceed"
|
|
530
|
+
)
|
|
531
|
+
break
|
|
532
|
+
else:
|
|
533
|
+
# Blocker is not done, move ticket to BLOCKED state
|
|
534
|
+
blocker_title = (
|
|
535
|
+
ticket.blocked_by.title if ticket.blocked_by else "unknown"
|
|
536
|
+
)
|
|
537
|
+
logger.info(
|
|
538
|
+
f"Ticket {ticket.id} is blocked by incomplete ticket "
|
|
539
|
+
f"{ticket.blocked_by_ticket_id} ({blocker_title}), moving to BLOCKED"
|
|
540
|
+
)
|
|
541
|
+
ticket.state = TicketState.BLOCKED.value
|
|
542
|
+
|
|
543
|
+
# Create event for the transition
|
|
544
|
+
event = TicketEvent(
|
|
545
|
+
ticket_id=ticket.id,
|
|
546
|
+
event_type=EventType.TRANSITIONED.value,
|
|
547
|
+
from_state=TicketState.PLANNED.value,
|
|
548
|
+
to_state=TicketState.BLOCKED.value,
|
|
549
|
+
actor_type=ActorType.PLANNER.value,
|
|
550
|
+
actor_id="planner",
|
|
551
|
+
reason=f"Blocked by incomplete ticket: {blocker_title}",
|
|
552
|
+
payload_json=json.dumps(
|
|
553
|
+
{
|
|
554
|
+
"blocked_by_ticket_id": ticket.blocked_by_ticket_id,
|
|
555
|
+
"blocked_by_title": blocker_title,
|
|
556
|
+
}
|
|
557
|
+
),
|
|
558
|
+
)
|
|
559
|
+
self.db.add(event)
|
|
560
|
+
# Continue to check next ticket
|
|
561
|
+
else:
|
|
562
|
+
# No blocker, can be executed
|
|
563
|
+
selected_ticket = ticket
|
|
564
|
+
break
|
|
565
|
+
|
|
566
|
+
if not selected_ticket:
|
|
567
|
+
logger.info("All planned tickets are blocked by dependencies")
|
|
568
|
+
return results
|
|
569
|
+
|
|
570
|
+
# Create execute job (do NOT enqueue Celery yet)
|
|
571
|
+
# Inherit board_id from ticket for permission scoping
|
|
572
|
+
job = Job(
|
|
573
|
+
ticket_id=selected_ticket.id,
|
|
574
|
+
board_id=selected_ticket.board_id,
|
|
575
|
+
kind=JobKind.EXECUTE.value,
|
|
576
|
+
status=JobStatus.QUEUED.value,
|
|
577
|
+
)
|
|
578
|
+
self.db.add(job)
|
|
579
|
+
await self.db.flush()
|
|
580
|
+
await self.db.refresh(job)
|
|
581
|
+
|
|
582
|
+
# Create event for the action
|
|
583
|
+
event = TicketEvent(
|
|
584
|
+
ticket_id=selected_ticket.id,
|
|
585
|
+
event_type=EventType.COMMENT.value,
|
|
586
|
+
from_state=selected_ticket.state,
|
|
587
|
+
to_state=selected_ticket.state,
|
|
588
|
+
actor_type=ActorType.PLANNER.value,
|
|
589
|
+
actor_id="planner",
|
|
590
|
+
reason="Planner enqueued execute job (queue position: 1)",
|
|
591
|
+
payload_json=json.dumps(
|
|
592
|
+
{
|
|
593
|
+
"action": "enqueued_execute",
|
|
594
|
+
"job_id": job.id,
|
|
595
|
+
"queue_position": 1,
|
|
596
|
+
}
|
|
597
|
+
),
|
|
598
|
+
)
|
|
599
|
+
self.db.add(event)
|
|
600
|
+
|
|
601
|
+
logger.info(
|
|
602
|
+
f"Planner created execute job {job.id} for ticket {selected_ticket.id} "
|
|
603
|
+
f"(priority: {selected_ticket.priority})"
|
|
604
|
+
)
|
|
605
|
+
|
|
606
|
+
results.append(
|
|
607
|
+
(
|
|
608
|
+
PlannerAction(
|
|
609
|
+
action_type=PlannerActionType.ENQUEUED_EXECUTE,
|
|
610
|
+
ticket_id=selected_ticket.id,
|
|
611
|
+
ticket_title=selected_ticket.title,
|
|
612
|
+
details={"job_id": job.id, "queue_position": 1},
|
|
613
|
+
),
|
|
614
|
+
job.id,
|
|
615
|
+
)
|
|
616
|
+
)
|
|
617
|
+
|
|
618
|
+
return results
|
|
619
|
+
|
|
620
|
+
async def _queue_all_planned_tickets(
|
|
621
|
+
self,
|
|
622
|
+
) -> list[tuple[PlannerAction, str | None]]:
|
|
623
|
+
"""Queue ALL planned tickets for execution in priority order.
|
|
624
|
+
|
|
625
|
+
Used by /planner/start to queue the entire backlog at once.
|
|
626
|
+
Tickets are ordered by priority (highest first), then by created_at (oldest first).
|
|
627
|
+
Tickets blocked by incomplete dependencies are moved to BLOCKED state.
|
|
628
|
+
|
|
629
|
+
NOTE: This only creates job rows. Celery enqueueing happens AFTER commit.
|
|
630
|
+
|
|
631
|
+
Returns:
|
|
632
|
+
List of tuples (PlannerAction, job_id) for each queued ticket.
|
|
633
|
+
"""
|
|
634
|
+
results: list[tuple[PlannerAction, str | None]] = []
|
|
635
|
+
|
|
636
|
+
# Find ALL planned tickets ordered by priority, with blocker relationship loaded
|
|
637
|
+
planned_result = await self.db.execute(
|
|
638
|
+
select(Ticket)
|
|
639
|
+
.where(Ticket.state == TicketState.PLANNED.value)
|
|
640
|
+
.options(selectinload(Ticket.blocked_by)) # Load blocker relationship
|
|
641
|
+
.order_by(
|
|
642
|
+
Ticket.priority.desc().nulls_last(),
|
|
643
|
+
Ticket.created_at.asc(),
|
|
644
|
+
)
|
|
645
|
+
)
|
|
646
|
+
planned_tickets = list(planned_result.scalars().all())
|
|
647
|
+
|
|
648
|
+
if not planned_tickets:
|
|
649
|
+
logger.info("No planned tickets to queue")
|
|
650
|
+
return results
|
|
651
|
+
|
|
652
|
+
# Check which tickets already have queued jobs to avoid duplicates
|
|
653
|
+
ticket_ids = [t.id for t in planned_tickets]
|
|
654
|
+
existing_jobs_result = await self.db.execute(
|
|
655
|
+
select(Job.ticket_id).where(
|
|
656
|
+
and_(
|
|
657
|
+
Job.ticket_id.in_(ticket_ids),
|
|
658
|
+
Job.kind == JobKind.EXECUTE.value,
|
|
659
|
+
Job.status.in_([JobStatus.QUEUED.value, JobStatus.RUNNING.value]),
|
|
660
|
+
)
|
|
661
|
+
)
|
|
662
|
+
)
|
|
663
|
+
already_queued = set(existing_jobs_result.scalars().all())
|
|
664
|
+
|
|
665
|
+
# Queue each ticket that doesn't already have a job and is not blocked
|
|
666
|
+
queue_position = 0
|
|
667
|
+
blocked_count = 0
|
|
668
|
+
for ticket in planned_tickets:
|
|
669
|
+
if ticket.id in already_queued:
|
|
670
|
+
logger.debug(f"Ticket {ticket.id} already has a queued job, skipping")
|
|
671
|
+
continue
|
|
672
|
+
|
|
673
|
+
# Check if ticket is blocked by an incomplete dependency
|
|
674
|
+
if ticket.blocked_by_ticket_id:
|
|
675
|
+
if (
|
|
676
|
+
ticket.blocked_by
|
|
677
|
+
and ticket.blocked_by.state == TicketState.DONE.value
|
|
678
|
+
):
|
|
679
|
+
# Blocker is done, this ticket can be queued
|
|
680
|
+
logger.info(
|
|
681
|
+
f"Ticket {ticket.id} was blocked by {ticket.blocked_by_ticket_id} "
|
|
682
|
+
f"but blocker is now DONE, can proceed"
|
|
683
|
+
)
|
|
684
|
+
else:
|
|
685
|
+
# Blocker is not done, move ticket to BLOCKED state
|
|
686
|
+
blocker_title = (
|
|
687
|
+
ticket.blocked_by.title if ticket.blocked_by else "unknown"
|
|
688
|
+
)
|
|
689
|
+
logger.info(
|
|
690
|
+
f"Ticket {ticket.id} is blocked by incomplete ticket "
|
|
691
|
+
f"{ticket.blocked_by_ticket_id} ({blocker_title}), moving to BLOCKED"
|
|
692
|
+
)
|
|
693
|
+
ticket.state = TicketState.BLOCKED.value
|
|
694
|
+
blocked_count += 1
|
|
695
|
+
|
|
696
|
+
# Create event for the transition
|
|
697
|
+
event = TicketEvent(
|
|
698
|
+
ticket_id=ticket.id,
|
|
699
|
+
event_type=EventType.TRANSITIONED.value,
|
|
700
|
+
from_state=TicketState.PLANNED.value,
|
|
701
|
+
to_state=TicketState.BLOCKED.value,
|
|
702
|
+
actor_type=ActorType.PLANNER.value,
|
|
703
|
+
actor_id="planner",
|
|
704
|
+
reason=f"Blocked by incomplete ticket: {blocker_title}",
|
|
705
|
+
payload_json=json.dumps(
|
|
706
|
+
{
|
|
707
|
+
"blocked_by_ticket_id": ticket.blocked_by_ticket_id,
|
|
708
|
+
"blocked_by_title": blocker_title,
|
|
709
|
+
}
|
|
710
|
+
),
|
|
711
|
+
)
|
|
712
|
+
self.db.add(event)
|
|
713
|
+
continue # Skip to next ticket
|
|
714
|
+
|
|
715
|
+
queue_position += 1
|
|
716
|
+
|
|
717
|
+
# Create execute job
|
|
718
|
+
job = Job(
|
|
719
|
+
ticket_id=ticket.id,
|
|
720
|
+
board_id=ticket.board_id,
|
|
721
|
+
kind=JobKind.EXECUTE.value,
|
|
722
|
+
status=JobStatus.QUEUED.value,
|
|
723
|
+
)
|
|
724
|
+
self.db.add(job)
|
|
725
|
+
await self.db.flush()
|
|
726
|
+
await self.db.refresh(job)
|
|
727
|
+
|
|
728
|
+
# Create event
|
|
729
|
+
event = TicketEvent(
|
|
730
|
+
ticket_id=ticket.id,
|
|
731
|
+
event_type=EventType.COMMENT.value,
|
|
732
|
+
from_state=ticket.state,
|
|
733
|
+
to_state=ticket.state,
|
|
734
|
+
actor_type=ActorType.PLANNER.value,
|
|
735
|
+
actor_id="planner",
|
|
736
|
+
reason=f"Planner enqueued execute job (queue position: {queue_position})",
|
|
737
|
+
payload_json=json.dumps(
|
|
738
|
+
{
|
|
739
|
+
"action": "enqueued_execute",
|
|
740
|
+
"job_id": job.id,
|
|
741
|
+
"queue_position": queue_position,
|
|
742
|
+
}
|
|
743
|
+
),
|
|
744
|
+
)
|
|
745
|
+
self.db.add(event)
|
|
746
|
+
|
|
747
|
+
logger.info(
|
|
748
|
+
f"Planner created execute job {job.id} for ticket {ticket.id} "
|
|
749
|
+
f"(priority: {ticket.priority}, queue position: {queue_position})"
|
|
750
|
+
)
|
|
751
|
+
|
|
752
|
+
results.append(
|
|
753
|
+
(
|
|
754
|
+
PlannerAction(
|
|
755
|
+
action_type=PlannerActionType.ENQUEUED_EXECUTE,
|
|
756
|
+
ticket_id=ticket.id,
|
|
757
|
+
ticket_title=ticket.title,
|
|
758
|
+
details={"job_id": job.id, "queue_position": queue_position},
|
|
759
|
+
),
|
|
760
|
+
job.id,
|
|
761
|
+
)
|
|
762
|
+
)
|
|
763
|
+
|
|
764
|
+
logger.info(
|
|
765
|
+
f"Queued {len(results)} planned tickets for execution, "
|
|
766
|
+
f"{blocked_count} moved to BLOCKED due to dependencies"
|
|
767
|
+
)
|
|
768
|
+
return results
|
|
769
|
+
|
|
770
|
+
async def _unblock_ready_tickets(self) -> list[PlannerAction]:
|
|
771
|
+
"""Check BLOCKED tickets and unblock those whose blockers are now done.
|
|
772
|
+
|
|
773
|
+
This method runs during each tick to automatically transition tickets
|
|
774
|
+
from BLOCKED back to PLANNED when their blocking dependency is completed.
|
|
775
|
+
|
|
776
|
+
Returns:
|
|
777
|
+
List of PlannerActions for tickets that were unblocked.
|
|
778
|
+
"""
|
|
779
|
+
actions: list[PlannerAction] = []
|
|
780
|
+
|
|
781
|
+
# Find all BLOCKED tickets that have a blocked_by_ticket_id
|
|
782
|
+
blocked_result = await self.db.execute(
|
|
783
|
+
select(Ticket)
|
|
784
|
+
.where(
|
|
785
|
+
and_(
|
|
786
|
+
Ticket.state == TicketState.BLOCKED.value,
|
|
787
|
+
Ticket.blocked_by_ticket_id.isnot(None),
|
|
788
|
+
)
|
|
789
|
+
)
|
|
790
|
+
.options(selectinload(Ticket.blocked_by))
|
|
791
|
+
)
|
|
792
|
+
blocked_tickets = list(blocked_result.scalars().all())
|
|
793
|
+
|
|
794
|
+
for ticket in blocked_tickets:
|
|
795
|
+
# Check if the blocker is now done
|
|
796
|
+
if ticket.blocked_by and ticket.blocked_by.state == TicketState.DONE.value:
|
|
797
|
+
# Unblock: transition from BLOCKED to PLANNED
|
|
798
|
+
logger.info(
|
|
799
|
+
f"Unblocking ticket {ticket.id}: blocker {ticket.blocked_by_ticket_id} "
|
|
800
|
+
f"is now DONE"
|
|
801
|
+
)
|
|
802
|
+
old_state = ticket.state
|
|
803
|
+
ticket.state = TicketState.PLANNED.value
|
|
804
|
+
|
|
805
|
+
# Create event for the transition
|
|
806
|
+
event = TicketEvent(
|
|
807
|
+
ticket_id=ticket.id,
|
|
808
|
+
event_type=EventType.TRANSITIONED.value,
|
|
809
|
+
from_state=old_state,
|
|
810
|
+
to_state=TicketState.PLANNED.value,
|
|
811
|
+
actor_type=ActorType.PLANNER.value,
|
|
812
|
+
actor_id="planner",
|
|
813
|
+
reason=f"Unblocked: blocking ticket '{ticket.blocked_by.title}' is now done",
|
|
814
|
+
payload_json=json.dumps(
|
|
815
|
+
{
|
|
816
|
+
"blocker_ticket_id": ticket.blocked_by_ticket_id,
|
|
817
|
+
"blocker_title": ticket.blocked_by.title,
|
|
818
|
+
"action": "unblocked",
|
|
819
|
+
}
|
|
820
|
+
),
|
|
821
|
+
)
|
|
822
|
+
self.db.add(event)
|
|
823
|
+
|
|
824
|
+
actions.append(
|
|
825
|
+
PlannerAction(
|
|
826
|
+
action_type="unblocked",
|
|
827
|
+
ticket_id=ticket.id,
|
|
828
|
+
ticket_title=ticket.title,
|
|
829
|
+
details={
|
|
830
|
+
"blocker_ticket_id": ticket.blocked_by_ticket_id,
|
|
831
|
+
"blocker_title": ticket.blocked_by.title,
|
|
832
|
+
},
|
|
833
|
+
)
|
|
834
|
+
)
|
|
835
|
+
|
|
836
|
+
# Clear the dependency FK so UI stops showing the badge
|
|
837
|
+
ticket.blocked_by_ticket_id = None
|
|
838
|
+
|
|
839
|
+
if actions:
|
|
840
|
+
logger.info(f"Unblocked {len(actions)} tickets")
|
|
841
|
+
|
|
842
|
+
return actions
|
|
843
|
+
|
|
844
|
+
async def _enqueue_celery_job(self, job_id: str) -> None:
|
|
845
|
+
"""Enqueue a Celery task for a job (called AFTER commit).
|
|
846
|
+
|
|
847
|
+
This method is designed to be idempotent and failure-tolerant:
|
|
848
|
+
- If the Celery task fails to enqueue, the job watchdog will recover it
|
|
849
|
+
- If the DB update fails after Celery enqueue, the task ID is lost but
|
|
850
|
+
the watchdog will re-enqueue (Celery task deduplicates by job_id)
|
|
851
|
+
"""
|
|
852
|
+
from app.services.task_dispatch import enqueue_task
|
|
853
|
+
|
|
854
|
+
try:
|
|
855
|
+
# Re-fetch the job to update task ID
|
|
856
|
+
result = await self.db.execute(select(Job).where(Job.id == job_id))
|
|
857
|
+
job = result.scalar_one_or_none()
|
|
858
|
+
if not job:
|
|
859
|
+
logger.error(f"Job {job_id} not found when enqueueing task")
|
|
860
|
+
return
|
|
861
|
+
|
|
862
|
+
# Skip if job already has a task ID (idempotency)
|
|
863
|
+
if job.celery_task_id:
|
|
864
|
+
logger.debug(f"Job {job_id} already has task {job.celery_task_id}")
|
|
865
|
+
return
|
|
866
|
+
|
|
867
|
+
# Enqueue the task via unified dispatch
|
|
868
|
+
task = enqueue_task("execute_ticket", args=[job_id])
|
|
869
|
+
job.celery_task_id = task.id
|
|
870
|
+
await self.db.commit()
|
|
871
|
+
|
|
872
|
+
logger.info(f"Enqueued task {task.id} for job {job_id}")
|
|
873
|
+
except Exception as e:
|
|
874
|
+
logger.error(f"Failed to enqueue Celery task for job {job_id}: {e}")
|
|
875
|
+
# Don't re-raise - the watchdog will recover this job
|
|
876
|
+
# Rolling back to ensure clean session state
|
|
877
|
+
await self.db.rollback()
|
|
878
|
+
|
|
879
|
+
# =========================================================================
|
|
880
|
+
# BLOCKED TICKET HANDLING (with caps)
|
|
881
|
+
# =========================================================================
|
|
882
|
+
|
|
883
|
+
async def _handle_blocked_tickets(self) -> list[PlannerAction]:
|
|
884
|
+
"""Generate follow-up proposals for blocked tickets.
|
|
885
|
+
|
|
886
|
+
Enforces caps:
|
|
887
|
+
- max_followups_per_ticket: Skip tickets that already have this many follow-ups
|
|
888
|
+
- max_followups_per_tick: Stop creating follow-ups after this limit
|
|
889
|
+
- skip_followup_reasons: Skip certain blocker reasons
|
|
890
|
+
|
|
891
|
+
Returns:
|
|
892
|
+
List of PlannerActions for follow-ups created.
|
|
893
|
+
"""
|
|
894
|
+
actions: list[PlannerAction] = []
|
|
895
|
+
followups_created_this_tick = 0
|
|
896
|
+
|
|
897
|
+
# Find blocked tickets
|
|
898
|
+
blocked_result = await self.db.execute(
|
|
899
|
+
select(Ticket)
|
|
900
|
+
.where(Ticket.state == TicketState.BLOCKED.value)
|
|
901
|
+
.options(selectinload(Ticket.goal), selectinload(Ticket.events))
|
|
902
|
+
)
|
|
903
|
+
blocked_tickets = blocked_result.scalars().all()
|
|
904
|
+
|
|
905
|
+
for ticket in blocked_tickets:
|
|
906
|
+
# Cap: max follow-ups per tick
|
|
907
|
+
if followups_created_this_tick >= self.config.max_followups_per_tick:
|
|
908
|
+
logger.debug(
|
|
909
|
+
f"Hit max_followups_per_tick ({self.config.max_followups_per_tick}), stopping"
|
|
910
|
+
)
|
|
911
|
+
break
|
|
912
|
+
|
|
913
|
+
# Cap: count existing follow-ups for this ticket
|
|
914
|
+
existing_followup_count = sum(
|
|
915
|
+
1
|
|
916
|
+
for event in ticket.events
|
|
917
|
+
if event.payload_json and FOLLOWUP_MARKER in event.payload_json
|
|
918
|
+
)
|
|
919
|
+
if existing_followup_count >= self.config.max_followups_per_ticket:
|
|
920
|
+
logger.debug(
|
|
921
|
+
f"Ticket {ticket.id} already has {existing_followup_count} follow-ups "
|
|
922
|
+
f"(max {self.config.max_followups_per_ticket}), skipping"
|
|
923
|
+
)
|
|
924
|
+
continue
|
|
925
|
+
|
|
926
|
+
# Get the blocker reason and payload from the most recent blocking event
|
|
927
|
+
blocker_reason = None
|
|
928
|
+
blocker_payload = {}
|
|
929
|
+
for event in reversed(ticket.events):
|
|
930
|
+
if event.to_state == TicketState.BLOCKED.value and event.reason:
|
|
931
|
+
blocker_reason = event.reason
|
|
932
|
+
if event.payload_json:
|
|
933
|
+
try:
|
|
934
|
+
blocker_payload = json.loads(event.payload_json)
|
|
935
|
+
except (json.JSONDecodeError, TypeError):
|
|
936
|
+
blocker_payload = {}
|
|
937
|
+
break
|
|
938
|
+
|
|
939
|
+
# Skip: tickets with skip_followup flag (no changes needed)
|
|
940
|
+
if blocker_payload.get("skip_followup"):
|
|
941
|
+
logger.debug(
|
|
942
|
+
f"Skipping follow-up for ticket {ticket.id}: "
|
|
943
|
+
f"skip_followup flag is set (no changes needed)"
|
|
944
|
+
)
|
|
945
|
+
continue
|
|
946
|
+
|
|
947
|
+
# Skip: tickets that already have a manual work follow-up
|
|
948
|
+
if blocker_payload.get("manual_work_followup_id"):
|
|
949
|
+
logger.debug(
|
|
950
|
+
f"Skipping follow-up for ticket {ticket.id}: "
|
|
951
|
+
f"already has manual work follow-up {blocker_payload.get('manual_work_followup_id')}"
|
|
952
|
+
)
|
|
953
|
+
continue
|
|
954
|
+
|
|
955
|
+
# Skip: certain blocker reasons should not trigger follow-ups
|
|
956
|
+
if blocker_reason and self._should_skip_followup(blocker_reason):
|
|
957
|
+
logger.debug(
|
|
958
|
+
f"Skipping follow-up for ticket {ticket.id}: "
|
|
959
|
+
f"blocker reason '{blocker_reason}' is in skip list"
|
|
960
|
+
)
|
|
961
|
+
continue
|
|
962
|
+
|
|
963
|
+
# Fetch sibling ticket titles in the same goal to avoid duplicates
|
|
964
|
+
sibling_titles: list[str] = []
|
|
965
|
+
if ticket.goal_id:
|
|
966
|
+
sibling_result = await self.db.execute(
|
|
967
|
+
select(Ticket.title).where(
|
|
968
|
+
and_(
|
|
969
|
+
Ticket.goal_id == ticket.goal_id,
|
|
970
|
+
Ticket.id != ticket.id,
|
|
971
|
+
)
|
|
972
|
+
)
|
|
973
|
+
)
|
|
974
|
+
sibling_titles = [row[0] for row in sibling_result.fetchall()]
|
|
975
|
+
|
|
976
|
+
# Generate follow-up proposal using LLM
|
|
977
|
+
try:
|
|
978
|
+
proposal = await self._generate_followup_proposal(
|
|
979
|
+
ticket_title=ticket.title,
|
|
980
|
+
ticket_description=ticket.description,
|
|
981
|
+
blocker_reason=blocker_reason,
|
|
982
|
+
goal_title=ticket.goal.title if ticket.goal else None,
|
|
983
|
+
goal_description=ticket.goal.description if ticket.goal else None,
|
|
984
|
+
existing_ticket_titles=sibling_titles,
|
|
985
|
+
)
|
|
986
|
+
except Exception as e:
|
|
987
|
+
logger.error(
|
|
988
|
+
f"Failed to generate follow-up for ticket {ticket.id}: {e}"
|
|
989
|
+
)
|
|
990
|
+
continue
|
|
991
|
+
|
|
992
|
+
# Determine initial state: auto-approve if goal has autonomy enabled
|
|
993
|
+
initial_state = TicketState.PROPOSED.value
|
|
994
|
+
auto_approved_followup = False
|
|
995
|
+
try:
|
|
996
|
+
goal = ticket.goal
|
|
997
|
+
if goal and goal.autonomy_enabled and goal.auto_approve_followups:
|
|
998
|
+
initial_state = TicketState.PLANNED.value
|
|
999
|
+
auto_approved_followup = True
|
|
1000
|
+
except Exception:
|
|
1001
|
+
pass
|
|
1002
|
+
|
|
1003
|
+
# Create follow-up ticket
|
|
1004
|
+
followup_ticket = Ticket(
|
|
1005
|
+
goal_id=ticket.goal_id,
|
|
1006
|
+
board_id=ticket.board_id,
|
|
1007
|
+
title=proposal.title,
|
|
1008
|
+
description=proposal.description,
|
|
1009
|
+
state=initial_state,
|
|
1010
|
+
priority=ticket.priority, # Inherit priority
|
|
1011
|
+
)
|
|
1012
|
+
self.db.add(followup_ticket)
|
|
1013
|
+
await self.db.flush()
|
|
1014
|
+
await self.db.refresh(followup_ticket)
|
|
1015
|
+
|
|
1016
|
+
# Create creation event for follow-up ticket (with parent link)
|
|
1017
|
+
creation_event = TicketEvent(
|
|
1018
|
+
ticket_id=followup_ticket.id,
|
|
1019
|
+
event_type=EventType.CREATED.value,
|
|
1020
|
+
from_state=None,
|
|
1021
|
+
to_state=initial_state,
|
|
1022
|
+
actor_type=ActorType.PLANNER.value,
|
|
1023
|
+
actor_id="planner",
|
|
1024
|
+
reason=f"Follow-up for blocked ticket: {ticket.title}",
|
|
1025
|
+
payload_json=json.dumps(
|
|
1026
|
+
{
|
|
1027
|
+
"parent_ticket_id": ticket.id, # Link to blocked ticket
|
|
1028
|
+
"blocked_ticket_id": ticket.id, # Legacy field
|
|
1029
|
+
"verification": proposal.verification,
|
|
1030
|
+
"auto_approved": auto_approved_followup,
|
|
1031
|
+
}
|
|
1032
|
+
),
|
|
1033
|
+
)
|
|
1034
|
+
self.db.add(creation_event)
|
|
1035
|
+
|
|
1036
|
+
# Record autonomy event if auto-approved
|
|
1037
|
+
if auto_approved_followup:
|
|
1038
|
+
autonomy_event = TicketEvent(
|
|
1039
|
+
ticket_id=followup_ticket.id,
|
|
1040
|
+
event_type=EventType.TRANSITIONED.value,
|
|
1041
|
+
from_state=TicketState.PROPOSED.value,
|
|
1042
|
+
to_state=TicketState.PLANNED.value,
|
|
1043
|
+
actor_type=ActorType.SYSTEM.value,
|
|
1044
|
+
actor_id="autonomy_service",
|
|
1045
|
+
reason="Auto-approved follow-up ticket (autonomy mode)",
|
|
1046
|
+
payload_json=json.dumps({"autonomy_action": "approve_followup"}),
|
|
1047
|
+
)
|
|
1048
|
+
self.db.add(autonomy_event)
|
|
1049
|
+
|
|
1050
|
+
# Create event on blocked ticket noting the follow-up
|
|
1051
|
+
link_event = TicketEvent(
|
|
1052
|
+
ticket_id=ticket.id,
|
|
1053
|
+
event_type=EventType.COMMENT.value,
|
|
1054
|
+
from_state=ticket.state,
|
|
1055
|
+
to_state=ticket.state,
|
|
1056
|
+
actor_type=ActorType.PLANNER.value,
|
|
1057
|
+
actor_id="planner",
|
|
1058
|
+
reason=f"Created follow-up ticket: {followup_ticket.title}",
|
|
1059
|
+
payload_json=json.dumps(
|
|
1060
|
+
{
|
|
1061
|
+
FOLLOWUP_MARKER: True,
|
|
1062
|
+
"followup_ticket_id": followup_ticket.id,
|
|
1063
|
+
}
|
|
1064
|
+
),
|
|
1065
|
+
)
|
|
1066
|
+
self.db.add(link_event)
|
|
1067
|
+
|
|
1068
|
+
followups_created_this_tick += 1
|
|
1069
|
+
logger.info(
|
|
1070
|
+
f"Created follow-up ticket {followup_ticket.id} for blocked ticket {ticket.id}"
|
|
1071
|
+
)
|
|
1072
|
+
|
|
1073
|
+
add_orchestrator_log(
|
|
1074
|
+
"INFO",
|
|
1075
|
+
f"Follow-up created: '{proposal.title}'",
|
|
1076
|
+
{
|
|
1077
|
+
"followup_ticket_id": followup_ticket.id,
|
|
1078
|
+
"blocked_ticket_id": ticket.id,
|
|
1079
|
+
"blocked_ticket_title": ticket.title,
|
|
1080
|
+
"blocker_reason": blocker_reason,
|
|
1081
|
+
"existing_siblings": len(sibling_titles),
|
|
1082
|
+
},
|
|
1083
|
+
)
|
|
1084
|
+
|
|
1085
|
+
actions.append(
|
|
1086
|
+
PlannerAction(
|
|
1087
|
+
action_type=PlannerActionType.PROPOSED_FOLLOWUP,
|
|
1088
|
+
ticket_id=ticket.id,
|
|
1089
|
+
ticket_title=ticket.title,
|
|
1090
|
+
details={
|
|
1091
|
+
"followup_ticket_id": followup_ticket.id,
|
|
1092
|
+
"followup_title": proposal.title,
|
|
1093
|
+
"parent_ticket_id": ticket.id,
|
|
1094
|
+
},
|
|
1095
|
+
)
|
|
1096
|
+
)
|
|
1097
|
+
|
|
1098
|
+
return actions
|
|
1099
|
+
|
|
1100
|
+
async def _generate_followup_proposal(
|
|
1101
|
+
self,
|
|
1102
|
+
ticket_title: str,
|
|
1103
|
+
ticket_description: str | None,
|
|
1104
|
+
blocker_reason: str | None,
|
|
1105
|
+
goal_title: str | None = None,
|
|
1106
|
+
goal_description: str | None = None,
|
|
1107
|
+
existing_ticket_titles: list[str] | None = None,
|
|
1108
|
+
) -> FollowUpProposal:
|
|
1109
|
+
"""Generate a follow-up ticket proposal for a blocked ticket using LLM.
|
|
1110
|
+
|
|
1111
|
+
Uses asyncio.to_thread() to avoid blocking the event loop during LLM calls.
|
|
1112
|
+
"""
|
|
1113
|
+
import asyncio
|
|
1114
|
+
|
|
1115
|
+
context_parts = []
|
|
1116
|
+
if goal_title:
|
|
1117
|
+
context_parts.append(f"Goal: {goal_title}")
|
|
1118
|
+
if goal_description:
|
|
1119
|
+
context_parts.append(f"Goal description: {goal_description}")
|
|
1120
|
+
context_parts.append(f"Blocked ticket: {ticket_title}")
|
|
1121
|
+
if ticket_description:
|
|
1122
|
+
context_parts.append(f"Ticket description: {ticket_description}")
|
|
1123
|
+
if blocker_reason:
|
|
1124
|
+
context_parts.append(f"Blocker reason: {blocker_reason}")
|
|
1125
|
+
|
|
1126
|
+
# Include existing tickets so LLM avoids duplicates
|
|
1127
|
+
existing_section = ""
|
|
1128
|
+
if existing_ticket_titles:
|
|
1129
|
+
ticket_list = "\n".join(f"- {t}" for t in existing_ticket_titles)
|
|
1130
|
+
existing_section = f"""
|
|
1131
|
+
|
|
1132
|
+
## Existing Tickets (DO NOT DUPLICATE)
|
|
1133
|
+
These tickets already exist in the same goal. Do NOT create a follow-up that overlaps with any of these:
|
|
1134
|
+
{ticket_list}"""
|
|
1135
|
+
|
|
1136
|
+
context = "\n".join(context_parts)
|
|
1137
|
+
|
|
1138
|
+
system_prompt = """You are a technical project planner. Given a blocked ticket, propose a follow-up ticket that addresses the blocker.
|
|
1139
|
+
|
|
1140
|
+
Your response MUST be valid JSON with this exact structure:
|
|
1141
|
+
{
|
|
1142
|
+
"title": "Short, actionable title for the follow-up ticket",
|
|
1143
|
+
"description": "Clear description of what needs to be done to unblock the original ticket",
|
|
1144
|
+
"verification": ["command1", "command2"]
|
|
1145
|
+
}
|
|
1146
|
+
|
|
1147
|
+
Guidelines:
|
|
1148
|
+
- The title should be concise and action-oriented
|
|
1149
|
+
- The description should explain what specifically needs to be done
|
|
1150
|
+
- Verification commands should be shell commands that can verify the follow-up is complete
|
|
1151
|
+
- Focus on the immediate blocker, not the entire original ticket
|
|
1152
|
+
- Do NOT create a ticket that duplicates an existing one"""
|
|
1153
|
+
|
|
1154
|
+
user_prompt = f"""A ticket is blocked and needs a follow-up ticket to address the blocker.
|
|
1155
|
+
|
|
1156
|
+
{context}{existing_section}
|
|
1157
|
+
|
|
1158
|
+
Generate a follow-up ticket proposal as JSON."""
|
|
1159
|
+
|
|
1160
|
+
def _blocking_llm_call():
|
|
1161
|
+
"""Execute blocking LLM call in thread pool."""
|
|
1162
|
+
return self.llm_service.call_completion(
|
|
1163
|
+
messages=[{"role": "user", "content": user_prompt}],
|
|
1164
|
+
max_tokens=self.config.max_tokens_followup,
|
|
1165
|
+
system_prompt=system_prompt,
|
|
1166
|
+
)
|
|
1167
|
+
|
|
1168
|
+
try:
|
|
1169
|
+
# Run blocking LLM call in thread pool to avoid blocking event loop
|
|
1170
|
+
response = await asyncio.to_thread(_blocking_llm_call)
|
|
1171
|
+
data = self.llm_service.safe_parse_json(response.content, {})
|
|
1172
|
+
|
|
1173
|
+
return FollowUpProposal(
|
|
1174
|
+
title=data.get("title", "Follow-up for blocked ticket"),
|
|
1175
|
+
description=data.get(
|
|
1176
|
+
"description", "Address the blocker from the original ticket."
|
|
1177
|
+
),
|
|
1178
|
+
verification=data.get("verification", []),
|
|
1179
|
+
)
|
|
1180
|
+
except Exception as e:
|
|
1181
|
+
logger.error(f"LLM API call failed: {e}")
|
|
1182
|
+
# Return a fallback proposal
|
|
1183
|
+
return FollowUpProposal(
|
|
1184
|
+
title=f"Follow-up: {ticket_title}",
|
|
1185
|
+
description=f"Address blocker: {blocker_reason or 'Unknown blocker'}",
|
|
1186
|
+
verification=[],
|
|
1187
|
+
)
|
|
1188
|
+
|
|
1189
|
+
def _should_skip_followup(self, blocker_reason: str) -> bool:
|
|
1190
|
+
"""Check if this blocker reason should skip follow-up generation."""
|
|
1191
|
+
reason_lower = blocker_reason.lower()
|
|
1192
|
+
for skip_reason in self.config.skip_followup_reasons:
|
|
1193
|
+
if skip_reason.lower() in reason_lower:
|
|
1194
|
+
return True
|
|
1195
|
+
return False
|
|
1196
|
+
|
|
1197
|
+
# =========================================================================
|
|
1198
|
+
# REFLECTION GENERATION
|
|
1199
|
+
# =========================================================================
|
|
1200
|
+
|
|
1201
|
+
async def _generate_reflections(self) -> list[PlannerAction]:
    """Create reflection summaries for DONE tickets that lack one.

    Reflections are stored as TicketEvents (type=COMMENT), never in ticket
    text. This keeps ticket data clean and reflections available as evidence.

    Returns:
        List of PlannerActions describing each reflection generated.
    """
    generated: list[PlannerAction] = []

    # Load all DONE tickets with events and evidence eagerly loaded.
    rows = await self.db.execute(
        select(Ticket)
        .where(Ticket.state == TicketState.DONE.value)
        .options(selectinload(Ticket.events), selectinload(Ticket.evidence))
    )

    for ticket in rows.scalars().all():
        # Guard: skip tickets whose event payloads already carry the marker.
        already_reflected = any(
            evt.payload_json and REFLECTION_MARKER in evt.payload_json
            for evt in ticket.events
        )
        if already_reflected:
            logger.debug(f"Ticket {ticket.id} already has a reflection")
            continue

        # Condense the ticket's history before handing it to the LLM.
        events_summary = self._summarize_events(ticket.events)
        evidence_summary = self._summarize_evidence(ticket.evidence)

        try:
            summary_result = await self._generate_reflection_summary(
                ticket_title=ticket.title,
                ticket_description=ticket.description,
                events_summary=events_summary,
                evidence_summary=evidence_summary,
            )
        except Exception as e:
            logger.error(
                f"Failed to generate reflection for ticket {ticket.id}: {e}"
            )
            continue

        # Persist the reflection as an event — never mutate ticket text.
        self.db.add(
            TicketEvent(
                ticket_id=ticket.id,
                event_type=REFLECTION_EVENT_TYPE,
                from_state=ticket.state,
                to_state=ticket.state,
                actor_type=ActorType.PLANNER.value,
                actor_id="planner",
                reason=summary_result.summary,
                payload_json=json.dumps(
                    {
                        REFLECTION_MARKER: True,
                        "type": "reflection_added",
                    }
                ),
            )
        )

        logger.info(f"Generated reflection for ticket {ticket.id}")

        generated.append(
            PlannerAction(
                action_type=PlannerActionType.GENERATED_REFLECTION,
                ticket_id=ticket.id,
                ticket_title=ticket.title,
                details={"summary": summary_result.summary},
            )
        )

    return generated
|
|
1281
|
+
|
|
1282
|
+
async def _auto_merge_done_tickets(self) -> list[PlannerAction]:
    """Auto-merge DONE tickets where the goal has auto_merge enabled.

    Scans for DONE tickets whose goals have autonomy_enabled + auto_merge,
    and where a workspace still exists (not yet merged). If no revision is
    approved yet, the latest open revision is auto-approved first.

    Returns:
        List of PlannerActions for merge results.
    """
    # Hoisted: these were previously imported inside the per-ticket loop.
    from app.models.revision import RevisionStatus
    from app.services.merge_service import MergeService
    from app.state_machine import validate_transition

    actions: list[PlannerAction] = []

    # Find DONE tickets with the relationships needed for the merge decision.
    done_result = await self.db.execute(
        select(Ticket)
        .where(Ticket.state == TicketState.DONE.value)
        .options(
            selectinload(Ticket.workspace),
            selectinload(Ticket.goal),
            selectinload(Ticket.revisions),
            selectinload(Ticket.events),
        )
    )
    done_tickets = done_result.scalars().all()

    for ticket in done_tickets:
        # Skip if no active workspace (already merged or cleaned up).
        if not ticket.workspace or not ticket.workspace.is_active:
            continue

        # Skip if goal doesn't have auto_merge enabled.
        goal = ticket.goal
        if not goal or not goal.autonomy_enabled or not goal.auto_merge:
            continue

        # Skip if already merged (check events).
        already_merged = any(
            e.event_type == EventType.MERGE_SUCCEEDED.value for e in ticket.events
        )
        if already_merged:
            continue

        # Merging needs an approved revision; auto-approve the latest open
        # one if autonomy allows it, otherwise skip this ticket.
        has_approved = any(
            r.status == RevisionStatus.APPROVED.value for r in ticket.revisions
        )
        if not has_approved:
            open_revision = next(
                (r for r in ticket.revisions if r.status == "open"),
                None,
            )
            if open_revision:
                open_revision.status = RevisionStatus.APPROVED.value
                await self.db.flush()
                logger.info(
                    f"Auto-approved revision {open_revision.id} for ticket {ticket.id}"
                )
            else:
                continue

        try:
            merge_service = MergeService(self.db)
            merge_result = await merge_service.merge_ticket(
                ticket_id=ticket.id,
                actor_id="autonomy_service",
            )

            if merge_result.success:
                actions.append(
                    PlannerAction(
                        action_type=PlannerActionType.SKIPPED,  # No specific merge type exists
                        ticket_id=ticket.id,
                        ticket_title=ticket.title,
                        details={
                            "action": "auto_merge",
                            "success": True,
                            "message": merge_result.message,
                        },
                    )
                )
                logger.info(
                    f"Auto-merged ticket {ticket.id}: {merge_result.message}"
                )
            else:
                # BUG FIX: the failure used to be logged twice on the
                # invalid-transition path; log it exactly once here.
                logger.warning(
                    f"Auto-merge failed for ticket {ticket.id}: {merge_result.message}"
                )
                # Guard the DONE → BLOCKED move with state machine validation.
                if validate_transition(
                    TicketState.DONE.value, TicketState.BLOCKED.value
                ):
                    ticket.state = TicketState.BLOCKED.value
                    self.db.add(
                        TicketEvent(
                            ticket_id=ticket.id,
                            event_type=EventType.TRANSITIONED.value,
                            from_state=TicketState.DONE.value,
                            to_state=TicketState.BLOCKED.value,
                            actor_type=ActorType.SYSTEM.value,
                            actor_id="autonomy_service",
                            reason=f"Auto-merge failed: {merge_result.message}",
                            payload_json=json.dumps(
                                {
                                    "autonomy_action": "auto_merge_failed",
                                    "merge_error": merge_result.message,
                                }
                            ),
                        )
                    )
                else:
                    # DONE → BLOCKED is not a valid transition; leave the
                    # ticket in DONE and require a manual merge.
                    logger.warning(
                        f"Ticket {ticket.id} cannot transition "
                        f"DONE → BLOCKED. Manual merge required."
                    )

        except Exception as e:
            # Merge errors must not abort the whole scan; log and continue.
            logger.error(f"Auto-merge error for ticket {ticket.id}: {e}")

    return actions
|
|
1409
|
+
|
|
1410
|
+
async def _udar_incremental_replan(self) -> list[PlannerAction]:
    """UDAR incremental replanning: analyze completed tickets and generate follow-ups.

    COST OPTIMIZATION:
    - Batches tickets (waits for ``replan_batch_size`` tickets before analyzing)
    - Only calls LLM if changes significant (>10 files OR verification failed)
    - Target: 1 LLM call per batch of completed tickets (or 0 if minor changes)

    Returns:
        List of PlannerActions for follow-ups generated.
    """
    from datetime import datetime, timedelta

    from app.services.udar_planner_service import UDARPlannerService

    actions: list[PlannerAction] = []

    # Find recently completed tickets (last 30 minutes). Analyzed status is
    # tracked via the "udar_analyzed_at" key in ticket metadata.
    recent_cutoff = datetime.utcnow() - timedelta(minutes=30)
    done_result = await self.db.execute(
        select(Ticket).where(
            Ticket.state == TicketState.DONE.value,
            Ticket.updated_at >= recent_cutoff,
        )
    )
    recent_done = done_result.scalars().all()

    # Filter to only unanalyzed tickets.
    unanalyzed = [
        t
        for t in recent_done
        if not (t.metadata_ and t.metadata_.get("udar_analyzed_at"))
    ]

    if not unanalyzed:
        logger.debug("No unanalyzed completed tickets for UDAR replanning")
        return actions

    # Wait until a full batch accumulates — prevents frequent small LLM calls.
    batch_size = self.config.udar.replan_batch_size
    if len(unanalyzed) < batch_size:
        logger.debug(
            f"UDAR replanning: Only {len(unanalyzed)} tickets, waiting for batch of {batch_size}"
        )
        return actions

    # Take up to batch_size tickets for analysis.
    tickets_to_analyze = unanalyzed[:batch_size]
    ticket_ids = [t.id for t in tickets_to_analyze]

    logger.info(
        f"UDAR replanning: Analyzing batch of {len(ticket_ids)} completed tickets"
    )

    # Call UDAR agent for batched replanning.
    udar_service = UDARPlannerService(self.db)
    try:
        result = await udar_service.replan_after_completion(ticket_ids)

        logger.info(
            f"UDAR replanning: {result['summary']} "
            f"(LLM calls: {result['llm_calls_made']})"
        )

        # Mark tickets as analyzed to avoid duplicate analysis.
        # BUG FIX: reassign a fresh dict instead of mutating in place —
        # SQLAlchemy does not track in-place mutation of a plain JSON
        # attribute, so the marker could silently fail to persist.
        # NOTE(review): harmless if the model already wraps the column in
        # MutableDict — confirm against the Ticket model definition.
        analyzed_at = datetime.utcnow().isoformat()
        for ticket in tickets_to_analyze:
            ticket.metadata_ = {
                **(ticket.metadata_ or {}),
                "udar_analyzed_at": analyzed_at,
                "udar_batch_id": ticket_ids[0],  # Track batch
            }

        # Create PlannerActions for created follow-ups.
        if result["follow_ups_created"] > 0:
            actions.append(
                PlannerAction(
                    action_type=PlannerActionType.PROPOSED_FOLLOWUP,
                    ticket_id=ticket_ids[0],  # Reference first ticket in batch
                    ticket_title=f"Batch of {len(ticket_ids)} tickets",
                    details={
                        "follow_ups_created": result["follow_ups_created"],
                        "tickets_analyzed": result["tickets_analyzed"],
                        "significant_tickets": result["significant_tickets"],
                        "llm_calls": result["llm_calls_made"],
                        "batch_size": len(ticket_ids),
                    },
                )
            )

    except Exception as e:
        # Don't fail the entire tick, just log the error.
        logger.error(f"UDAR replanning failed: {e}")

    return actions
|
|
1505
|
+
|
|
1506
|
+
async def _generate_reflection_summary(
    self,
    ticket_title: str,
    ticket_description: str | None,
    events_summary: str | None = None,
    evidence_summary: str | None = None,
) -> ReflectionSummary:
    """Produce a short LLM-written reflection for a completed ticket.

    The synchronous LLM call is dispatched via asyncio.to_thread() so the
    event loop is never blocked while waiting on the provider.
    """
    import asyncio

    # Assemble the prompt context, dropping sections with no content.
    context_lines = [f"Ticket: {ticket_title}"]
    for label, value in (
        ("Description", ticket_description),
        ("Journey", events_summary),
        ("Evidence", evidence_summary),
    ):
        if value:
            context_lines.append(f"{label}: {value}")
    context = "\n".join(context_lines)

    system_prompt = """You are a technical project assistant. Generate a brief reflection summary for a completed ticket.

Your response MUST be valid JSON with this exact structure:
{
    "summary": "A concise 2-3 sentence reflection on what was accomplished and any lessons learned"
}

Guidelines:
- Keep it brief and factual
- Highlight what was achieved
- Note any interesting patterns or challenges overcome
- Write in past tense"""

    user_prompt = f"""A ticket has been completed. Generate a reflection summary.

{context}

Generate a reflection summary as JSON."""

    def _invoke_provider():
        """Run the synchronous LLM completion (executed in a worker thread)."""
        return self.llm_service.call_completion(
            messages=[{"role": "user", "content": user_prompt}],
            max_tokens=self.config.max_tokens_reflection,
            system_prompt=system_prompt,
        )

    try:
        # Off-load the blocking call so the event loop stays responsive.
        response = await asyncio.to_thread(_invoke_provider)
        parsed = self.llm_service.safe_parse_json(response.content, {})
        return ReflectionSummary(
            summary=parsed.get("summary", f"Completed: {ticket_title}"),
        )
    except Exception as e:
        logger.error(f"LLM API call failed: {e}")
        # Degrade gracefully with a canned summary instead of raising.
        return ReflectionSummary(
            summary=f"Ticket '{ticket_title}' was completed successfully.",
        )
|
|
1570
|
+
|
|
1571
|
+
# =========================================================================
|
|
1572
|
+
# HELPERS
|
|
1573
|
+
# =========================================================================
|
|
1574
|
+
|
|
1575
|
+
def _summarize_events(self, events: list[TicketEvent]) -> str:
    """Summarize ticket events for context."""
    if not events:
        return "No events"

    steps: list[str] = []
    for evt in events:
        kind = evt.event_type
        if kind == EventType.TRANSITIONED.value:
            steps.append(f"{evt.from_state} → {evt.to_state}")
        elif kind == EventType.CREATED.value:
            steps.append(f"created ({evt.to_state})")

    # Cap at the first five steps to keep the context compact.
    return " → ".join(steps[:5]) if steps else "No state transitions"
|
|
1590
|
+
|
|
1591
|
+
def _summarize_evidence(self, evidence: list) -> str:
|
|
1592
|
+
"""Summarize verification evidence for context."""
|
|
1593
|
+
if not evidence:
|
|
1594
|
+
return "No verification evidence"
|
|
1595
|
+
|
|
1596
|
+
passed = sum(1 for e in evidence if e.succeeded)
|
|
1597
|
+
failed = len(evidence) - passed
|
|
1598
|
+
|
|
1599
|
+
parts = []
|
|
1600
|
+
if passed:
|
|
1601
|
+
parts.append(f"{passed} passed")
|
|
1602
|
+
if failed:
|
|
1603
|
+
parts.append(f"{failed} failed")
|
|
1604
|
+
|
|
1605
|
+
return ", ".join(parts) if parts else "No evidence"
|
|
1606
|
+
|
|
1607
|
+
def _generate_summary(self, actions: list[PlannerAction]) -> str:
    """Generate a human-readable summary of actions taken.

    Args:
        actions: All PlannerActions recorded during this planner tick,
            including SKIPPED placeholders.

    Returns:
        A sentence (or sentences) describing what the planner did.
    """
    if not actions:
        return "No actions taken. Board is stable."

    # Filter out SKIPPED actions for the main summary.
    real_actions = [
        a for a in actions if a.action_type != PlannerActionType.SKIPPED
    ]
    skipped_actions = [
        a for a in actions if a.action_type == PlannerActionType.SKIPPED
    ]

    if not real_actions:
        if skipped_actions:
            reasons = [
                a.details.get("reason", "unknown") if a.details else "unknown"
                for a in skipped_actions
            ]
            return f"No actions taken. Skipped: {'; '.join(reasons)}"
        return "No actions taken. Board is stable."

    parts = []

    # Count actions by type.
    executes = [
        a
        for a in real_actions
        if a.action_type == PlannerActionType.ENQUEUED_EXECUTE
    ]
    followups = [
        a
        for a in real_actions
        if a.action_type == PlannerActionType.PROPOSED_FOLLOWUP
    ]
    reflections = [
        a
        for a in real_actions
        if a.action_type == PlannerActionType.GENERATED_REFLECTION
    ]

    if executes:
        titles = [a.ticket_title or a.ticket_id for a in executes]
        parts.append(f"Enqueued execution for: {', '.join(titles)}")

    if followups:
        parts.append(
            f"Created {len(followups)} follow-up ticket(s) for blocked items"
        )

    if reflections:
        parts.append(
            f"Generated {len(reflections)} reflection(s) for completed tickets"
        )

    # BUG FIX: previously returned the bare string "." when real actions
    # existed but none were of the three counted types; fall back to a
    # count-based summary instead.
    if not parts:
        return f"Took {len(real_actions)} action(s)."

    return ". ".join(parts) + "."
|