draft-board 0.1.0-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/app/backend/.env.example +9 -0
- package/app/backend/.smartkanban/evidence/8b383839-cbec-45af-86ee-c7708d075cbe/bddf2ed5-2e21-4d46-a62b-10b87f1642a6_patch.txt +195 -0
- package/app/backend/.smartkanban/evidence/8b383839-cbec-45af-86ee-c7708d075cbe/bddf2ed5-2e21-4d46-a62b-10b87f1642a6_stat.txt +6 -0
- package/app/backend/CURL_EXAMPLES.md +335 -0
- package/app/backend/ENV_SETUP.md +65 -0
- package/app/backend/alembic/env.py +71 -0
- package/app/backend/alembic/script.py.mako +28 -0
- package/app/backend/alembic/versions/001_initial_schema.py +104 -0
- package/app/backend/alembic/versions/002_add_jobs_table.py +52 -0
- package/app/backend/alembic/versions/003_add_workspace_table.py +48 -0
- package/app/backend/alembic/versions/004_add_evidence_table.py +56 -0
- package/app/backend/alembic/versions/005_add_verification_commands.py +32 -0
- package/app/backend/alembic/versions/006_add_planner_lock_table.py +39 -0
- package/app/backend/alembic/versions/007_add_revision_review_tables.py +126 -0
- package/app/backend/alembic/versions/008_add_revision_idempotency_and_traceability.py +52 -0
- package/app/backend/alembic/versions/009_add_job_health_fields.py +46 -0
- package/app/backend/alembic/versions/010_add_review_comment_line_content.py +36 -0
- package/app/backend/alembic/versions/011_add_analysis_cache.py +47 -0
- package/app/backend/alembic/versions/012_add_boards_table.py +102 -0
- package/app/backend/alembic/versions/013_add_ticket_blocking.py +45 -0
- package/app/backend/alembic/versions/014_add_agent_sessions.py +220 -0
- package/app/backend/alembic/versions/015_add_ticket_sort_order.py +33 -0
- package/app/backend/alembic/versions/03220f0b93ae_add_pr_fields_to_ticket.py +49 -0
- package/app/backend/alembic/versions/0c2d89fff3b1_seed_board_configs_from_yaml.py +206 -0
- package/app/backend/alembic/versions/3348e5cf54c1_add_merge_checklist_table.py +67 -0
- package/app/backend/alembic/versions/357c780ee445_add_goal_status.py +34 -0
- package/app/backend/alembic/versions/553340b7e26c_add_autonomy_fields_to_goal.py +65 -0
- package/app/backend/alembic/versions/774dc335c679_merge_migration_heads.py +23 -0
- package/app/backend/alembic/versions/7b307e847cbd_merge_heads.py +23 -0
- package/app/backend/alembic/versions/82ecd978cc70_add_missing_indexes.py +48 -0
- package/app/backend/alembic/versions/8ef5054dc280_add_normalized_log_entries.py +173 -0
- package/app/backend/alembic/versions/8f3e2bd8ea3b_merge_migration_heads.py +23 -0
- package/app/backend/alembic/versions/9d17f0698d3b_add_config_column_to_boards_table.py +30 -0
- package/app/backend/alembic/versions/add_agent_conversation_history.py +72 -0
- package/app/backend/alembic/versions/add_job_variant.py +34 -0
- package/app/backend/alembic/versions/add_performance_indexes.py +95 -0
- package/app/backend/alembic/versions/add_repos_and_board_repos.py +174 -0
- package/app/backend/alembic/versions/add_session_id_to_jobs.py +27 -0
- package/app/backend/alembic/versions/add_sqlite_backend_tables.py +104 -0
- package/app/backend/alembic/versions/b10fb0b62240_add_diff_content_to_revisions.py +34 -0
- package/app/backend/alembic.ini +89 -0
- package/app/backend/app/__init__.py +3 -0
- package/app/backend/app/data_dir.py +85 -0
- package/app/backend/app/database.py +70 -0
- package/app/backend/app/database_sync.py +64 -0
- package/app/backend/app/dependencies/__init__.py +5 -0
- package/app/backend/app/dependencies/auth.py +80 -0
- package/app/backend/app/dependencies.py +43 -0
- package/app/backend/app/exceptions.py +178 -0
- package/app/backend/app/executors/__init__.py +1 -0
- package/app/backend/app/executors/adapters/__init__.py +1 -0
- package/app/backend/app/executors/adapters/aider.py +152 -0
- package/app/backend/app/executors/adapters/amazon_q.py +103 -0
- package/app/backend/app/executors/adapters/amp.py +123 -0
- package/app/backend/app/executors/adapters/claude.py +177 -0
- package/app/backend/app/executors/adapters/cline.py +127 -0
- package/app/backend/app/executors/adapters/codex.py +167 -0
- package/app/backend/app/executors/adapters/copilot.py +202 -0
- package/app/backend/app/executors/adapters/cursor.py +87 -0
- package/app/backend/app/executors/adapters/droid.py +123 -0
- package/app/backend/app/executors/adapters/gemini.py +132 -0
- package/app/backend/app/executors/adapters/goose.py +131 -0
- package/app/backend/app/executors/adapters/opencode.py +123 -0
- package/app/backend/app/executors/adapters/qwen.py +123 -0
- package/app/backend/app/executors/plugins/__init__.py +1 -0
- package/app/backend/app/executors/registry.py +202 -0
- package/app/backend/app/executors/spec.py +226 -0
- package/app/backend/app/main.py +486 -0
- package/app/backend/app/middleware/__init__.py +13 -0
- package/app/backend/app/middleware/idempotency.py +426 -0
- package/app/backend/app/middleware/rate_limit.py +312 -0
- package/app/backend/app/middleware/security_headers.py +43 -0
- package/app/backend/app/middleware/timeout.py +37 -0
- package/app/backend/app/models/__init__.py +56 -0
- package/app/backend/app/models/agent_conversation_history.py +56 -0
- package/app/backend/app/models/agent_session.py +127 -0
- package/app/backend/app/models/analysis_cache.py +49 -0
- package/app/backend/app/models/base.py +9 -0
- package/app/backend/app/models/board.py +79 -0
- package/app/backend/app/models/board_repo.py +68 -0
- package/app/backend/app/models/cost_budget.py +42 -0
- package/app/backend/app/models/enums.py +40 -0
- package/app/backend/app/models/evidence.py +132 -0
- package/app/backend/app/models/goal.py +102 -0
- package/app/backend/app/models/idempotency_entry.py +30 -0
- package/app/backend/app/models/job.py +163 -0
- package/app/backend/app/models/job_queue.py +39 -0
- package/app/backend/app/models/kv_store.py +28 -0
- package/app/backend/app/models/merge_checklist.py +87 -0
- package/app/backend/app/models/normalized_log.py +100 -0
- package/app/backend/app/models/planner_lock.py +43 -0
- package/app/backend/app/models/rate_limit_entry.py +25 -0
- package/app/backend/app/models/repo.py +66 -0
- package/app/backend/app/models/review_comment.py +91 -0
- package/app/backend/app/models/review_summary.py +69 -0
- package/app/backend/app/models/revision.py +130 -0
- package/app/backend/app/models/ticket.py +223 -0
- package/app/backend/app/models/ticket_event.py +83 -0
- package/app/backend/app/models/user.py +47 -0
- package/app/backend/app/models/workspace.py +71 -0
- package/app/backend/app/redis_client.py +119 -0
- package/app/backend/app/routers/__init__.py +29 -0
- package/app/backend/app/routers/agents.py +296 -0
- package/app/backend/app/routers/auth.py +94 -0
- package/app/backend/app/routers/board.py +885 -0
- package/app/backend/app/routers/dashboard.py +351 -0
- package/app/backend/app/routers/debug.py +528 -0
- package/app/backend/app/routers/evidence.py +96 -0
- package/app/backend/app/routers/executors.py +324 -0
- package/app/backend/app/routers/goals.py +574 -0
- package/app/backend/app/routers/jobs.py +448 -0
- package/app/backend/app/routers/maintenance.py +172 -0
- package/app/backend/app/routers/merge.py +360 -0
- package/app/backend/app/routers/planner.py +537 -0
- package/app/backend/app/routers/pull_requests.py +382 -0
- package/app/backend/app/routers/repos.py +263 -0
- package/app/backend/app/routers/revisions.py +939 -0
- package/app/backend/app/routers/settings.py +267 -0
- package/app/backend/app/routers/tickets.py +2003 -0
- package/app/backend/app/routers/webhooks.py +143 -0
- package/app/backend/app/routers/websocket.py +249 -0
- package/app/backend/app/schemas/__init__.py +109 -0
- package/app/backend/app/schemas/board.py +87 -0
- package/app/backend/app/schemas/common.py +33 -0
- package/app/backend/app/schemas/evidence.py +87 -0
- package/app/backend/app/schemas/goal.py +90 -0
- package/app/backend/app/schemas/job.py +97 -0
- package/app/backend/app/schemas/merge.py +139 -0
- package/app/backend/app/schemas/planner.py +500 -0
- package/app/backend/app/schemas/repo.py +187 -0
- package/app/backend/app/schemas/review.py +137 -0
- package/app/backend/app/schemas/revision.py +114 -0
- package/app/backend/app/schemas/ticket.py +238 -0
- package/app/backend/app/schemas/ticket_event.py +72 -0
- package/app/backend/app/schemas/workspace.py +19 -0
- package/app/backend/app/services/__init__.py +31 -0
- package/app/backend/app/services/agent_memory_service.py +223 -0
- package/app/backend/app/services/agent_registry.py +346 -0
- package/app/backend/app/services/agent_session_manager.py +318 -0
- package/app/backend/app/services/agent_session_service.py +219 -0
- package/app/backend/app/services/agent_tools.py +379 -0
- package/app/backend/app/services/auth_service.py +98 -0
- package/app/backend/app/services/autonomy_service.py +380 -0
- package/app/backend/app/services/board_repo_service.py +201 -0
- package/app/backend/app/services/board_service.py +326 -0
- package/app/backend/app/services/cleanup_service.py +1085 -0
- package/app/backend/app/services/config_service.py +908 -0
- package/app/backend/app/services/context_gatherer.py +557 -0
- package/app/backend/app/services/cost_tracking_service.py +293 -0
- package/app/backend/app/services/cursor_log_normalizer.py +536 -0
- package/app/backend/app/services/delivery_pipeline.py +440 -0
- package/app/backend/app/services/executor_service.py +634 -0
- package/app/backend/app/services/git_host/__init__.py +11 -0
- package/app/backend/app/services/git_host/factory.py +87 -0
- package/app/backend/app/services/git_host/github.py +270 -0
- package/app/backend/app/services/git_host/gitlab.py +194 -0
- package/app/backend/app/services/git_host/protocol.py +75 -0
- package/app/backend/app/services/git_merge_simple.py +346 -0
- package/app/backend/app/services/git_ops.py +384 -0
- package/app/backend/app/services/github_service.py +233 -0
- package/app/backend/app/services/goal_service.py +113 -0
- package/app/backend/app/services/job_service.py +423 -0
- package/app/backend/app/services/job_watchdog_service.py +424 -0
- package/app/backend/app/services/langchain_adapter.py +122 -0
- package/app/backend/app/services/llm_provider_clients.py +351 -0
- package/app/backend/app/services/llm_service.py +285 -0
- package/app/backend/app/services/log_normalizer.py +342 -0
- package/app/backend/app/services/log_stream_service.py +276 -0
- package/app/backend/app/services/merge_checklist_service.py +264 -0
- package/app/backend/app/services/merge_service.py +784 -0
- package/app/backend/app/services/orchestrator_log.py +84 -0
- package/app/backend/app/services/planner_service.py +1662 -0
- package/app/backend/app/services/planner_tick_sync.py +1040 -0
- package/app/backend/app/services/queued_message_service.py +156 -0
- package/app/backend/app/services/reliability_wrapper.py +389 -0
- package/app/backend/app/services/repo_discovery_service.py +318 -0
- package/app/backend/app/services/review_service.py +334 -0
- package/app/backend/app/services/revision_service.py +389 -0
- package/app/backend/app/services/safe_autopilot.py +510 -0
- package/app/backend/app/services/sqlite_worker.py +372 -0
- package/app/backend/app/services/task_dispatch.py +135 -0
- package/app/backend/app/services/ticket_generation_service.py +1781 -0
- package/app/backend/app/services/ticket_service.py +486 -0
- package/app/backend/app/services/udar_planner_service.py +1007 -0
- package/app/backend/app/services/webhook_service.py +126 -0
- package/app/backend/app/services/workspace_service.py +465 -0
- package/app/backend/app/services/worktree_file_service.py +92 -0
- package/app/backend/app/services/worktree_validator.py +213 -0
- package/app/backend/app/sqlite_kv.py +278 -0
- package/app/backend/app/state_machine.py +128 -0
- package/app/backend/app/templates/__init__.py +5 -0
- package/app/backend/app/templates/registry.py +243 -0
- package/app/backend/app/utils/__init__.py +5 -0
- package/app/backend/app/utils/artifact_reader.py +87 -0
- package/app/backend/app/utils/circuit_breaker.py +229 -0
- package/app/backend/app/utils/db_retry.py +136 -0
- package/app/backend/app/utils/ignored_fields.py +123 -0
- package/app/backend/app/utils/validators.py +54 -0
- package/app/backend/app/websocket/__init__.py +5 -0
- package/app/backend/app/websocket/manager.py +179 -0
- package/app/backend/app/websocket/state_tracker.py +113 -0
- package/app/backend/app/worker.py +3190 -0
- package/app/backend/calculator_tickets.json +40 -0
- package/app/backend/canary_tests.sh +591 -0
- package/app/backend/celerybeat-schedule +0 -0
- package/app/backend/celerybeat-schedule-shm +0 -0
- package/app/backend/celerybeat-schedule-wal +0 -0
- package/app/backend/logs/.gitkeep +3 -0
- package/app/backend/multiplication_division_implementation_tickets.json +55 -0
- package/app/backend/multiplication_division_tickets.json +42 -0
- package/app/backend/pyproject.toml +45 -0
- package/app/backend/requirements-dev.txt +8 -0
- package/app/backend/requirements.txt +20 -0
- package/app/backend/run.sh +30 -0
- package/app/backend/run_with_logs.sh +10 -0
- package/app/backend/scientific_calculator_tickets.json +40 -0
- package/app/backend/scripts/extract_openapi.py +21 -0
- package/app/backend/scripts/seed_demo.py +187 -0
- package/app/backend/setup_demo_review.py +302 -0
- package/app/backend/test_actual_parse.py +41 -0
- package/app/backend/test_agent_streaming.py +61 -0
- package/app/backend/test_parse.py +51 -0
- package/app/backend/test_streaming.py +51 -0
- package/app/backend/test_subprocess_streaming.py +50 -0
- package/app/backend/tests/__init__.py +1 -0
- package/app/backend/tests/conftest.py +46 -0
- package/app/backend/tests/test_auth.py +341 -0
- package/app/backend/tests/test_autonomy_service.py +391 -0
- package/app/backend/tests/test_cleanup_service_safety.py +417 -0
- package/app/backend/tests/test_middleware.py +279 -0
- package/app/backend/tests/test_planner_providers.py +290 -0
- package/app/backend/tests/test_planner_unblock.py +183 -0
- package/app/backend/tests/test_revision_invariants.py +618 -0
- package/app/backend/tests/test_sqlite_kv.py +290 -0
- package/app/backend/tests/test_sqlite_worker.py +353 -0
- package/app/backend/tests/test_task_dispatch.py +100 -0
- package/app/backend/tests/test_ticket_validation.py +304 -0
- package/app/backend/tests/test_udar_agent.py +693 -0
- package/app/backend/tests/test_webhook_service.py +184 -0
- package/app/backend/tickets_output.json +59 -0
- package/app/backend/user_management_tickets.json +50 -0
- package/app/backend/uvicorn.log +0 -0
- package/app/draft.yaml +313 -0
- package/app/frontend/dist/assets/index-LcjCczu5.js +155 -0
- package/app/frontend/dist/assets/index-_FP_279e.css +1 -0
- package/app/frontend/dist/index.html +14 -0
- package/app/frontend/dist/vite.svg +1 -0
- package/app/frontend/package.json +101 -0
- package/bin/cli.js +527 -0
- package/package.json +37 -0
|
@@ -0,0 +1,1040 @@
|
|
|
1
|
+
"""Synchronous planner tick for Celery workers.
|
|
2
|
+
|
|
3
|
+
This module provides a synchronous implementation of the planner tick
|
|
4
|
+
logic that can run in Celery worker processes without async issues.
|
|
5
|
+
|
|
6
|
+
The key difference from the async PlannerService is that this uses
|
|
7
|
+
SQLAlchemy's synchronous Session instead of AsyncSession, avoiding
|
|
8
|
+
the "pysqlite is not async" error that occurs when running asyncio
|
|
9
|
+
code in forked Celery worker processes.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import json
|
|
13
|
+
import logging
|
|
14
|
+
import uuid
|
|
15
|
+
from datetime import UTC, datetime, timedelta
|
|
16
|
+
|
|
17
|
+
from sqlalchemy import and_, delete, select, update
|
|
18
|
+
from sqlalchemy.exc import IntegrityError
|
|
19
|
+
from sqlalchemy.orm import selectinload
|
|
20
|
+
|
|
21
|
+
from app.database_sync import get_sync_db
|
|
22
|
+
from app.models.job import Job, JobKind, JobStatus
|
|
23
|
+
from app.models.planner_lock import PlannerLock
|
|
24
|
+
from app.models.ticket import Ticket
|
|
25
|
+
from app.models.ticket_event import TicketEvent
|
|
26
|
+
from app.services.config_service import PlannerConfig
|
|
27
|
+
from app.services.llm_service import LLMService
|
|
28
|
+
from app.state_machine import ActorType, EventType, TicketState
|
|
29
|
+
|
|
30
|
+
logger = logging.getLogger(__name__)
|
|
31
|
+
|
|
32
|
+
# Lock settings (same as async version)
|
|
33
|
+
PLANNER_LOCK_KEY = "planner_tick"
|
|
34
|
+
LOCK_STALE_MINUTES = 10
|
|
35
|
+
|
|
36
|
+
# Markers (same as async version)
|
|
37
|
+
REFLECTION_EVENT_TYPE = EventType.COMMENT.value
|
|
38
|
+
REFLECTION_MARKER = "planner_reflection"
|
|
39
|
+
FOLLOWUP_MARKER = "planner_followup_created"
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class PlannerLockError(Exception):
    """Signals that the planner tick lock could not be acquired.

    Raised when another planner tick already holds the singleton lock
    row (and it is not stale), so this tick must back off.
    """
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def run_planner_tick_sync() -> dict:
    """Run a synchronous planner tick.

    This is the main entry point for Celery worker tasks. The tick performs:

    0. Execute a queued follow-up message, if any (instant follow-up UX).
    1. Unblock tickets whose blockers reached DONE.
    2. Pick planned tickets and enqueue execute jobs (parallel-aware).
    3. Create follow-up tickets for blocked tickets (LLM-powered).
    4. Generate reflections for done tickets (LLM-powered).

    Steps 0-2 run under the planner lock; steps 3-4 run outside it so slow
    LLM API calls (10-60s) do not starve other SQLite writers.

    Returns:
        Dict with tick results: executed, followups_created,
        reflections_added, queued_executed, unblocked.

    Raises:
        PlannerLockError: If lock cannot be acquired
    """
    lock_owner_id = str(uuid.uuid4())

    # Load config from DB (first active board's config is the source of truth)
    from app.models.board import Board
    from app.services.config_service import DraftConfig

    board_config = None
    with get_sync_db() as config_db:
        board = config_db.execute(select(Board).limit(1)).scalar_one_or_none()
        if board and board.config:
            board_config = board.config

    config = DraftConfig.from_board_config(board_config).planner_config

    executed = 0
    followups_created = 0
    reflections_added = 0
    queued_executed = 0
    # FIX: initialize alongside the other counters so the return dict is
    # always well-defined, instead of relying on step 1 assigning it.
    unblocked = 0
    jobs_to_enqueue: list[str] = []

    with get_sync_db() as db:
        # Acquire lock (raises PlannerLockError if another tick is live)
        _acquire_lock_sync(db, lock_owner_id)

        try:
            # 0. Check for queued messages on tickets ready for execution
            # This enables the instant follow-up UX like vibe-kanban
            if config.features.auto_execute:
                queued_job_id = _execute_queued_message_sync(db)
                if queued_job_id:
                    jobs_to_enqueue.append(queued_job_id)
                    queued_executed = 1

            from app.services.orchestrator_log import add_orchestrator_log

            # 1. Unblock tickets whose blockers are now done
            unblocked = _unblock_ready_tickets_sync(db)
            if unblocked:
                add_orchestrator_log(
                    "INFO",
                    f"Unblocked {unblocked} ticket(s) (blockers reached DONE)",
                    {"count": unblocked},
                )

            # 2. Pick and execute planned tickets (parallel-aware)
            if config.features.auto_execute and queued_executed == 0:
                new_job_ids = _pick_and_execute_next_sync(db)
                if new_job_ids:
                    jobs_to_enqueue.extend(new_job_ids)
                    executed = len(new_job_ids)

            # Commit fast DB operations and release lock BEFORE LLM calls.
            # Steps 3-4 involve LLM API calls (10-60s) which would starve
            # all other SQLite writers if we held the lock.
            db.commit()

        finally:
            # Always release lock
            _release_lock_sync(db, lock_owner_id)

    # Enqueue Celery jobs AFTER commit
    for job_id in jobs_to_enqueue:
        _enqueue_celery_job_sync(job_id)

    # LLM-powered operations run OUTSIDE the planner lock to avoid
    # starving other writers during 10-60s LLM API calls.

    # 3. Handle blocked tickets (LLM-powered)
    if config.features.propose_followups:
        try:
            with get_sync_db() as db:
                followups_created = _handle_blocked_tickets_sync(db, config)
                if followups_created:
                    from app.services.orchestrator_log import add_orchestrator_log

                    add_orchestrator_log(
                        "INFO",
                        f"Created {followups_created} follow-up ticket(s) for blocked tickets",
                        {"count": followups_created},
                    )
                db.commit()
        except Exception:
            # FIX: use the module-level logger instead of re-importing
            # logging and rebuilding an identical logger.
            logger.exception("Error in LLM-powered follow-up generation")

    # 4. Generate reflections (LLM-powered)
    if config.features.generate_reflections:
        try:
            with get_sync_db() as db:
                reflections_added = _generate_reflections_sync(db, config)
                if reflections_added:
                    from app.services.orchestrator_log import add_orchestrator_log

                    add_orchestrator_log(
                        "INFO",
                        f"Generated {reflections_added} reflection(s) for done tickets",
                        {"count": reflections_added},
                    )
                db.commit()
        except Exception:
            logger.exception("Error in LLM-powered reflection generation")

    return {
        "executed": executed,
        "followups_created": followups_created,
        "reflections_added": reflections_added,
        "queued_executed": queued_executed,
        "unblocked": unblocked,
    }
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def _acquire_lock_sync(db, owner_id: str) -> None:
    """Acquire the planner lock synchronously.

    Strategy: first try to take over a stale lock row (one held longer than
    ``LOCK_STALE_MINUTES``) with an UPDATE; if nothing was claimed, try to
    INSERT a fresh lock row. An IntegrityError on the insert means another
    owner already holds a live lock.

    Args:
        db: Synchronous SQLAlchemy session.
        owner_id: Unique id for this tick; used later to release the lock.

    Raises:
        PlannerLockError: If a non-stale lock is held by another owner.
    """
    stale_threshold = datetime.now(UTC) - timedelta(minutes=LOCK_STALE_MINUTES)
    now = datetime.now(UTC)

    # Try to claim a stale lock via UPDATE
    update_result = db.execute(
        update(PlannerLock)
        .where(
            and_(
                PlannerLock.lock_key == PLANNER_LOCK_KEY,
                PlannerLock.acquired_at < stale_threshold,
            )
        )
        .values(
            owner_id=owner_id,
            acquired_at=now,
        )
    )

    # rowcount > 0 means we took over a stale lock row.
    if update_result.rowcount > 0:
        db.flush()
        logger.debug(f"Acquired planner lock by claiming stale (owner={owner_id})")
        return

    # Try INSERT (no lock exists yet)
    lock = PlannerLock(
        lock_key=PLANNER_LOCK_KEY,
        owner_id=owner_id,
        acquired_at=now,
    )
    db.add(lock)

    try:
        db.flush()
        logger.debug(f"Acquired planner lock via insert (owner={owner_id})")
    except IntegrityError:
        # Insert collided with an existing lock row (presumably a unique
        # constraint on lock_key — TODO confirm against the model/migration).
        db.rollback()
        existing = db.execute(
            select(PlannerLock).where(PlannerLock.lock_key == PLANNER_LOCK_KEY)
        )
        existing_lock = existing.scalar_one_or_none()
        if existing_lock:
            raise PlannerLockError(
                f"Planner tick already in progress (started at {existing_lock.acquired_at})"
            )
        # Lock row vanished between the failed insert and the re-check.
        raise PlannerLockError("Failed to acquire planner lock")
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
def _release_lock_sync(db, owner_id: str) -> None:
    """Drop this owner's planner lock row (best-effort, no commit).

    The final commit belongs to the caller's ``get_sync_db()`` context
    manager; committing here as well would cause double-commit issues,
    so the delete is only flushed (staged), never committed.
    """
    release_stmt = delete(PlannerLock).where(
        and_(
            PlannerLock.lock_key == PLANNER_LOCK_KEY,
            PlannerLock.owner_id == owner_id,
        )
    )
    try:
        db.execute(release_stmt)
        # Stage the delete without committing; the enclosing context
        # manager performs the actual commit for atomic behavior.
        db.flush()
        logger.debug(f"Released planner lock (owner={owner_id})")
    except Exception as e:
        # Best-effort: a failed release is logged, never raised.
        logger.warning(f"Failed to release planner lock: {e}")
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
def _count_active_executions_sync(db) -> int:
    """Return how many EXECUTE jobs are currently queued or running."""
    from sqlalchemy import func as sql_func

    active_statuses = [
        JobStatus.QUEUED.value,
        JobStatus.RUNNING.value,
    ]
    stmt = select(sql_func.count(Job.id)).where(
        and_(
            Job.kind == JobKind.EXECUTE.value,
            Job.status.in_(active_statuses),
        )
    )
    total = db.execute(stmt).scalar_one()
    # Guard against a None scalar so callers always get an int.
    return total or 0
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
def _has_active_execution_sync(db) -> bool:
    """Return True when at least one execute job is queued or running."""
    # The count is a non-negative int, so truthiness matches `> 0`.
    return bool(_count_active_executions_sync(db))
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
def _get_max_parallel_jobs() -> int:
    """Read max_parallel_jobs from config (DB first, then default).

    Reads the first board's stored config; falls back to 1 (sequential
    execution) when no board/config exists or the lookup fails.

    Returns:
        The configured maximum number of parallel execute jobs, or 1.
    """
    try:
        from app.models.board import Board
        from app.services.config_service import DraftConfig

        with get_sync_db() as db:
            board = db.execute(select(Board).limit(1)).scalar_one_or_none()
            if board and board.config:
                return DraftConfig.from_board_config(
                    board.config
                ).execute_config.max_parallel_jobs
        return 1
    except Exception:
        # FIX: keep the best-effort fallback, but record why the config
        # read failed instead of swallowing the error invisibly.
        logger.debug(
            "Failed to read max_parallel_jobs from config; defaulting to 1",
            exc_info=True,
        )
        return 1
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
def _unblock_ready_tickets_sync(db) -> int:
    """Check BLOCKED tickets and unblock those whose blockers are now done.

    Mirrors the async ``PlannerService._unblock_ready_tickets`` so that the
    sync periodic tick (run by SQLiteWorker) also transitions dependent
    tickets from BLOCKED → PLANNED once their blocker reaches DONE.

    Returns:
        Number of tickets that were unblocked.
    """
    # Eager-load the blocker relationship so the loop below does not issue
    # one extra query per blocked ticket.
    blocked_tickets = (
        db.execute(
            select(Ticket)
            .where(
                and_(
                    Ticket.state == TicketState.BLOCKED.value,
                    Ticket.blocked_by_ticket_id.isnot(None),
                )
            )
            .options(selectinload(Ticket.blocked_by))
        )
        .scalars()
        .all()
    )

    unblocked = 0
    for ticket in blocked_tickets:
        # Only unblock when the blocker row exists AND has reached DONE.
        if ticket.blocked_by and ticket.blocked_by.state == TicketState.DONE.value:
            logger.info(
                f"Unblocking ticket {ticket.id}: blocker {ticket.blocked_by_ticket_id} "
                f"is now DONE"
            )
            old_state = ticket.state
            ticket.state = TicketState.PLANNED.value

            # Build the audit event BEFORE clearing the FK so the event
            # payload still records which ticket was the blocker.
            event = TicketEvent(
                ticket_id=ticket.id,
                event_type=EventType.TRANSITIONED.value,
                from_state=old_state,
                to_state=TicketState.PLANNED.value,
                actor_type=ActorType.PLANNER.value,
                actor_id="planner",
                reason=f"Unblocked: blocking ticket '{ticket.blocked_by.title}' is now done",
                payload_json=json.dumps(
                    {
                        "blocker_ticket_id": ticket.blocked_by_ticket_id,
                        "blocker_title": ticket.blocked_by.title,
                        "action": "unblocked",
                    }
                ),
            )
            db.add(event)

            # Clear the dependency FK so UI stops showing the badge
            ticket.blocked_by_ticket_id = None
            unblocked += 1

    if unblocked:
        # Stage changes; the caller owns the commit.
        db.flush()
        logger.info(f"Unblocked {unblocked} tickets")

    return unblocked
|
|
353
|
+
|
|
354
|
+
|
|
355
|
+
def _pick_and_execute_next_sync(db) -> list[str]:
    """Pick planned tickets and create execute jobs, respecting parallelism.

    When max_parallel_jobs > 1, picks multiple independent tickets (those not
    blocked by unfinished dependencies). Dependent tickets are always sequential.

    Returns:
        List of Job IDs that were queued (may be empty).
    """
    # Available slots = configured parallelism minus jobs already in flight.
    max_parallel = _get_max_parallel_jobs()
    active_count = _count_active_executions_sync(db)
    slots = max_parallel - active_count

    if slots <= 0:
        logger.debug(
            f"No execution slots available ({active_count}/{max_parallel} active)"
        )
        return []

    # Find planned tickets ordered by priority
    planned_tickets = (
        db.execute(
            select(Ticket)
            .where(Ticket.state == TicketState.PLANNED.value)
            .options(selectinload(Ticket.blocked_by))
            .order_by(
                Ticket.priority.desc().nulls_last(),
                Ticket.created_at.asc(),
            )
            .limit(slots * 2)  # Fetch extra in case some are dependency-blocked
        )
        .scalars()
        .all()
    )

    if not planned_tickets:
        return []

    # Collect ticket IDs that already have active jobs (avoid double-scheduling)
    active_ticket_ids = set(
        db.execute(
            select(Job.ticket_id).where(
                and_(
                    Job.kind == JobKind.EXECUTE.value,
                    Job.status.in_(
                        [
                            JobStatus.QUEUED.value,
                            JobStatus.RUNNING.value,
                        ]
                    ),
                )
            )
        )
        .scalars()
        .all()
    )

    job_ids = []
    for ticket in planned_tickets:
        # Stop once every free slot has a job.
        if len(job_ids) >= slots:
            break

        # Skip if already has an active job
        if ticket.id in active_ticket_ids:
            continue

        # Check dependency — push to BLOCKED if blocker isn't done
        if ticket.blocked_by_ticket_id:
            blocker = ticket.blocked_by
            if blocker is None or blocker.state != TicketState.DONE.value:
                blocker_title = blocker.title if blocker else "unknown"
                logger.info(
                    "Ticket %s blocked by incomplete %s (%s), moving to BLOCKED",
                    ticket.id,
                    ticket.blocked_by_ticket_id,
                    blocker_title,
                )
                # Demote PLANNED → BLOCKED and record the transition.
                ticket.state = TicketState.BLOCKED.value
                event = TicketEvent(
                    ticket_id=ticket.id,
                    event_type=EventType.TRANSITIONED.value,
                    from_state=TicketState.PLANNED.value,
                    to_state=TicketState.BLOCKED.value,
                    actor_type=ActorType.PLANNER.value,
                    actor_id="planner",
                    reason=f"Blocked by incomplete ticket: {blocker_title}",
                    payload_json=json.dumps(
                        {
                            "blocked_by_ticket_id": ticket.blocked_by_ticket_id,
                            "blocked_by_title": blocker_title,
                        }
                    ),
                )
                db.add(event)
                continue

        # Create execute job
        job = Job(
            ticket_id=ticket.id,
            board_id=ticket.board_id,
            kind=JobKind.EXECUTE.value,
            status=JobStatus.QUEUED.value,
        )
        db.add(job)
        # Flush + refresh so job.id is populated before it is referenced
        # in the event payload and the returned list below.
        db.flush()
        db.refresh(job)

        # COMMENT (not TRANSITIONED) event: the ticket state is unchanged;
        # this only records that an execute job was enqueued.
        event = TicketEvent(
            ticket_id=ticket.id,
            event_type=EventType.COMMENT.value,
            from_state=ticket.state,
            to_state=ticket.state,
            actor_type=ActorType.PLANNER.value,
            actor_id="planner",
            reason="Planner enqueued execute job",
            payload_json=json.dumps(
                {
                    "action": "enqueued_execute",
                    "job_id": job.id,
                }
            ),
        )
        db.add(event)
        job_ids.append(job.id)

        logger.info(f"Planner created execute job {job.id} for ticket {ticket.id}")

    if job_ids:
        logger.info(
            f"Planner queued {len(job_ids)} execute job(s) "
            f"({active_count + len(job_ids)}/{max_parallel} slots used)"
        )
    return job_ids
|
|
488
|
+
|
|
489
|
+
|
|
490
|
+
def _execute_queued_message_sync(db) -> str | None:
    """Execute a queued follow-up message if one exists.

    Checks for tickets that:
    1. Have a queued message in Redis
    2. Are in a state ready for execution (DONE with changes_requested, BLOCKED, or NEEDS_HUMAN)
    3. Have no active jobs running

    This enables the vibe-kanban-style instant follow-up UX.

    Returns:
        Job ID if a queued message was executed, None otherwise.
    """
    # Local import — presumably to avoid a circular dependency; confirm.
    from app.services.queued_message_service import queued_message_service

    # Find tickets that might have queued messages
    # These are tickets ready for re-execution after completing a cycle
    ready_tickets = (
        db.execute(
            select(Ticket).where(
                Ticket.state.in_(
                    [
                        TicketState.DONE.value,  # Approved but has queued follow-up
                        TicketState.NEEDS_HUMAN.value,  # Ready for human input (with queued message)
                        TicketState.BLOCKED.value,  # Blocked but has queued fix
                    ]
                )
            )
        )
        .scalars()
        .all()
    )

    for ticket in ready_tickets:
        # Check if this ticket has a queued message.
        # NOTE: take_queued removes the message from the queue, so every
        # skip path below must re-queue it or the message is lost.
        queued = queued_message_service.take_queued(ticket.id)
        if not queued:
            continue

        # Check no active jobs for this ticket
        active_job = db.execute(
            select(Job.id)
            .where(
                and_(
                    Job.ticket_id == ticket.id,
                    Job.status.in_([JobStatus.QUEUED.value, JobStatus.RUNNING.value]),
                )
            )
            .limit(1)
        ).scalar_one_or_none()

        if active_job:
            # Put message back if there's already an active job
            queued_message_service.queue_message(ticket.id, queued.message)
            continue

        # Determine valid target state per state machine:
        # DONE → EXECUTING (the only valid exit from DONE)
        # NEEDS_HUMAN → EXECUTING (human resolved, back to executing)
        # BLOCKED → EXECUTING (retry execution)
        old_state = ticket.state
        old_state_enum = TicketState(old_state)
        target_state = {
            TicketState.DONE: TicketState.EXECUTING,
            TicketState.NEEDS_HUMAN: TicketState.EXECUTING,
            TicketState.BLOCKED: TicketState.EXECUTING,
        }.get(old_state_enum)

        # The mapping above covers exactly the three states queried for,
        # so this fallback is defensive: it only triggers if the ticket's
        # state changed between the query and this point.
        if target_state is None:
            from app.state_machine import validate_transition

            if not validate_transition(old_state, TicketState.EXECUTING.value):
                logger.warning(
                    f"Cannot transition ticket {ticket.id} from {old_state} "
                    f"to EXECUTING via queued message, skipping"
                )
                # Re-queue so the message is not silently dropped.
                queued_message_service.queue_message(ticket.id, queued.message)
                continue
            target_state = TicketState.EXECUTING

        ticket.state = target_state.value

        # Create event for the queued message execution
        event = TicketEvent(
            ticket_id=ticket.id,
            event_type=EventType.TRANSITIONED.value,
            from_state=old_state,
            to_state=target_state.value,
            actor_type=ActorType.PLANNER.value,
            actor_id="planner",
            # NOTE(review): "..." is appended even when the message is
            # shorter than 100 chars — cosmetic only.
            reason=f"Executing queued follow-up: {queued.message[:100]}...",
            payload_json=json.dumps(
                {
                    "action": "queued_followup",
                    "queued_message": queued.message,
                    "queued_at": queued.queued_at.isoformat(),
                }
            ),
        )
        db.add(event)

        # Store follow-up prompt in Redis for the worker to pick up
        # The executor will append this to the prompt bundle
        queued_message_service.set_followup_prompt(ticket.id, queued.message)

        # Create execute job
        job = Job(
            ticket_id=ticket.id,
            board_id=ticket.board_id,
            kind=JobKind.EXECUTE.value,
            status=JobStatus.QUEUED.value,
        )
        db.add(job)
        # flush + refresh so job.id (DB-assigned) is available before commit.
        db.flush()
        db.refresh(job)

        logger.info(
            f"Executing queued message for ticket {ticket.id}: {queued.message[:50]}..."
        )
        # At most one queued message is executed per call.
        return job.id

    return None
|
|
612
|
+
|
|
613
|
+
|
|
614
|
+
def _enqueue_celery_job_sync(job_id: str) -> None:
    """Dispatch a worker task for the given job (synchronous).

    Delegates to the unified task-dispatch layer, which supports both the
    SQLite-backed queue and Celery. Best-effort: failures are logged, never
    raised to the caller.
    """
    from app.services.task_dispatch import enqueue_task

    try:
        with get_sync_db() as db:
            result = db.execute(select(Job).where(Job.id == job_id))
            job = result.scalar_one_or_none()
            if job is None:
                logger.error(f"Job {job_id} not found when enqueueing task")
                return

            if job.celery_task_id:
                # Idempotency guard: a task was already dispatched for this job.
                logger.debug(f"Job {job_id} already has task {job.celery_task_id}")
                return

            # Hand off to the unified dispatcher and persist the task id.
            task = enqueue_task("execute_ticket", args=[job_id])
            job.celery_task_id = task.id
            db.commit()

            logger.info(f"Enqueued task {task.id} for job {job_id}")
    except Exception as e:
        logger.error(f"Failed to enqueue task for job {job_id}: {e}")
|
|
641
|
+
|
|
642
|
+
|
|
643
|
+
def _handle_blocked_tickets_sync(db, config: PlannerConfig) -> int:
    """Handle blocked tickets and generate follow-ups (synchronous).

    Returns:
        Number of follow-ups created.
    """
    followups_created = 0

    # Find blocked tickets (eager-load goal + events to avoid N+1 queries)
    blocked_tickets = (
        db.execute(
            select(Ticket)
            .where(Ticket.state == TicketState.BLOCKED.value)
            .options(selectinload(Ticket.goal), selectinload(Ticket.events))
        )
        .scalars()
        .all()
    )

    llm_service = LLMService(config)

    for ticket in blocked_tickets:
        # Cap: max follow-ups per tick
        if followups_created >= config.max_followups_per_tick:
            break

        # Cap: count existing follow-ups (events tagged with FOLLOWUP_MARKER)
        existing_followup_count = sum(
            1
            for event in ticket.events
            if event.payload_json and FOLLOWUP_MARKER in event.payload_json
        )
        if existing_followup_count >= config.max_followups_per_ticket:
            continue

        # Get blocker reason and payload from the most recent event that
        # moved the ticket into BLOCKED (hence the reversed() scan).
        blocker_reason = None
        blocker_payload = {}
        for event in reversed(ticket.events):
            if event.to_state == TicketState.BLOCKED.value and event.reason:
                blocker_reason = event.reason
                if event.payload_json:
                    try:
                        blocker_payload = json.loads(event.payload_json)
                    except (json.JSONDecodeError, TypeError):
                        # Malformed payload: keep the reason, drop the payload.
                        pass
                break

        # Skip: tickets with skip_followup flag
        if blocker_payload.get("skip_followup"):
            continue

        # Skip: tickets with manual work follow-up
        if blocker_payload.get("manual_work_followup_id"):
            continue

        # Skip certain blocker reasons (configured substring matches)
        if blocker_reason and _should_skip_followup(blocker_reason, config):
            continue

        # Fetch sibling ticket titles in the same goal to avoid duplicates
        sibling_titles: list[str] = []
        if ticket.goal_id:
            sibling_result = db.execute(
                select(Ticket.title).where(
                    and_(
                        Ticket.goal_id == ticket.goal_id,
                        Ticket.id != ticket.id,
                    )
                )
            )
            sibling_titles = [row[0] for row in sibling_result.fetchall()]

        # Generate follow-up proposal via LLM; on failure skip this ticket
        # rather than aborting the whole tick.
        try:
            proposal = _generate_followup_proposal(
                ticket_title=ticket.title,
                ticket_description=ticket.description,
                blocker_reason=blocker_reason,
                goal_title=ticket.goal.title if ticket.goal else None,
                goal_description=ticket.goal.description if ticket.goal else None,
                llm_service=llm_service,
                config=config,
                existing_ticket_titles=sibling_titles,
            )
        except Exception as e:
            logger.error(f"Failed to generate follow-up for ticket {ticket.id}: {e}")
            continue

        # Create follow-up ticket (inherits goal + priority from the blocked one)
        followup_ticket = Ticket(
            goal_id=ticket.goal_id,
            title=proposal["title"],
            description=proposal["description"],
            state=TicketState.PROPOSED.value,
            priority=ticket.priority,
        )
        db.add(followup_ticket)
        # flush + refresh so followup_ticket.id is available for the events below.
        db.flush()
        db.refresh(followup_ticket)

        # Create creation event on the new follow-up ticket
        creation_event = TicketEvent(
            ticket_id=followup_ticket.id,
            event_type=EventType.CREATED.value,
            from_state=None,
            to_state=TicketState.PROPOSED.value,
            actor_type=ActorType.PLANNER.value,
            actor_id="planner",
            reason=f"Follow-up for blocked ticket: {ticket.title}",
            payload_json=json.dumps(
                {
                    "parent_ticket_id": ticket.id,
                    "verification": proposal.get("verification", []),
                }
            ),
        )
        db.add(creation_event)

        # Create link event on blocked ticket; FOLLOWUP_MARKER in the payload
        # is what the per-ticket cap above counts.
        link_event = TicketEvent(
            ticket_id=ticket.id,
            event_type=EventType.COMMENT.value,
            from_state=ticket.state,
            to_state=ticket.state,
            actor_type=ActorType.PLANNER.value,
            actor_id="planner",
            reason=f"Created follow-up ticket: {followup_ticket.title}",
            payload_json=json.dumps(
                {
                    FOLLOWUP_MARKER: True,
                    "followup_ticket_id": followup_ticket.id,
                }
            ),
        )
        db.add(link_event)

        followups_created += 1
        logger.info(
            f"Created follow-up ticket {followup_ticket.id} for blocked ticket {ticket.id}"
        )

        # Best-effort orchestrator log entry: any failure (including import
        # failure) is deliberately swallowed so logging cannot break planning.
        try:
            from app.services.orchestrator_log import add_orchestrator_log

            add_orchestrator_log(
                "INFO",
                f"Follow-up created: '{followup_ticket.title}'",
                {
                    "followup_ticket_id": followup_ticket.id,
                    "blocked_ticket_id": ticket.id,
                    "blocked_ticket_title": ticket.title,
                    "blocker_reason": blocker_reason,
                    "existing_siblings": len(sibling_titles),
                },
            )
        except Exception:
            pass

    return followups_created
|
|
803
|
+
|
|
804
|
+
|
|
805
|
+
def _should_skip_followup(blocker_reason: str, config: PlannerConfig) -> bool:
    """Return True when the blocker reason matches a configured skip pattern.

    Matching is case-insensitive substring containment against each entry
    of ``config.skip_followup_reasons``.
    """
    haystack = blocker_reason.lower()
    return any(pattern.lower() in haystack for pattern in config.skip_followup_reasons)
|
|
812
|
+
|
|
813
|
+
|
|
814
|
+
def _generate_followup_proposal(
    ticket_title: str,
    ticket_description: str | None,
    blocker_reason: str | None,
    goal_title: str | None,
    goal_description: str | None,
    llm_service: LLMService,
    config: PlannerConfig,
    existing_ticket_titles: list[str] | None = None,
) -> dict:
    """Generate a follow-up ticket proposal using LLM.

    Returns a dict with "title", "description" and "verification" keys;
    on any LLM failure a deterministic fallback proposal is returned, so
    this function never raises for LLM errors.
    """
    # Assemble context lines; only the blocked ticket title is mandatory.
    context_parts = []
    if goal_title:
        context_parts.append(f"Goal: {goal_title}")
    if goal_description:
        context_parts.append(f"Goal description: {goal_description}")
    context_parts.append(f"Blocked ticket: {ticket_title}")
    if ticket_description:
        context_parts.append(f"Ticket description: {ticket_description}")
    if blocker_reason:
        context_parts.append(f"Blocker reason: {blocker_reason}")

    context = "\n".join(context_parts)

    # Include existing tickets so LLM avoids duplicates
    existing_section = ""
    if existing_ticket_titles:
        ticket_list = "\n".join(f"- {t}" for t in existing_ticket_titles)
        existing_section = f"""

## Existing Tickets (DO NOT DUPLICATE)
These tickets already exist in the same goal. Do NOT create a follow-up that overlaps with any of these:
{ticket_list}"""

    system_prompt = """You are a technical project planner. Given a blocked ticket, propose a follow-up ticket that addresses the blocker.

Your response MUST be valid JSON with this exact structure:
{
    "title": "Short, actionable title for the follow-up ticket",
    "description": "Clear description of what needs to be done to unblock the original ticket",
    "verification": ["command1", "command2"]
}

Guidelines:
- Do NOT create a ticket that duplicates an existing one"""

    user_prompt = f"""A ticket is blocked and needs a follow-up ticket to address the blocker.

{context}{existing_section}

Generate a follow-up ticket proposal as JSON."""

    try:
        response = llm_service.call_completion(
            messages=[{"role": "user", "content": user_prompt}],
            max_tokens=config.max_tokens_followup,
            system_prompt=system_prompt,
        )
        # safe_parse_json falls back to {} on unparseable output, so every
        # .get() below has a default.
        data = llm_service.safe_parse_json(response.content, {})

        return {
            "title": data.get("title", "Follow-up for blocked ticket"),
            "description": data.get(
                "description", "Address the blocker from the original ticket."
            ),
            "verification": data.get("verification", []),
        }
    except Exception as e:
        logger.error(f"LLM API call failed: {e}")
        # Deterministic fallback proposal derived from the inputs.
        return {
            "title": f"Follow-up: {ticket_title}",
            "description": f"Address blocker: {blocker_reason or 'Unknown blocker'}",
            "verification": [],
        }
|
|
888
|
+
|
|
889
|
+
|
|
890
|
+
def _generate_reflections_sync(db, config: PlannerConfig) -> int:
    """Generate reflections for done tickets (synchronous).

    Returns:
        Number of reflections added.
    """
    reflections_added = 0

    # Find done tickets (eager-load events + evidence to avoid N+1 queries)
    done_tickets = (
        db.execute(
            select(Ticket)
            .where(Ticket.state == TicketState.DONE.value)
            .options(selectinload(Ticket.events), selectinload(Ticket.evidence))
        )
        .scalars()
        .all()
    )

    llm_service = LLMService(config)

    for ticket in done_tickets:
        # Check if already has reflection (marker in any event payload);
        # this makes repeated ticks idempotent per ticket.
        has_reflection = any(
            event.payload_json and REFLECTION_MARKER in event.payload_json
            for event in ticket.events
        )

        if has_reflection:
            continue

        # Build summaries
        events_summary = _summarize_events(ticket.events)
        evidence_summary = _summarize_evidence(ticket.evidence)

        # Generate reflection; on failure skip this ticket rather than
        # aborting the whole pass.
        try:
            reflection = _generate_reflection_summary(
                ticket_title=ticket.title,
                ticket_description=ticket.description,
                events_summary=events_summary,
                evidence_summary=evidence_summary,
                llm_service=llm_service,
                config=config,
            )
        except Exception as e:
            logger.error(f"Failed to generate reflection for ticket {ticket.id}: {e}")
            continue

        # Create reflection event; REFLECTION_MARKER in the payload is what
        # the idempotency check above looks for.
        reflection_event = TicketEvent(
            ticket_id=ticket.id,
            event_type=REFLECTION_EVENT_TYPE,
            from_state=ticket.state,
            to_state=ticket.state,
            actor_type=ActorType.PLANNER.value,
            actor_id="planner",
            reason=reflection,
            payload_json=json.dumps(
                {
                    REFLECTION_MARKER: True,
                    "type": "reflection_added",
                }
            ),
        )
        db.add(reflection_event)

        reflections_added += 1
        logger.info(f"Generated reflection for ticket {ticket.id}")

    return reflections_added
|
|
961
|
+
|
|
962
|
+
|
|
963
|
+
def _summarize_events(events) -> str:
    """Produce a short textual trail of a ticket's state changes."""
    if not events:
        return "No events"

    created = EventType.CREATED.value
    transitioned = EventType.TRANSITIONED.value

    steps: list[str] = []
    for ev in events:
        if ev.event_type == transitioned:
            steps.append(f"{ev.from_state} → {ev.to_state}")
        elif ev.event_type == created:
            steps.append(f"created ({ev.to_state})")

    # Cap at the first five entries to keep the summary compact.
    return " → ".join(steps[:5]) if steps else "No state transitions"
|
|
978
|
+
|
|
979
|
+
|
|
980
|
+
def _summarize_evidence(evidence) -> str:
|
|
981
|
+
"""Summarize verification evidence."""
|
|
982
|
+
if not evidence:
|
|
983
|
+
return "No verification evidence"
|
|
984
|
+
|
|
985
|
+
passed = sum(1 for e in evidence if e.exit_code == 0)
|
|
986
|
+
failed = len(evidence) - passed
|
|
987
|
+
|
|
988
|
+
parts = []
|
|
989
|
+
if passed:
|
|
990
|
+
parts.append(f"{passed} passed")
|
|
991
|
+
if failed:
|
|
992
|
+
parts.append(f"{failed} failed")
|
|
993
|
+
|
|
994
|
+
return ", ".join(parts) if parts else "No evidence"
|
|
995
|
+
|
|
996
|
+
|
|
997
|
+
def _generate_reflection_summary(
    ticket_title: str,
    ticket_description: str | None,
    events_summary: str | None,
    evidence_summary: str | None,
    llm_service: LLMService,
    config: PlannerConfig,
) -> str:
    """Generate a reflection summary using LLM.

    Returns a short summary string; on any LLM failure a deterministic
    fallback sentence is returned, so this function never raises for
    LLM errors.
    """
    # Assemble context lines; only the ticket title is mandatory.
    context_parts = [f"Ticket: {ticket_title}"]
    if ticket_description:
        context_parts.append(f"Description: {ticket_description}")
    if events_summary:
        context_parts.append(f"Journey: {events_summary}")
    if evidence_summary:
        context_parts.append(f"Evidence: {evidence_summary}")

    context = "\n".join(context_parts)

    system_prompt = """You are a technical project assistant. Generate a brief reflection summary for a completed ticket.

Your response MUST be valid JSON with this exact structure:
{
    "summary": "A concise 2-3 sentence reflection on what was accomplished and any lessons learned"
}"""

    user_prompt = f"""A ticket has been completed. Generate a reflection summary.

{context}

Generate a reflection summary as JSON."""

    try:
        response = llm_service.call_completion(
            messages=[{"role": "user", "content": user_prompt}],
            max_tokens=config.max_tokens_reflection,
            system_prompt=system_prompt,
        )
        # safe_parse_json falls back to {} on unparseable output, so the
        # .get() below always has a default.
        data = llm_service.safe_parse_json(response.content, {})

        return data.get("summary", f"Completed: {ticket_title}")
    except Exception as e:
        logger.error(f"LLM API call failed: {e}")
        return f"Ticket '{ticket_title}' was completed successfully."
|