draft-board 0.1.0-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/app/backend/.env.example +9 -0
- package/app/backend/.smartkanban/evidence/8b383839-cbec-45af-86ee-c7708d075cbe/bddf2ed5-2e21-4d46-a62b-10b87f1642a6_patch.txt +195 -0
- package/app/backend/.smartkanban/evidence/8b383839-cbec-45af-86ee-c7708d075cbe/bddf2ed5-2e21-4d46-a62b-10b87f1642a6_stat.txt +6 -0
- package/app/backend/CURL_EXAMPLES.md +335 -0
- package/app/backend/ENV_SETUP.md +65 -0
- package/app/backend/alembic/env.py +71 -0
- package/app/backend/alembic/script.py.mako +28 -0
- package/app/backend/alembic/versions/001_initial_schema.py +104 -0
- package/app/backend/alembic/versions/002_add_jobs_table.py +52 -0
- package/app/backend/alembic/versions/003_add_workspace_table.py +48 -0
- package/app/backend/alembic/versions/004_add_evidence_table.py +56 -0
- package/app/backend/alembic/versions/005_add_verification_commands.py +32 -0
- package/app/backend/alembic/versions/006_add_planner_lock_table.py +39 -0
- package/app/backend/alembic/versions/007_add_revision_review_tables.py +126 -0
- package/app/backend/alembic/versions/008_add_revision_idempotency_and_traceability.py +52 -0
- package/app/backend/alembic/versions/009_add_job_health_fields.py +46 -0
- package/app/backend/alembic/versions/010_add_review_comment_line_content.py +36 -0
- package/app/backend/alembic/versions/011_add_analysis_cache.py +47 -0
- package/app/backend/alembic/versions/012_add_boards_table.py +102 -0
- package/app/backend/alembic/versions/013_add_ticket_blocking.py +45 -0
- package/app/backend/alembic/versions/014_add_agent_sessions.py +220 -0
- package/app/backend/alembic/versions/015_add_ticket_sort_order.py +33 -0
- package/app/backend/alembic/versions/03220f0b93ae_add_pr_fields_to_ticket.py +49 -0
- package/app/backend/alembic/versions/0c2d89fff3b1_seed_board_configs_from_yaml.py +206 -0
- package/app/backend/alembic/versions/3348e5cf54c1_add_merge_checklist_table.py +67 -0
- package/app/backend/alembic/versions/357c780ee445_add_goal_status.py +34 -0
- package/app/backend/alembic/versions/553340b7e26c_add_autonomy_fields_to_goal.py +65 -0
- package/app/backend/alembic/versions/774dc335c679_merge_migration_heads.py +23 -0
- package/app/backend/alembic/versions/7b307e847cbd_merge_heads.py +23 -0
- package/app/backend/alembic/versions/82ecd978cc70_add_missing_indexes.py +48 -0
- package/app/backend/alembic/versions/8ef5054dc280_add_normalized_log_entries.py +173 -0
- package/app/backend/alembic/versions/8f3e2bd8ea3b_merge_migration_heads.py +23 -0
- package/app/backend/alembic/versions/9d17f0698d3b_add_config_column_to_boards_table.py +30 -0
- package/app/backend/alembic/versions/add_agent_conversation_history.py +72 -0
- package/app/backend/alembic/versions/add_job_variant.py +34 -0
- package/app/backend/alembic/versions/add_performance_indexes.py +95 -0
- package/app/backend/alembic/versions/add_repos_and_board_repos.py +174 -0
- package/app/backend/alembic/versions/add_session_id_to_jobs.py +27 -0
- package/app/backend/alembic/versions/add_sqlite_backend_tables.py +104 -0
- package/app/backend/alembic/versions/b10fb0b62240_add_diff_content_to_revisions.py +34 -0
- package/app/backend/alembic.ini +89 -0
- package/app/backend/app/__init__.py +3 -0
- package/app/backend/app/data_dir.py +85 -0
- package/app/backend/app/database.py +70 -0
- package/app/backend/app/database_sync.py +64 -0
- package/app/backend/app/dependencies/__init__.py +5 -0
- package/app/backend/app/dependencies/auth.py +80 -0
- package/app/backend/app/dependencies.py +43 -0
- package/app/backend/app/exceptions.py +178 -0
- package/app/backend/app/executors/__init__.py +1 -0
- package/app/backend/app/executors/adapters/__init__.py +1 -0
- package/app/backend/app/executors/adapters/aider.py +152 -0
- package/app/backend/app/executors/adapters/amazon_q.py +103 -0
- package/app/backend/app/executors/adapters/amp.py +123 -0
- package/app/backend/app/executors/adapters/claude.py +177 -0
- package/app/backend/app/executors/adapters/cline.py +127 -0
- package/app/backend/app/executors/adapters/codex.py +167 -0
- package/app/backend/app/executors/adapters/copilot.py +202 -0
- package/app/backend/app/executors/adapters/cursor.py +87 -0
- package/app/backend/app/executors/adapters/droid.py +123 -0
- package/app/backend/app/executors/adapters/gemini.py +132 -0
- package/app/backend/app/executors/adapters/goose.py +131 -0
- package/app/backend/app/executors/adapters/opencode.py +123 -0
- package/app/backend/app/executors/adapters/qwen.py +123 -0
- package/app/backend/app/executors/plugins/__init__.py +1 -0
- package/app/backend/app/executors/registry.py +202 -0
- package/app/backend/app/executors/spec.py +226 -0
- package/app/backend/app/main.py +486 -0
- package/app/backend/app/middleware/__init__.py +13 -0
- package/app/backend/app/middleware/idempotency.py +426 -0
- package/app/backend/app/middleware/rate_limit.py +312 -0
- package/app/backend/app/middleware/security_headers.py +43 -0
- package/app/backend/app/middleware/timeout.py +37 -0
- package/app/backend/app/models/__init__.py +56 -0
- package/app/backend/app/models/agent_conversation_history.py +56 -0
- package/app/backend/app/models/agent_session.py +127 -0
- package/app/backend/app/models/analysis_cache.py +49 -0
- package/app/backend/app/models/base.py +9 -0
- package/app/backend/app/models/board.py +79 -0
- package/app/backend/app/models/board_repo.py +68 -0
- package/app/backend/app/models/cost_budget.py +42 -0
- package/app/backend/app/models/enums.py +40 -0
- package/app/backend/app/models/evidence.py +132 -0
- package/app/backend/app/models/goal.py +102 -0
- package/app/backend/app/models/idempotency_entry.py +30 -0
- package/app/backend/app/models/job.py +163 -0
- package/app/backend/app/models/job_queue.py +39 -0
- package/app/backend/app/models/kv_store.py +28 -0
- package/app/backend/app/models/merge_checklist.py +87 -0
- package/app/backend/app/models/normalized_log.py +100 -0
- package/app/backend/app/models/planner_lock.py +43 -0
- package/app/backend/app/models/rate_limit_entry.py +25 -0
- package/app/backend/app/models/repo.py +66 -0
- package/app/backend/app/models/review_comment.py +91 -0
- package/app/backend/app/models/review_summary.py +69 -0
- package/app/backend/app/models/revision.py +130 -0
- package/app/backend/app/models/ticket.py +223 -0
- package/app/backend/app/models/ticket_event.py +83 -0
- package/app/backend/app/models/user.py +47 -0
- package/app/backend/app/models/workspace.py +71 -0
- package/app/backend/app/redis_client.py +119 -0
- package/app/backend/app/routers/__init__.py +29 -0
- package/app/backend/app/routers/agents.py +296 -0
- package/app/backend/app/routers/auth.py +94 -0
- package/app/backend/app/routers/board.py +885 -0
- package/app/backend/app/routers/dashboard.py +351 -0
- package/app/backend/app/routers/debug.py +528 -0
- package/app/backend/app/routers/evidence.py +96 -0
- package/app/backend/app/routers/executors.py +324 -0
- package/app/backend/app/routers/goals.py +574 -0
- package/app/backend/app/routers/jobs.py +448 -0
- package/app/backend/app/routers/maintenance.py +172 -0
- package/app/backend/app/routers/merge.py +360 -0
- package/app/backend/app/routers/planner.py +537 -0
- package/app/backend/app/routers/pull_requests.py +382 -0
- package/app/backend/app/routers/repos.py +263 -0
- package/app/backend/app/routers/revisions.py +939 -0
- package/app/backend/app/routers/settings.py +267 -0
- package/app/backend/app/routers/tickets.py +2003 -0
- package/app/backend/app/routers/webhooks.py +143 -0
- package/app/backend/app/routers/websocket.py +249 -0
- package/app/backend/app/schemas/__init__.py +109 -0
- package/app/backend/app/schemas/board.py +87 -0
- package/app/backend/app/schemas/common.py +33 -0
- package/app/backend/app/schemas/evidence.py +87 -0
- package/app/backend/app/schemas/goal.py +90 -0
- package/app/backend/app/schemas/job.py +97 -0
- package/app/backend/app/schemas/merge.py +139 -0
- package/app/backend/app/schemas/planner.py +500 -0
- package/app/backend/app/schemas/repo.py +187 -0
- package/app/backend/app/schemas/review.py +137 -0
- package/app/backend/app/schemas/revision.py +114 -0
- package/app/backend/app/schemas/ticket.py +238 -0
- package/app/backend/app/schemas/ticket_event.py +72 -0
- package/app/backend/app/schemas/workspace.py +19 -0
- package/app/backend/app/services/__init__.py +31 -0
- package/app/backend/app/services/agent_memory_service.py +223 -0
- package/app/backend/app/services/agent_registry.py +346 -0
- package/app/backend/app/services/agent_session_manager.py +318 -0
- package/app/backend/app/services/agent_session_service.py +219 -0
- package/app/backend/app/services/agent_tools.py +379 -0
- package/app/backend/app/services/auth_service.py +98 -0
- package/app/backend/app/services/autonomy_service.py +380 -0
- package/app/backend/app/services/board_repo_service.py +201 -0
- package/app/backend/app/services/board_service.py +326 -0
- package/app/backend/app/services/cleanup_service.py +1085 -0
- package/app/backend/app/services/config_service.py +908 -0
- package/app/backend/app/services/context_gatherer.py +557 -0
- package/app/backend/app/services/cost_tracking_service.py +293 -0
- package/app/backend/app/services/cursor_log_normalizer.py +536 -0
- package/app/backend/app/services/delivery_pipeline.py +440 -0
- package/app/backend/app/services/executor_service.py +634 -0
- package/app/backend/app/services/git_host/__init__.py +11 -0
- package/app/backend/app/services/git_host/factory.py +87 -0
- package/app/backend/app/services/git_host/github.py +270 -0
- package/app/backend/app/services/git_host/gitlab.py +194 -0
- package/app/backend/app/services/git_host/protocol.py +75 -0
- package/app/backend/app/services/git_merge_simple.py +346 -0
- package/app/backend/app/services/git_ops.py +384 -0
- package/app/backend/app/services/github_service.py +233 -0
- package/app/backend/app/services/goal_service.py +113 -0
- package/app/backend/app/services/job_service.py +423 -0
- package/app/backend/app/services/job_watchdog_service.py +424 -0
- package/app/backend/app/services/langchain_adapter.py +122 -0
- package/app/backend/app/services/llm_provider_clients.py +351 -0
- package/app/backend/app/services/llm_service.py +285 -0
- package/app/backend/app/services/log_normalizer.py +342 -0
- package/app/backend/app/services/log_stream_service.py +276 -0
- package/app/backend/app/services/merge_checklist_service.py +264 -0
- package/app/backend/app/services/merge_service.py +784 -0
- package/app/backend/app/services/orchestrator_log.py +84 -0
- package/app/backend/app/services/planner_service.py +1662 -0
- package/app/backend/app/services/planner_tick_sync.py +1040 -0
- package/app/backend/app/services/queued_message_service.py +156 -0
- package/app/backend/app/services/reliability_wrapper.py +389 -0
- package/app/backend/app/services/repo_discovery_service.py +318 -0
- package/app/backend/app/services/review_service.py +334 -0
- package/app/backend/app/services/revision_service.py +389 -0
- package/app/backend/app/services/safe_autopilot.py +510 -0
- package/app/backend/app/services/sqlite_worker.py +372 -0
- package/app/backend/app/services/task_dispatch.py +135 -0
- package/app/backend/app/services/ticket_generation_service.py +1781 -0
- package/app/backend/app/services/ticket_service.py +486 -0
- package/app/backend/app/services/udar_planner_service.py +1007 -0
- package/app/backend/app/services/webhook_service.py +126 -0
- package/app/backend/app/services/workspace_service.py +465 -0
- package/app/backend/app/services/worktree_file_service.py +92 -0
- package/app/backend/app/services/worktree_validator.py +213 -0
- package/app/backend/app/sqlite_kv.py +278 -0
- package/app/backend/app/state_machine.py +128 -0
- package/app/backend/app/templates/__init__.py +5 -0
- package/app/backend/app/templates/registry.py +243 -0
- package/app/backend/app/utils/__init__.py +5 -0
- package/app/backend/app/utils/artifact_reader.py +87 -0
- package/app/backend/app/utils/circuit_breaker.py +229 -0
- package/app/backend/app/utils/db_retry.py +136 -0
- package/app/backend/app/utils/ignored_fields.py +123 -0
- package/app/backend/app/utils/validators.py +54 -0
- package/app/backend/app/websocket/__init__.py +5 -0
- package/app/backend/app/websocket/manager.py +179 -0
- package/app/backend/app/websocket/state_tracker.py +113 -0
- package/app/backend/app/worker.py +3190 -0
- package/app/backend/calculator_tickets.json +40 -0
- package/app/backend/canary_tests.sh +591 -0
- package/app/backend/celerybeat-schedule +0 -0
- package/app/backend/celerybeat-schedule-shm +0 -0
- package/app/backend/celerybeat-schedule-wal +0 -0
- package/app/backend/logs/.gitkeep +3 -0
- package/app/backend/multiplication_division_implementation_tickets.json +55 -0
- package/app/backend/multiplication_division_tickets.json +42 -0
- package/app/backend/pyproject.toml +45 -0
- package/app/backend/requirements-dev.txt +8 -0
- package/app/backend/requirements.txt +20 -0
- package/app/backend/run.sh +30 -0
- package/app/backend/run_with_logs.sh +10 -0
- package/app/backend/scientific_calculator_tickets.json +40 -0
- package/app/backend/scripts/extract_openapi.py +21 -0
- package/app/backend/scripts/seed_demo.py +187 -0
- package/app/backend/setup_demo_review.py +302 -0
- package/app/backend/test_actual_parse.py +41 -0
- package/app/backend/test_agent_streaming.py +61 -0
- package/app/backend/test_parse.py +51 -0
- package/app/backend/test_streaming.py +51 -0
- package/app/backend/test_subprocess_streaming.py +50 -0
- package/app/backend/tests/__init__.py +1 -0
- package/app/backend/tests/conftest.py +46 -0
- package/app/backend/tests/test_auth.py +341 -0
- package/app/backend/tests/test_autonomy_service.py +391 -0
- package/app/backend/tests/test_cleanup_service_safety.py +417 -0
- package/app/backend/tests/test_middleware.py +279 -0
- package/app/backend/tests/test_planner_providers.py +290 -0
- package/app/backend/tests/test_planner_unblock.py +183 -0
- package/app/backend/tests/test_revision_invariants.py +618 -0
- package/app/backend/tests/test_sqlite_kv.py +290 -0
- package/app/backend/tests/test_sqlite_worker.py +353 -0
- package/app/backend/tests/test_task_dispatch.py +100 -0
- package/app/backend/tests/test_ticket_validation.py +304 -0
- package/app/backend/tests/test_udar_agent.py +693 -0
- package/app/backend/tests/test_webhook_service.py +184 -0
- package/app/backend/tickets_output.json +59 -0
- package/app/backend/user_management_tickets.json +50 -0
- package/app/backend/uvicorn.log +0 -0
- package/app/draft.yaml +313 -0
- package/app/frontend/dist/assets/index-LcjCczu5.js +155 -0
- package/app/frontend/dist/assets/index-_FP_279e.css +1 -0
- package/app/frontend/dist/index.html +14 -0
- package/app/frontend/dist/vite.svg +1 -0
- package/app/frontend/package.json +101 -0
- package/bin/cli.js +527 -0
- package/package.json +37 -0
|
@@ -0,0 +1,1781 @@
|
|
|
1
|
+
"""Ticket generation service - orchestrates context gathering, agent calls, and validation.
|
|
2
|
+
|
|
3
|
+
This service is responsible for:
|
|
4
|
+
1. Collecting repository context via ContextGatherer
|
|
5
|
+
2. Building prompts for ticket generation
|
|
6
|
+
3. Calling the agent CLI (cursor-agent/claude) to generate tickets
|
|
7
|
+
4. Validating, capping, and sanitizing output
|
|
8
|
+
5. De-duplicating against existing tickets
|
|
9
|
+
6. Caching analysis results
|
|
10
|
+
|
|
11
|
+
Ticket generation uses the same agent infrastructure as execution for consistency.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import asyncio
|
|
15
|
+
import hashlib
|
|
16
|
+
import json
|
|
17
|
+
import logging
|
|
18
|
+
import re
|
|
19
|
+
import shutil
|
|
20
|
+
import subprocess
|
|
21
|
+
from dataclasses import dataclass
|
|
22
|
+
from datetime import UTC, datetime, timedelta
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
|
|
25
|
+
from sqlalchemy import and_, select
|
|
26
|
+
from sqlalchemy.ext.asyncio import AsyncSession
|
|
27
|
+
|
|
28
|
+
from app.models.analysis_cache import AnalysisCache
|
|
29
|
+
from app.models.goal import Goal
|
|
30
|
+
from app.models.ticket import Ticket
|
|
31
|
+
from app.models.ticket_event import TicketEvent
|
|
32
|
+
from app.schemas.planner import (
|
|
33
|
+
AnalyzeCodebaseResponse,
|
|
34
|
+
ContextStats,
|
|
35
|
+
CreatedTicketSchema,
|
|
36
|
+
ExcludedMatch,
|
|
37
|
+
FiletypeCount,
|
|
38
|
+
PriorityBucket,
|
|
39
|
+
ReflectionResult,
|
|
40
|
+
SimilarTicketWarning,
|
|
41
|
+
SuggestedPriorityChange,
|
|
42
|
+
bucket_to_priority,
|
|
43
|
+
priority_to_bucket,
|
|
44
|
+
)
|
|
45
|
+
from app.services.config_service import ConfigService, PlannerConfig
|
|
46
|
+
from app.services.context_gatherer import ContextGatherer, RepoContext
|
|
47
|
+
from app.services.executor_service import ExecutorService, ExecutorType
|
|
48
|
+
from app.services.llm_service import LLMService
|
|
49
|
+
from app.state_machine import ActorType, EventType, TicketState
|
|
50
|
+
|
|
51
|
+
logger = logging.getLogger(__name__)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
# =============================================================================
|
|
55
|
+
# Constants
|
|
56
|
+
# =============================================================================
|
|
57
|
+
|
|
58
|
+
# Cache TTL for analysis results
|
|
59
|
+
ANALYSIS_CACHE_TTL_MINUTES = 10
|
|
60
|
+
|
|
61
|
+
# Max tickets to generate in one call
|
|
62
|
+
MAX_TICKETS_PER_GENERATION = 10
|
|
63
|
+
|
|
64
|
+
# Similarity threshold for dedup (token overlap)
|
|
65
|
+
DEDUP_SIMILARITY_THRESHOLD = 0.6
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
# =============================================================================
|
|
69
|
+
# Dataclasses
|
|
70
|
+
# =============================================================================
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
@dataclass
|
|
74
|
+
class GenerationResult:
|
|
75
|
+
"""Result of ticket generation."""
|
|
76
|
+
|
|
77
|
+
tickets: list[CreatedTicketSchema]
|
|
78
|
+
goal_id: str | None
|
|
79
|
+
from_cache: bool = False
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
# =============================================================================
|
|
83
|
+
# Service
|
|
84
|
+
# =============================================================================
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class TicketGenerationService:
|
|
88
|
+
"""Orchestrates ticket generation from goals or codebase analysis.
|
|
89
|
+
|
|
90
|
+
This service coordinates:
|
|
91
|
+
- Context gathering (ContextGatherer)
|
|
92
|
+
- LLM calls (LLMService)
|
|
93
|
+
- Ticket creation and validation
|
|
94
|
+
- Caching and deduplication
|
|
95
|
+
"""
|
|
96
|
+
|
|
97
|
+
def __init__(
|
|
98
|
+
self,
|
|
99
|
+
db: AsyncSession,
|
|
100
|
+
llm_service: LLMService | None = None,
|
|
101
|
+
config: PlannerConfig | None = None,
|
|
102
|
+
):
|
|
103
|
+
"""Initialize the ticket generation service.
|
|
104
|
+
|
|
105
|
+
Args:
|
|
106
|
+
db: Async database session.
|
|
107
|
+
llm_service: LLM service instance. If None, creates one.
|
|
108
|
+
config: Planner configuration. If None, loads from config file.
|
|
109
|
+
"""
|
|
110
|
+
self.db = db
|
|
111
|
+
|
|
112
|
+
if config is None:
|
|
113
|
+
config_service = ConfigService()
|
|
114
|
+
config = config_service.get_planner_config()
|
|
115
|
+
self.config = config
|
|
116
|
+
|
|
117
|
+
if llm_service is None:
|
|
118
|
+
llm_service = LLMService(config)
|
|
119
|
+
self.llm = llm_service
|
|
120
|
+
|
|
121
|
+
self.context_gatherer = ContextGatherer()
|
|
122
|
+
|
|
123
|
+
# =========================================================================
|
|
124
|
+
# PUBLIC METHODS
|
|
125
|
+
# =========================================================================
|
|
126
|
+
|
|
127
|
+
async def generate_from_goal(
|
|
128
|
+
self,
|
|
129
|
+
goal_id: str,
|
|
130
|
+
repo_root: Path | str | None = None,
|
|
131
|
+
include_readme: bool = False,
|
|
132
|
+
validate_tickets: bool = False,
|
|
133
|
+
stream_callback=None,
|
|
134
|
+
) -> GenerationResult:
|
|
135
|
+
"""Generate tickets from a goal using the agent CLI.
|
|
136
|
+
|
|
137
|
+
Args:
|
|
138
|
+
goal_id: ID of the goal to generate tickets for.
|
|
139
|
+
repo_root: Optional path to repository for context.
|
|
140
|
+
include_readme: Whether to include README excerpt.
|
|
141
|
+
validate_tickets: Whether to validate tickets against codebase before creating.
|
|
142
|
+
|
|
143
|
+
Returns:
|
|
144
|
+
GenerationResult with created tickets.
|
|
145
|
+
|
|
146
|
+
Raises:
|
|
147
|
+
ValueError: If goal not found or no agent available.
|
|
148
|
+
"""
|
|
149
|
+
# Fetch goal
|
|
150
|
+
result = await self.db.execute(select(Goal).where(Goal.id == goal_id))
|
|
151
|
+
goal = result.scalar_one_or_none()
|
|
152
|
+
if not goal:
|
|
153
|
+
raise ValueError(f"Goal not found: {goal_id}")
|
|
154
|
+
|
|
155
|
+
# Determine repo_root from goal's board if not provided
|
|
156
|
+
if not repo_root and goal.board_id:
|
|
157
|
+
from app.models.board import Board
|
|
158
|
+
|
|
159
|
+
board_result = await self.db.execute(
|
|
160
|
+
select(Board).where(Board.id == goal.board_id)
|
|
161
|
+
)
|
|
162
|
+
board = board_result.scalar_one_or_none()
|
|
163
|
+
if board:
|
|
164
|
+
repo_root = board.repo_root
|
|
165
|
+
|
|
166
|
+
if not repo_root:
|
|
167
|
+
raise ValueError("No repository path available for ticket generation")
|
|
168
|
+
|
|
169
|
+
repo_root = Path(repo_root)
|
|
170
|
+
|
|
171
|
+
# Fetch existing tickets for this goal to prevent duplicates
|
|
172
|
+
existing_tickets = await self._get_existing_tickets(goal_id)
|
|
173
|
+
|
|
174
|
+
# Build prompt for agent (includes existing tickets for dedup awareness)
|
|
175
|
+
prompt = self._build_agent_ticket_generation_prompt(
|
|
176
|
+
goal, include_readme, existing_tickets
|
|
177
|
+
)
|
|
178
|
+
|
|
179
|
+
# Call agent to generate tickets (run in thread pool to avoid blocking event loop)
|
|
180
|
+
logger.info(
|
|
181
|
+
f"Calling agent CLI for goal '{goal.title}' (streaming={'yes' if stream_callback else 'no'})"
|
|
182
|
+
)
|
|
183
|
+
if stream_callback:
|
|
184
|
+
stream_callback(
|
|
185
|
+
f"[DEBUG] Prompt built ({len(prompt)} chars). Calling agent..."
|
|
186
|
+
)
|
|
187
|
+
agent_response = await asyncio.to_thread(
|
|
188
|
+
self._call_agent_for_tickets,
|
|
189
|
+
prompt,
|
|
190
|
+
repo_root,
|
|
191
|
+
stream_callback,
|
|
192
|
+
)
|
|
193
|
+
logger.info(
|
|
194
|
+
f"Agent CLI completed. Response length: {len(agent_response)} chars"
|
|
195
|
+
)
|
|
196
|
+
|
|
197
|
+
# Parse and validate response
|
|
198
|
+
data = self._parse_agent_json_response(agent_response)
|
|
199
|
+
raw_tickets = data.get("tickets", [])
|
|
200
|
+
|
|
201
|
+
logger.info(
|
|
202
|
+
f"Agent generated {len(raw_tickets)} raw tickets for goal '{goal.title}'"
|
|
203
|
+
)
|
|
204
|
+
if len(raw_tickets) == 0:
|
|
205
|
+
logger.warning(
|
|
206
|
+
f"Agent returned 0 tickets. Response preview: {agent_response[:500]}"
|
|
207
|
+
)
|
|
208
|
+
|
|
209
|
+
# Validate tickets against codebase if enabled
|
|
210
|
+
filtered_count = 0
|
|
211
|
+
if validate_tickets and raw_tickets:
|
|
212
|
+
logger.info(f"Validating {len(raw_tickets)} tickets against codebase")
|
|
213
|
+
|
|
214
|
+
try:
|
|
215
|
+
# Gather context for validation
|
|
216
|
+
context = self.context_gatherer.gather(
|
|
217
|
+
repo_root=repo_root,
|
|
218
|
+
include_readme_excerpt=include_readme,
|
|
219
|
+
)
|
|
220
|
+
context_summary = context.to_prompt_string()[
|
|
221
|
+
:3000
|
|
222
|
+
] # Limit size for validation
|
|
223
|
+
|
|
224
|
+
validated_tickets = []
|
|
225
|
+
|
|
226
|
+
for raw in raw_tickets:
|
|
227
|
+
try:
|
|
228
|
+
validation = self._validate_ticket_against_codebase(
|
|
229
|
+
ticket=raw,
|
|
230
|
+
goal=goal,
|
|
231
|
+
context_summary=context_summary,
|
|
232
|
+
)
|
|
233
|
+
|
|
234
|
+
# Store validation result in ticket for later use in event payload
|
|
235
|
+
raw["_validation"] = validation
|
|
236
|
+
|
|
237
|
+
# Only include appropriate tickets
|
|
238
|
+
if (
|
|
239
|
+
validation.get("is_valid")
|
|
240
|
+
and validation.get("validation_result") == "appropriate"
|
|
241
|
+
):
|
|
242
|
+
validated_tickets.append(raw)
|
|
243
|
+
else:
|
|
244
|
+
filtered_count += 1
|
|
245
|
+
logger.warning(
|
|
246
|
+
f"Filtered ticket '{raw.get('title')}': "
|
|
247
|
+
f"result={validation.get('validation_result')}, "
|
|
248
|
+
f"reason={validation.get('reasoning')}"
|
|
249
|
+
)
|
|
250
|
+
except Exception as e:
|
|
251
|
+
# If validation fails for a ticket, include it anyway (fail open)
|
|
252
|
+
logger.error(
|
|
253
|
+
f"Validation failed for ticket '{raw.get('title')}': {e}"
|
|
254
|
+
)
|
|
255
|
+
validated_tickets.append(raw)
|
|
256
|
+
|
|
257
|
+
if filtered_count > 0:
|
|
258
|
+
logger.warning(
|
|
259
|
+
f"Filtered {filtered_count}/{len(raw_tickets)} tickets during validation"
|
|
260
|
+
)
|
|
261
|
+
|
|
262
|
+
raw_tickets = validated_tickets
|
|
263
|
+
except Exception as e:
|
|
264
|
+
# If entire validation process fails, proceed with all tickets (fail open)
|
|
265
|
+
logger.error(
|
|
266
|
+
f"Validation process failed, proceeding with all {len(raw_tickets)} tickets: {e}"
|
|
267
|
+
)
|
|
268
|
+
filtered_count = 0
|
|
269
|
+
else:
|
|
270
|
+
if not validate_tickets:
|
|
271
|
+
logger.debug(
|
|
272
|
+
f"Ticket validation disabled, skipping for {len(raw_tickets)} tickets"
|
|
273
|
+
)
|
|
274
|
+
|
|
275
|
+
# Get existing tickets for dedup
|
|
276
|
+
existing_tickets = await self._get_existing_tickets(goal_id)
|
|
277
|
+
|
|
278
|
+
# Create tickets in database
|
|
279
|
+
created_tickets: list[CreatedTicketSchema] = []
|
|
280
|
+
# Track title -> ticket_id for resolving blocked_by references
|
|
281
|
+
title_to_ticket_id: dict[str, str] = {}
|
|
282
|
+
# Track tickets that need blocked_by resolved after all are created
|
|
283
|
+
pending_blocked_by: list[tuple[str, str]] = [] # (ticket_id, blocked_by_title)
|
|
284
|
+
|
|
285
|
+
for _idx, raw in enumerate(raw_tickets[:MAX_TICKETS_PER_GENERATION], 1):
|
|
286
|
+
try:
|
|
287
|
+
# Skip non-dict items (malformed agent output)
|
|
288
|
+
if not isinstance(raw, dict):
|
|
289
|
+
logger.warning(
|
|
290
|
+
f"Skipping non-dict ticket entry (type={type(raw).__name__}): {str(raw)[:100]}"
|
|
291
|
+
)
|
|
292
|
+
continue
|
|
293
|
+
|
|
294
|
+
# Validate required fields
|
|
295
|
+
title = raw.get("title", "").strip()
|
|
296
|
+
if not title or len(title) > 255:
|
|
297
|
+
logger.warning(
|
|
298
|
+
f"Skipping ticket with invalid title (len={len(title)})"
|
|
299
|
+
)
|
|
300
|
+
continue
|
|
301
|
+
|
|
302
|
+
# Dedup check - block on exact match and high-similarity matches
|
|
303
|
+
status, dup_id, dup_title, similarity = self._check_duplicate(
|
|
304
|
+
title, existing_tickets
|
|
305
|
+
)
|
|
306
|
+
if status == "exact":
|
|
307
|
+
logger.info(f"Skipping exact duplicate ticket: {title[:50]}")
|
|
308
|
+
continue
|
|
309
|
+
if status == "similar" and similarity >= 0.7:
|
|
310
|
+
logger.info(
|
|
311
|
+
f"Skipping near-duplicate ticket: '{title[:50]}' "
|
|
312
|
+
f"(similar to '{dup_title}', score={similarity:.2f})"
|
|
313
|
+
)
|
|
314
|
+
continue
|
|
315
|
+
|
|
316
|
+
# Parse priority bucket
|
|
317
|
+
bucket_str = raw.get("priority_bucket", "P2")
|
|
318
|
+
try:
|
|
319
|
+
bucket = PriorityBucket(bucket_str)
|
|
320
|
+
except ValueError:
|
|
321
|
+
bucket = PriorityBucket.P2 # Default to medium
|
|
322
|
+
|
|
323
|
+
priority = bucket_to_priority(bucket)
|
|
324
|
+
rationale = raw.get("priority_rationale", "")
|
|
325
|
+
|
|
326
|
+
# Determine initial state: auto-approve if goal has autonomy enabled
|
|
327
|
+
initial_state = TicketState.PROPOSED.value
|
|
328
|
+
auto_approved_ticket = False
|
|
329
|
+
if goal.autonomy_enabled and goal.auto_approve_tickets:
|
|
330
|
+
initial_state = TicketState.PLANNED.value
|
|
331
|
+
auto_approved_ticket = True
|
|
332
|
+
|
|
333
|
+
# Create ticket
|
|
334
|
+
ticket = Ticket(
|
|
335
|
+
goal_id=goal_id,
|
|
336
|
+
board_id=goal.board_id,
|
|
337
|
+
title=title,
|
|
338
|
+
description=raw.get("description", ""),
|
|
339
|
+
state=initial_state,
|
|
340
|
+
priority=priority,
|
|
341
|
+
)
|
|
342
|
+
self.db.add(ticket)
|
|
343
|
+
await self.db.flush()
|
|
344
|
+
await self.db.refresh(ticket)
|
|
345
|
+
logger.info(f"Created ticket {ticket.id}: {title[:50]}")
|
|
346
|
+
|
|
347
|
+
# Track title -> id mapping for blocked_by resolution
|
|
348
|
+
title_to_ticket_id[title.lower()] = ticket.id
|
|
349
|
+
|
|
350
|
+
# Check if this ticket has a blocked_by reference
|
|
351
|
+
blocked_by_title = raw.get("blocked_by")
|
|
352
|
+
if blocked_by_title:
|
|
353
|
+
pending_blocked_by.append((ticket.id, blocked_by_title))
|
|
354
|
+
|
|
355
|
+
# Build event payload
|
|
356
|
+
event_payload = {
|
|
357
|
+
"priority_bucket": bucket.value,
|
|
358
|
+
"priority_rationale": rationale,
|
|
359
|
+
"verification": raw.get("verification", []),
|
|
360
|
+
"notes": raw.get("notes"),
|
|
361
|
+
"blocked_by_title": blocked_by_title,
|
|
362
|
+
}
|
|
363
|
+
|
|
364
|
+
# Add validation result if present
|
|
365
|
+
if "_validation" in raw:
|
|
366
|
+
validation = raw["_validation"]
|
|
367
|
+
event_payload["validation"] = {
|
|
368
|
+
"validated": True,
|
|
369
|
+
"confidence": validation.get("confidence"),
|
|
370
|
+
"validation_result": validation.get("validation_result"),
|
|
371
|
+
"reasoning": validation.get("reasoning"),
|
|
372
|
+
}
|
|
373
|
+
|
|
374
|
+
# Add auto-approval info to event payload
|
|
375
|
+
if auto_approved_ticket:
|
|
376
|
+
event_payload["auto_approved"] = True
|
|
377
|
+
|
|
378
|
+
# Create event
|
|
379
|
+
event = TicketEvent(
|
|
380
|
+
ticket_id=ticket.id,
|
|
381
|
+
event_type=EventType.CREATED.value,
|
|
382
|
+
from_state=None,
|
|
383
|
+
to_state=initial_state,
|
|
384
|
+
actor_type=ActorType.PLANNER.value,
|
|
385
|
+
actor_id="ticket_generation_service",
|
|
386
|
+
reason=f"Generated from goal: {goal.title}",
|
|
387
|
+
payload_json=json.dumps(event_payload),
|
|
388
|
+
)
|
|
389
|
+
self.db.add(event)
|
|
390
|
+
|
|
391
|
+
# Record autonomy event if auto-approved
|
|
392
|
+
if auto_approved_ticket:
|
|
393
|
+
autonomy_event = TicketEvent(
|
|
394
|
+
ticket_id=ticket.id,
|
|
395
|
+
event_type=EventType.TRANSITIONED.value,
|
|
396
|
+
from_state=TicketState.PROPOSED.value,
|
|
397
|
+
to_state=TicketState.PLANNED.value,
|
|
398
|
+
actor_type=ActorType.SYSTEM.value,
|
|
399
|
+
actor_id="autonomy_service",
|
|
400
|
+
reason="Auto-approved ticket (autonomy mode)",
|
|
401
|
+
payload_json=json.dumps({"autonomy_action": "approve_ticket"}),
|
|
402
|
+
)
|
|
403
|
+
self.db.add(autonomy_event)
|
|
404
|
+
|
|
405
|
+
created_tickets.append(
|
|
406
|
+
CreatedTicketSchema(
|
|
407
|
+
id=ticket.id,
|
|
408
|
+
title=ticket.title,
|
|
409
|
+
description=ticket.description or "",
|
|
410
|
+
priority_bucket=bucket,
|
|
411
|
+
priority=priority,
|
|
412
|
+
priority_rationale=rationale,
|
|
413
|
+
verification=raw.get("verification", []),
|
|
414
|
+
notes=raw.get("notes"),
|
|
415
|
+
)
|
|
416
|
+
)
|
|
417
|
+
|
|
418
|
+
existing_tickets.append((ticket.id, title)) # Add to dedup list
|
|
419
|
+
|
|
420
|
+
except Exception as e:
|
|
421
|
+
raw_title = (
|
|
422
|
+
raw.get("title", "") if isinstance(raw, dict) else str(raw)[:50]
|
|
423
|
+
)
|
|
424
|
+
logger.error(
|
|
425
|
+
f"Error creating ticket '{raw_title[:50]}': {e}",
|
|
426
|
+
exc_info=True,
|
|
427
|
+
)
|
|
428
|
+
# Don't re-raise, continue with next ticket
|
|
429
|
+
continue
|
|
430
|
+
|
|
431
|
+
# Resolve blocked_by references now that all tickets are created
|
|
432
|
+
for ticket_id, blocked_by_title in pending_blocked_by:
|
|
433
|
+
blocker_id = title_to_ticket_id.get(blocked_by_title.lower())
|
|
434
|
+
if blocker_id:
|
|
435
|
+
# Update the ticket with the blocker ID
|
|
436
|
+
result = await self.db.execute(
|
|
437
|
+
select(Ticket).where(Ticket.id == ticket_id)
|
|
438
|
+
)
|
|
439
|
+
ticket = result.scalar_one_or_none()
|
|
440
|
+
if ticket:
|
|
441
|
+
ticket.blocked_by_ticket_id = blocker_id
|
|
442
|
+
logger.info(
|
|
443
|
+
f"Ticket '{ticket.title}' blocked by ticket ID {blocker_id}"
|
|
444
|
+
)
|
|
445
|
+
|
|
446
|
+
# Update the CreatedTicketSchema with blocked_by info
|
|
447
|
+
for created in created_tickets:
|
|
448
|
+
if created.id == ticket_id:
|
|
449
|
+
created.blocked_by_ticket_id = blocker_id
|
|
450
|
+
created.blocked_by_title = blocked_by_title
|
|
451
|
+
break
|
|
452
|
+
else:
|
|
453
|
+
logger.warning(
|
|
454
|
+
f"Could not resolve blocked_by reference '{blocked_by_title}' "
|
|
455
|
+
f"for ticket {ticket_id}"
|
|
456
|
+
)
|
|
457
|
+
|
|
458
|
+
await self.db.commit()
|
|
459
|
+
|
|
460
|
+
logger.info(
|
|
461
|
+
f"Created {len(created_tickets)} tickets for goal '{goal.title}' "
|
|
462
|
+
f"(generated: {len(data.get('tickets', []))}, filtered: {filtered_count})"
|
|
463
|
+
)
|
|
464
|
+
|
|
465
|
+
return GenerationResult(
|
|
466
|
+
tickets=created_tickets,
|
|
467
|
+
goal_id=goal_id,
|
|
468
|
+
)
|
|
469
|
+
|
|
470
|
+
async def analyze_codebase(
|
|
471
|
+
self,
|
|
472
|
+
repo_root: Path | str,
|
|
473
|
+
goal_id: str | None = None,
|
|
474
|
+
focus_areas: list[str] | None = None,
|
|
475
|
+
include_readme: bool = False,
|
|
476
|
+
board_id: str | None = None,
|
|
477
|
+
) -> AnalyzeCodebaseResponse:
|
|
478
|
+
"""Analyze a codebase and generate improvement tickets.
|
|
479
|
+
|
|
480
|
+
Args:
|
|
481
|
+
repo_root: Path to the repository.
|
|
482
|
+
goal_id: Optional goal to attach tickets to.
|
|
483
|
+
focus_areas: Optional focus hints.
|
|
484
|
+
include_readme: Whether to include README excerpt.
|
|
485
|
+
board_id: Board ID for scoping (required for multi-board setups).
|
|
486
|
+
|
|
487
|
+
Returns:
|
|
488
|
+
AnalyzeCodebaseResponse with generated tickets.
|
|
489
|
+
"""
|
|
490
|
+
repo_root = Path(repo_root).resolve()
|
|
491
|
+
|
|
492
|
+
# Get git HEAD SHA for cache invalidation
|
|
493
|
+
head_sha = self._get_git_head_sha(repo_root)
|
|
494
|
+
|
|
495
|
+
# Check cache (includes HEAD SHA so invalidates on new commits)
|
|
496
|
+
cache_key = self._compute_cache_key(repo_root, focus_areas, head_sha)
|
|
497
|
+
cached = await self._get_cached_analysis(cache_key)
|
|
498
|
+
if cached:
|
|
499
|
+
return AnalyzeCodebaseResponse(
|
|
500
|
+
tickets=cached.get("tickets", []),
|
|
501
|
+
goal_id=goal_id,
|
|
502
|
+
analysis_summary=cached.get("analysis_summary", ""),
|
|
503
|
+
cache_hit=True,
|
|
504
|
+
context_stats=cached.get("context_stats"),
|
|
505
|
+
similar_warnings=cached.get("similar_warnings", []),
|
|
506
|
+
repo_head_sha=head_sha,
|
|
507
|
+
)
|
|
508
|
+
|
|
509
|
+
# Gather context
|
|
510
|
+
context = self.context_gatherer.gather(
|
|
511
|
+
repo_root=repo_root,
|
|
512
|
+
include_readme_excerpt=include_readme,
|
|
513
|
+
)
|
|
514
|
+
|
|
515
|
+
# Build context stats for observability
|
|
516
|
+
# Build excluded_matches (top 10 by count)
|
|
517
|
+
excluded_matches = [
|
|
518
|
+
ExcludedMatch(pattern=pattern, count=count)
|
|
519
|
+
for pattern, count in sorted(
|
|
520
|
+
context.stats.excluded_by_pattern.items(),
|
|
521
|
+
key=lambda x: -x[1],
|
|
522
|
+
)[:10]
|
|
523
|
+
]
|
|
524
|
+
|
|
525
|
+
# Build filetype histogram (top 10 by count)
|
|
526
|
+
filetype_histogram = [
|
|
527
|
+
FiletypeCount(extension=ext, count=count)
|
|
528
|
+
for ext, count in sorted(
|
|
529
|
+
context.stats.extensions_scanned.items(),
|
|
530
|
+
key=lambda x: -x[1],
|
|
531
|
+
)[:10]
|
|
532
|
+
]
|
|
533
|
+
|
|
534
|
+
context_stats = ContextStats(
|
|
535
|
+
files_scanned=context.stats.files_scanned,
|
|
536
|
+
todos_collected=context.stats.todo_lines_found,
|
|
537
|
+
context_truncated=(
|
|
538
|
+
context.stats.files_scanned >= self.context_gatherer.MAX_FILES_SCANNED
|
|
539
|
+
or context.stats.bytes_read >= self.context_gatherer.MAX_BYTES_TOTAL
|
|
540
|
+
),
|
|
541
|
+
skipped_excluded=context.stats.skipped_excluded,
|
|
542
|
+
skipped_symlinks=context.stats.skipped_symlinks,
|
|
543
|
+
bytes_read=context.stats.bytes_read,
|
|
544
|
+
excluded_matches=excluded_matches,
|
|
545
|
+
filetype_histogram=filetype_histogram,
|
|
546
|
+
)
|
|
547
|
+
|
|
548
|
+
# Build prompt for agent
|
|
549
|
+
prompt = self._build_agent_analysis_prompt(focus_areas)
|
|
550
|
+
|
|
551
|
+
# Call agent to analyze codebase
|
|
552
|
+
agent_response = self._call_agent_for_tickets(prompt, repo_root)
|
|
553
|
+
|
|
554
|
+
# Parse response
|
|
555
|
+
data = self._parse_agent_json_response(agent_response)
|
|
556
|
+
raw_tickets = data.get("tickets", [])
|
|
557
|
+
summary = data.get("summary", "Analysis complete.")
|
|
558
|
+
|
|
559
|
+
# Get existing tickets for dedup (across all goals if no goal specified)
|
|
560
|
+
existing_tickets = await self._get_existing_tickets(goal_id)
|
|
561
|
+
|
|
562
|
+
# Create tickets with improved dedup (exact=block, similar=warn)
|
|
563
|
+
created_tickets: list[CreatedTicketSchema] = []
|
|
564
|
+
similar_warnings: list[SimilarTicketWarning] = []
|
|
565
|
+
# Track title -> ticket_id for resolving blocked_by references
|
|
566
|
+
title_to_ticket_id: dict[str, str] = {}
|
|
567
|
+
# Track tickets that need blocked_by resolved after all are created
|
|
568
|
+
pending_blocked_by: list[tuple[str, str]] = [] # (ticket_id, blocked_by_title)
|
|
569
|
+
|
|
570
|
+
for raw in raw_tickets[:MAX_TICKETS_PER_GENERATION]:
|
|
571
|
+
title = raw.get("title", "").strip()
|
|
572
|
+
if not title or len(title) > 255:
|
|
573
|
+
continue
|
|
574
|
+
|
|
575
|
+
# Check for duplicates
|
|
576
|
+
status, existing_id, existing_title, similarity = self._check_duplicate(
|
|
577
|
+
title, existing_tickets
|
|
578
|
+
)
|
|
579
|
+
|
|
580
|
+
if status == "exact":
|
|
581
|
+
# Hard block on exact match
|
|
582
|
+
logger.debug(f"Blocking exact duplicate ticket: {title}")
|
|
583
|
+
continue
|
|
584
|
+
elif status == "similar":
|
|
585
|
+
# Warn but don't block on similar
|
|
586
|
+
similar_warnings.append(
|
|
587
|
+
SimilarTicketWarning(
|
|
588
|
+
proposed_title=title,
|
|
589
|
+
similar_to_id=existing_id,
|
|
590
|
+
similar_to_title=existing_title,
|
|
591
|
+
similarity_score=similarity,
|
|
592
|
+
)
|
|
593
|
+
)
|
|
594
|
+
logger.debug(f"Warning: ticket '{title}' similar to '{existing_title}'")
|
|
595
|
+
# Continue creating the ticket (warn, don't block)
|
|
596
|
+
|
|
597
|
+
bucket_str = raw.get("priority_bucket", "P2")
|
|
598
|
+
try:
|
|
599
|
+
bucket = PriorityBucket(bucket_str)
|
|
600
|
+
except ValueError:
|
|
601
|
+
bucket = PriorityBucket.P2
|
|
602
|
+
|
|
603
|
+
priority = bucket_to_priority(bucket)
|
|
604
|
+
rationale = raw.get("priority_rationale", "")
|
|
605
|
+
blocked_by_title = raw.get("blocked_by")
|
|
606
|
+
|
|
607
|
+
# Only create in DB if goal_id provided
|
|
608
|
+
if goal_id:
|
|
609
|
+
ticket = Ticket(
|
|
610
|
+
goal_id=goal_id,
|
|
611
|
+
board_id=board_id, # Scope to board for permission boundary
|
|
612
|
+
title=title,
|
|
613
|
+
description=raw.get("description", ""),
|
|
614
|
+
state=TicketState.PROPOSED.value,
|
|
615
|
+
priority=priority,
|
|
616
|
+
)
|
|
617
|
+
self.db.add(ticket)
|
|
618
|
+
await self.db.flush()
|
|
619
|
+
await self.db.refresh(ticket)
|
|
620
|
+
|
|
621
|
+
# Track title -> id mapping for blocked_by resolution
|
|
622
|
+
title_to_ticket_id[title.lower()] = ticket.id
|
|
623
|
+
|
|
624
|
+
# Check if this ticket has a blocked_by reference
|
|
625
|
+
if blocked_by_title:
|
|
626
|
+
pending_blocked_by.append((ticket.id, blocked_by_title))
|
|
627
|
+
|
|
628
|
+
event = TicketEvent(
|
|
629
|
+
ticket_id=ticket.id,
|
|
630
|
+
event_type=EventType.CREATED.value,
|
|
631
|
+
from_state=None,
|
|
632
|
+
to_state=TicketState.PROPOSED.value,
|
|
633
|
+
actor_type=ActorType.PLANNER.value,
|
|
634
|
+
actor_id="ticket_generation_service",
|
|
635
|
+
reason="Generated from codebase analysis",
|
|
636
|
+
payload_json=json.dumps(
|
|
637
|
+
{
|
|
638
|
+
"priority_bucket": bucket.value,
|
|
639
|
+
"priority_rationale": rationale,
|
|
640
|
+
"focus_areas": focus_areas,
|
|
641
|
+
"repo_head_sha": head_sha,
|
|
642
|
+
"blocked_by_title": blocked_by_title,
|
|
643
|
+
}
|
|
644
|
+
),
|
|
645
|
+
)
|
|
646
|
+
self.db.add(event)
|
|
647
|
+
|
|
648
|
+
ticket_id = ticket.id
|
|
649
|
+
else:
|
|
650
|
+
# Preview mode - no DB write
|
|
651
|
+
ticket_id = f"preview-{len(created_tickets)}"
|
|
652
|
+
title_to_ticket_id[title.lower()] = ticket_id
|
|
653
|
+
|
|
654
|
+
created_tickets.append(
|
|
655
|
+
CreatedTicketSchema(
|
|
656
|
+
id=ticket_id,
|
|
657
|
+
title=title,
|
|
658
|
+
description=raw.get("description", ""),
|
|
659
|
+
priority_bucket=bucket,
|
|
660
|
+
priority=priority,
|
|
661
|
+
priority_rationale=rationale,
|
|
662
|
+
verification=raw.get("verification", []),
|
|
663
|
+
notes=raw.get("notes"),
|
|
664
|
+
)
|
|
665
|
+
)
|
|
666
|
+
|
|
667
|
+
# Add to existing for remaining dedup checks
|
|
668
|
+
existing_tickets.append((ticket_id, title))
|
|
669
|
+
|
|
670
|
+
# Resolve blocked_by references now that all tickets are created
|
|
671
|
+
if goal_id:
|
|
672
|
+
for ticket_id, blocked_by_title in pending_blocked_by:
|
|
673
|
+
blocker_id = title_to_ticket_id.get(blocked_by_title.lower())
|
|
674
|
+
if blocker_id:
|
|
675
|
+
# Update the ticket with the blocker ID
|
|
676
|
+
result = await self.db.execute(
|
|
677
|
+
select(Ticket).where(Ticket.id == ticket_id)
|
|
678
|
+
)
|
|
679
|
+
ticket = result.scalar_one_or_none()
|
|
680
|
+
if ticket:
|
|
681
|
+
ticket.blocked_by_ticket_id = blocker_id
|
|
682
|
+
logger.info(
|
|
683
|
+
f"Ticket '{ticket.title}' blocked by ticket ID {blocker_id}"
|
|
684
|
+
)
|
|
685
|
+
|
|
686
|
+
# Update the CreatedTicketSchema with blocked_by info
|
|
687
|
+
for created in created_tickets:
|
|
688
|
+
if created.id == ticket_id:
|
|
689
|
+
created.blocked_by_ticket_id = blocker_id
|
|
690
|
+
created.blocked_by_title = blocked_by_title
|
|
691
|
+
break
|
|
692
|
+
else:
|
|
693
|
+
logger.warning(
|
|
694
|
+
f"Could not resolve blocked_by reference '{blocked_by_title}' "
|
|
695
|
+
f"for ticket {ticket_id}"
|
|
696
|
+
)
|
|
697
|
+
|
|
698
|
+
await self.db.commit()
|
|
699
|
+
|
|
700
|
+
# Cache result (includes stats and warnings)
|
|
701
|
+
await self._cache_analysis(
|
|
702
|
+
cache_key,
|
|
703
|
+
{
|
|
704
|
+
"tickets": [t.model_dump() for t in created_tickets],
|
|
705
|
+
"analysis_summary": summary,
|
|
706
|
+
"context_stats": context_stats.model_dump(),
|
|
707
|
+
"similar_warnings": [w.model_dump() for w in similar_warnings],
|
|
708
|
+
},
|
|
709
|
+
)
|
|
710
|
+
|
|
711
|
+
return AnalyzeCodebaseResponse(
|
|
712
|
+
tickets=created_tickets,
|
|
713
|
+
goal_id=goal_id,
|
|
714
|
+
analysis_summary=summary,
|
|
715
|
+
cache_hit=False,
|
|
716
|
+
context_stats=context_stats,
|
|
717
|
+
similar_warnings=similar_warnings,
|
|
718
|
+
repo_head_sha=head_sha,
|
|
719
|
+
)
|
|
720
|
+
|
|
721
|
+
async def reflect_on_proposals(self, goal_id: str) -> ReflectionResult:
|
|
722
|
+
"""Reflect on proposed tickets for a goal.
|
|
723
|
+
|
|
724
|
+
Evaluates ticket quality, identifies coverage gaps, and suggests
|
|
725
|
+
priority adjustments.
|
|
726
|
+
|
|
727
|
+
Args:
|
|
728
|
+
goal_id: ID of the goal whose proposals to reflect on.
|
|
729
|
+
|
|
730
|
+
Returns:
|
|
731
|
+
ReflectionResult with assessment and suggestions.
|
|
732
|
+
"""
|
|
733
|
+
# Get goal
|
|
734
|
+
result = await self.db.execute(select(Goal).where(Goal.id == goal_id))
|
|
735
|
+
goal = result.scalar_one_or_none()
|
|
736
|
+
if not goal:
|
|
737
|
+
raise ValueError(f"Goal not found: {goal_id}")
|
|
738
|
+
|
|
739
|
+
# Get proposed tickets
|
|
740
|
+
result = await self.db.execute(
|
|
741
|
+
select(Ticket).where(
|
|
742
|
+
and_(
|
|
743
|
+
Ticket.goal_id == goal_id,
|
|
744
|
+
Ticket.state == TicketState.PROPOSED.value,
|
|
745
|
+
)
|
|
746
|
+
)
|
|
747
|
+
)
|
|
748
|
+
tickets = list(result.scalars().all())
|
|
749
|
+
|
|
750
|
+
if not tickets:
|
|
751
|
+
return ReflectionResult(
|
|
752
|
+
overall_quality="insufficient",
|
|
753
|
+
quality_notes="No proposed tickets found for this goal.",
|
|
754
|
+
coverage_gaps=["No tickets have been generated yet."],
|
|
755
|
+
suggested_changes=[],
|
|
756
|
+
)
|
|
757
|
+
|
|
758
|
+
# Build prompt
|
|
759
|
+
system_prompt = self._build_reflection_system_prompt()
|
|
760
|
+
user_prompt = self._build_reflection_user_prompt(goal, tickets)
|
|
761
|
+
|
|
762
|
+
response = self.llm.call_completion(
|
|
763
|
+
messages=[{"role": "user", "content": user_prompt}],
|
|
764
|
+
max_tokens=1500,
|
|
765
|
+
system_prompt=system_prompt,
|
|
766
|
+
)
|
|
767
|
+
|
|
768
|
+
# Parse response
|
|
769
|
+
data = self.llm.safe_parse_json(
|
|
770
|
+
response.content,
|
|
771
|
+
{
|
|
772
|
+
"overall_quality": "needs_work",
|
|
773
|
+
"quality_notes": "Unable to analyze tickets.",
|
|
774
|
+
"coverage_gaps": [],
|
|
775
|
+
"suggested_changes": [],
|
|
776
|
+
},
|
|
777
|
+
)
|
|
778
|
+
|
|
779
|
+
# Build suggested changes with ticket info
|
|
780
|
+
suggested_changes: list[SuggestedPriorityChange] = []
|
|
781
|
+
for change in data.get("suggested_changes", []):
|
|
782
|
+
ticket_id = change.get("ticket_id")
|
|
783
|
+
# Find the ticket
|
|
784
|
+
ticket = next((t for t in tickets if t.id == ticket_id), None)
|
|
785
|
+
if not ticket:
|
|
786
|
+
continue
|
|
787
|
+
|
|
788
|
+
try:
|
|
789
|
+
current_bucket = priority_to_bucket(ticket.priority or 50)
|
|
790
|
+
suggested_bucket = PriorityBucket(change.get("suggested_bucket", "P2"))
|
|
791
|
+
except ValueError:
|
|
792
|
+
continue
|
|
793
|
+
|
|
794
|
+
suggested_changes.append(
|
|
795
|
+
SuggestedPriorityChange(
|
|
796
|
+
ticket_id=ticket_id,
|
|
797
|
+
ticket_title=ticket.title,
|
|
798
|
+
current_bucket=current_bucket,
|
|
799
|
+
current_priority=ticket.priority or 50,
|
|
800
|
+
suggested_bucket=suggested_bucket,
|
|
801
|
+
suggested_priority=bucket_to_priority(suggested_bucket),
|
|
802
|
+
reason=change.get("reason", ""),
|
|
803
|
+
)
|
|
804
|
+
)
|
|
805
|
+
|
|
806
|
+
return ReflectionResult(
|
|
807
|
+
overall_quality=data.get("overall_quality", "needs_work"),
|
|
808
|
+
quality_notes=data.get("quality_notes", ""),
|
|
809
|
+
coverage_gaps=data.get("coverage_gaps", []),
|
|
810
|
+
suggested_changes=suggested_changes,
|
|
811
|
+
)
|
|
812
|
+
|
|
813
|
+
# =========================================================================
|
|
814
|
+
# PROMPT BUILDERS
|
|
815
|
+
# =========================================================================
|
|
816
|
+
|
|
817
|
+
def _build_goal_system_prompt(self) -> str:
|
|
818
|
+
"""Build system prompt for goal-based ticket generation."""
|
|
819
|
+
return """You are a technical project planner. Given a goal and optional repository context, break it down into 2-5 specific, actionable tickets.
|
|
820
|
+
|
|
821
|
+
Your response MUST be valid JSON with this exact structure:
|
|
822
|
+
{
|
|
823
|
+
"tickets": [
|
|
824
|
+
{
|
|
825
|
+
"title": "Short, action-oriented title (verb first)",
|
|
826
|
+
"description": "Clear description with acceptance criteria",
|
|
827
|
+
"priority_bucket": "P0|P1|P2|P3",
|
|
828
|
+
"priority_rationale": "Brief explanation of why this priority",
|
|
829
|
+
"verification": ["command1", "command2"],
|
|
830
|
+
"notes": "Optional implementation notes",
|
|
831
|
+
"blocked_by": "Title of another ticket in this list that must complete first (or null)"
|
|
832
|
+
}
|
|
833
|
+
]
|
|
834
|
+
}
|
|
835
|
+
|
|
836
|
+
Priority Buckets (USE THESE EXACTLY):
|
|
837
|
+
- P0: Critical - security issues, blocking bugs, data loss risks
|
|
838
|
+
- P1: High - important features, performance issues affecting users
|
|
839
|
+
- P2: Medium - improvements, nice-to-haves, minor bugs
|
|
840
|
+
- P3: Low - cleanup, documentation, cosmetic issues
|
|
841
|
+
|
|
842
|
+
Dependencies (blocked_by):
|
|
843
|
+
- If a ticket depends on another ticket being completed first, set blocked_by to that ticket's exact title
|
|
844
|
+
- Example: A "Write unit tests for auth module" ticket should have blocked_by: "Implement auth module"
|
|
845
|
+
- This prevents the blocked ticket from being executed until the blocker is done
|
|
846
|
+
- Only specify blocked_by if there's a true dependency (code must exist, API must be ready, etc.)
|
|
847
|
+
- Leave blocked_by as null if the ticket can be done independently
|
|
848
|
+
|
|
849
|
+
Guidelines:
|
|
850
|
+
- Create 2-5 tickets that together achieve the goal
|
|
851
|
+
- Each ticket should be independently implementable
|
|
852
|
+
- Titles should be concise and action-oriented (start with a verb)
|
|
853
|
+
- Descriptions should clearly explain the task and acceptance criteria
|
|
854
|
+
- Verification commands should be shell commands that verify completion
|
|
855
|
+
- Order tickets by logical implementation sequence
|
|
856
|
+
- Be realistic with priorities - not everything is P0!"""
|
|
857
|
+
|
|
858
|
+
def _build_goal_user_prompt(self, goal: Goal, context: RepoContext | None) -> str:
|
|
859
|
+
"""Build user prompt for goal-based generation."""
|
|
860
|
+
parts = [
|
|
861
|
+
f"Goal: {goal.title}",
|
|
862
|
+
]
|
|
863
|
+
if goal.description:
|
|
864
|
+
parts.append(f"Description: {goal.description}")
|
|
865
|
+
if context:
|
|
866
|
+
parts.append(f"\nRepository context:\n{context.to_prompt_string()}")
|
|
867
|
+
|
|
868
|
+
parts.append("\nGenerate actionable tickets as JSON.")
|
|
869
|
+
return "\n".join(parts)
|
|
870
|
+
|
|
871
|
+
def _build_analysis_system_prompt(self, focus_areas: list[str] | None) -> str:
|
|
872
|
+
"""Build system prompt for codebase analysis."""
|
|
873
|
+
focus_hint = ""
|
|
874
|
+
if focus_areas:
|
|
875
|
+
focus_hint = f"\n\nFocus on these areas: {', '.join(focus_areas)}"
|
|
876
|
+
|
|
877
|
+
return f"""You are a technical project planner analyzing a codebase. Based on the repository structure, TODOs, and metadata, identify improvement opportunities and generate actionable tickets.
|
|
878
|
+
|
|
879
|
+
Your response MUST be valid JSON with this exact structure:
|
|
880
|
+
{{
|
|
881
|
+
"summary": "Brief overall assessment of the codebase (2-3 sentences)",
|
|
882
|
+
"tickets": [
|
|
883
|
+
{{
|
|
884
|
+
"title": "Short, action-oriented title",
|
|
885
|
+
"description": "What needs to be done and why",
|
|
886
|
+
"priority_bucket": "P0|P1|P2|P3",
|
|
887
|
+
"priority_rationale": "Why this priority",
|
|
888
|
+
"verification": ["command to verify"],
|
|
889
|
+
"notes": "Optional notes",
|
|
890
|
+
"blocked_by": "Title of another ticket in this list that must complete first (or null)"
|
|
891
|
+
}}
|
|
892
|
+
]
|
|
893
|
+
}}
|
|
894
|
+
|
|
895
|
+
Priority Buckets:
|
|
896
|
+
- P0: Critical (security, data loss, blocking bugs)
|
|
897
|
+
- P1: High (performance, important features)
|
|
898
|
+
- P2: Medium (improvements, minor issues)
|
|
899
|
+
- P3: Low (cleanup, docs, cosmetic){focus_hint}
|
|
900
|
+
|
|
901
|
+
Dependencies (blocked_by):
|
|
902
|
+
- If a ticket depends on another ticket being completed first, set blocked_by to that ticket's exact title
|
|
903
|
+
- Example: "Write unit tests for auth module" should have blocked_by: "Implement auth module"
|
|
904
|
+
- Leave blocked_by as null if the ticket can be done independently
|
|
905
|
+
|
|
906
|
+
Guidelines:
|
|
907
|
+
- Generate 3-7 tickets based on what you observe
|
|
908
|
+
- Prioritize issues found in TODOs/FIXMEs
|
|
909
|
+
- Look for patterns: missing tests, outdated deps, code smells
|
|
910
|
+
- Be specific - reference actual files/paths when relevant
|
|
911
|
+
- Don't generate vague tickets like "improve code quality"
|
|
912
|
+
- Be conservative with P0/P1 - most tickets should be P2/P3"""
|
|
913
|
+
|
|
914
|
+
def _build_analysis_user_prompt(
|
|
915
|
+
self, context: RepoContext, focus_areas: list[str] | None
|
|
916
|
+
) -> str:
|
|
917
|
+
"""Build user prompt for codebase analysis."""
|
|
918
|
+
parts = [context.to_prompt_string()]
|
|
919
|
+
|
|
920
|
+
if focus_areas:
|
|
921
|
+
parts.append(f"\nFocus areas requested: {', '.join(focus_areas)}")
|
|
922
|
+
|
|
923
|
+
parts.append(
|
|
924
|
+
"\nAnalyze this codebase and generate improvement tickets as JSON."
|
|
925
|
+
)
|
|
926
|
+
return "\n".join(parts)
|
|
927
|
+
|
|
928
|
+
def _build_reflection_system_prompt(self) -> str:
|
|
929
|
+
"""Build system prompt for ticket reflection."""
|
|
930
|
+
return """You are reviewing proposed tickets for quality and coverage. Evaluate the tickets and suggest improvements.
|
|
931
|
+
|
|
932
|
+
Your response MUST be valid JSON with this structure:
|
|
933
|
+
{
|
|
934
|
+
"overall_quality": "good|needs_work|insufficient",
|
|
935
|
+
"quality_notes": "Detailed assessment of ticket quality",
|
|
936
|
+
"coverage_gaps": ["Area not covered 1", "Area not covered 2"],
|
|
937
|
+
"suggested_changes": [
|
|
938
|
+
{
|
|
939
|
+
"ticket_id": "uuid-here",
|
|
940
|
+
"suggested_bucket": "P0|P1|P2|P3",
|
|
941
|
+
"reason": "Why the priority should change"
|
|
942
|
+
}
|
|
943
|
+
]
|
|
944
|
+
}
|
|
945
|
+
|
|
946
|
+
Evaluation criteria:
|
|
947
|
+
- Are tickets specific and actionable?
|
|
948
|
+
- Do they cover the goal comprehensively?
|
|
949
|
+
- Are priorities realistic (not everything is critical)?
|
|
950
|
+
- Are there obvious gaps or missing concerns?
|
|
951
|
+
|
|
952
|
+
Only suggest priority changes when clearly warranted. Don't change priorities just for the sake of it."""
|
|
953
|
+
|
|
954
|
+
def _build_reflection_user_prompt(self, goal: Goal, tickets: list[Ticket]) -> str:
|
|
955
|
+
"""Build user prompt for reflection."""
|
|
956
|
+
parts = [
|
|
957
|
+
f"Goal: {goal.title}",
|
|
958
|
+
]
|
|
959
|
+
if goal.description:
|
|
960
|
+
parts.append(f"Description: {goal.description}")
|
|
961
|
+
|
|
962
|
+
parts.append("\nProposed tickets:")
|
|
963
|
+
for t in tickets:
|
|
964
|
+
bucket = priority_to_bucket(t.priority or 50)
|
|
965
|
+
parts.append(
|
|
966
|
+
f"- [{t.id}] {t.title} (Priority: {bucket.value})"
|
|
967
|
+
f"\n {t.description or 'No description'}"
|
|
968
|
+
)
|
|
969
|
+
|
|
970
|
+
parts.append("\nEvaluate these tickets and respond with JSON.")
|
|
971
|
+
return "\n".join(parts)
|
|
972
|
+
|
|
973
|
+
def _build_ticket_validation_system_prompt(self) -> str:
|
|
974
|
+
"""Build system prompt for validating generated tickets against codebase."""
|
|
975
|
+
return """You are a technical code reviewer validating whether a proposed ticket is appropriate for a codebase.
|
|
976
|
+
|
|
977
|
+
Your response MUST be valid JSON with this exact structure:
|
|
978
|
+
{
|
|
979
|
+
"is_valid": true|false,
|
|
980
|
+
"confidence": "high|medium|low",
|
|
981
|
+
"validation_result": "appropriate|already_implemented|not_relevant|unclear",
|
|
982
|
+
"reasoning": "Brief explanation of your assessment (max 2 sentences)",
|
|
983
|
+
"suggested_modification": "Optional: How to modify the ticket to make it valid (or null)"
|
|
984
|
+
}
|
|
985
|
+
|
|
986
|
+
Validation Results:
|
|
987
|
+
- appropriate: Ticket is valid and should be created
|
|
988
|
+
- already_implemented: Feature/fix already exists in the codebase
|
|
989
|
+
- not_relevant: Ticket doesn't align with goal or codebase structure
|
|
990
|
+
- unclear: Cannot determine validity from available context
|
|
991
|
+
|
|
992
|
+
Guidelines:
|
|
993
|
+
- Check if similar functionality already exists in the codebase
|
|
994
|
+
- Verify the ticket aligns with the stated goal
|
|
995
|
+
- Consider the current codebase structure and patterns
|
|
996
|
+
- Be conservative - flag anything suspicious as "unclear" with low confidence
|
|
997
|
+
- Only mark as "already_implemented" if you see clear evidence in the code
|
|
998
|
+
- Look for existing files, functions, or features that match the ticket's intent"""
|
|
999
|
+
|
|
1000
|
+
def _build_ticket_validation_user_prompt(
|
|
1001
|
+
self,
|
|
1002
|
+
ticket: dict,
|
|
1003
|
+
goal_title: str,
|
|
1004
|
+
goal_description: str | None,
|
|
1005
|
+
context_summary: str,
|
|
1006
|
+
) -> str:
|
|
1007
|
+
"""Build user prompt for validating a ticket."""
|
|
1008
|
+
parts = [
|
|
1009
|
+
f"Goal: {goal_title}",
|
|
1010
|
+
]
|
|
1011
|
+
if goal_description:
|
|
1012
|
+
parts.append(f"Goal Description: {goal_description}")
|
|
1013
|
+
|
|
1014
|
+
parts.append("\nProposed Ticket:")
|
|
1015
|
+
parts.append(f"Title: {ticket['title']}")
|
|
1016
|
+
parts.append(f"Description: {ticket.get('description', 'N/A')}")
|
|
1017
|
+
parts.append(f"Priority: {ticket.get('priority_bucket', 'N/A')}")
|
|
1018
|
+
|
|
1019
|
+
parts.append(f"\nCodebase Context:\n{context_summary}")
|
|
1020
|
+
parts.append(
|
|
1021
|
+
"\nIs this ticket appropriate to create? Provide validation assessment as JSON."
|
|
1022
|
+
)
|
|
1023
|
+
|
|
1024
|
+
return "\n".join(parts)
|
|
1025
|
+
|
|
1026
|
+
# =========================================================================
|
|
1027
|
+
# TICKET VALIDATION
|
|
1028
|
+
# =========================================================================
|
|
1029
|
+
|
|
1030
|
+
def _validate_ticket_against_codebase(
|
|
1031
|
+
self, ticket: dict, goal: Goal, context_summary: str
|
|
1032
|
+
) -> dict:
|
|
1033
|
+
"""Validate a generated ticket against the codebase.
|
|
1034
|
+
|
|
1035
|
+
Args:
|
|
1036
|
+
ticket: Raw ticket dict from generation.
|
|
1037
|
+
goal: The goal the ticket was generated for.
|
|
1038
|
+
context_summary: Summary of repository context.
|
|
1039
|
+
|
|
1040
|
+
Returns:
|
|
1041
|
+
Validation result dict with keys: is_valid, confidence, validation_result, reasoning.
|
|
1042
|
+
"""
|
|
1043
|
+
try:
|
|
1044
|
+
system_prompt = self._build_ticket_validation_system_prompt()
|
|
1045
|
+
user_prompt = self._build_ticket_validation_user_prompt(
|
|
1046
|
+
ticket=ticket,
|
|
1047
|
+
goal_title=goal.title,
|
|
1048
|
+
goal_description=goal.description,
|
|
1049
|
+
context_summary=context_summary,
|
|
1050
|
+
)
|
|
1051
|
+
|
|
1052
|
+
response = self.llm.call_completion(
|
|
1053
|
+
messages=[{"role": "user", "content": user_prompt}],
|
|
1054
|
+
max_tokens=300,
|
|
1055
|
+
system_prompt=system_prompt,
|
|
1056
|
+
)
|
|
1057
|
+
|
|
1058
|
+
# Parse JSON response
|
|
1059
|
+
validation = self.llm.safe_parse_json(
|
|
1060
|
+
response.content,
|
|
1061
|
+
{
|
|
1062
|
+
"is_valid": True, # Default to valid if parsing fails
|
|
1063
|
+
"confidence": "low",
|
|
1064
|
+
"validation_result": "appropriate", # Default to appropriate (fail open)
|
|
1065
|
+
"reasoning": "Unable to validate ticket, accepting by default",
|
|
1066
|
+
"suggested_modification": None,
|
|
1067
|
+
},
|
|
1068
|
+
)
|
|
1069
|
+
|
|
1070
|
+
# If result is "unclear", treat as appropriate (fail open)
|
|
1071
|
+
if validation.get("validation_result") == "unclear":
|
|
1072
|
+
validation["is_valid"] = True
|
|
1073
|
+
validation["validation_result"] = "appropriate"
|
|
1074
|
+
logger.debug(
|
|
1075
|
+
f"Validation unclear for '{ticket.get('title')}', accepting by default"
|
|
1076
|
+
)
|
|
1077
|
+
|
|
1078
|
+
return validation
|
|
1079
|
+
|
|
1080
|
+
except Exception as e:
|
|
1081
|
+
logger.error(f"Ticket validation failed: {e}")
|
|
1082
|
+
# On error, default to accepting the ticket (fail open)
|
|
1083
|
+
return {
|
|
1084
|
+
"is_valid": True,
|
|
1085
|
+
"confidence": "low",
|
|
1086
|
+
"validation_result": "unclear",
|
|
1087
|
+
"reasoning": f"Validation error: {str(e)[:100]}",
|
|
1088
|
+
"suggested_modification": None,
|
|
1089
|
+
}
|
|
1090
|
+
|
|
1091
|
+
# =========================================================================
|
|
1092
|
+
# AGENT-BASED TICKET GENERATION
|
|
1093
|
+
# =========================================================================
|
|
1094
|
+
|
|
1095
|
+
def _build_agent_ticket_generation_prompt(
|
|
1096
|
+
self,
|
|
1097
|
+
goal: Goal,
|
|
1098
|
+
include_readme: bool = False,
|
|
1099
|
+
existing_tickets: list[tuple[str, str]] | None = None,
|
|
1100
|
+
) -> str:
|
|
1101
|
+
"""Build prompt for agent-based ticket generation.
|
|
1102
|
+
|
|
1103
|
+
The agent will analyze the codebase in the workspace and generate tickets.
|
|
1104
|
+
"""
|
|
1105
|
+
# Build the existing tickets section
|
|
1106
|
+
existing_section = ""
|
|
1107
|
+
if existing_tickets:
|
|
1108
|
+
ticket_lines = "\n".join(f"- {title}" for _, title in existing_tickets)
|
|
1109
|
+
existing_section = f"""
|
|
1110
|
+
## Existing Tickets (DO NOT DUPLICATE)
|
|
1111
|
+
The following tickets already exist for this goal. Do NOT create tickets that overlap with or duplicate these:
|
|
1112
|
+
{ticket_lines}
|
|
1113
|
+
|
|
1114
|
+
"""
|
|
1115
|
+
|
|
1116
|
+
prompt = f"""# Task: Generate Implementation Tickets
|
|
1117
|
+
|
|
1118
|
+
## Goal
|
|
1119
|
+
**{goal.title}**
|
|
1120
|
+
|
|
1121
|
+
{goal.description or "No additional description provided."}
|
|
1122
|
+
{existing_section}
|
|
1123
|
+
## Instructions
|
|
1124
|
+
|
|
1125
|
+
Analyze this codebase and break down the goal into 2-5 specific, actionable tickets.
|
|
1126
|
+
{"**Skip any work already covered by the existing tickets listed above.**" if existing_tickets else ""}
|
|
1127
|
+
|
|
1128
|
+
**IMPORTANT**: Your response MUST include a JSON code block with the tickets. Use this exact format:
|
|
1129
|
+
|
|
1130
|
+
```json
|
|
1131
|
+
{{
|
|
1132
|
+
"tickets": [
|
|
1133
|
+
{{
|
|
1134
|
+
"title": "Short, action-oriented title (verb first)",
|
|
1135
|
+
"description": "Clear description with acceptance criteria",
|
|
1136
|
+
"priority_bucket": "P0|P1|P2|P3",
|
|
1137
|
+
"priority_rationale": "Brief explanation of why this priority",
|
|
1138
|
+
"verification": ["shell command to verify completion"],
|
|
1139
|
+
"notes": "Optional implementation notes",
|
|
1140
|
+
"blocked_by": "Title of another ticket in this list that must complete first (or null)"
|
|
1141
|
+
}}
|
|
1142
|
+
]
|
|
1143
|
+
}}
|
|
1144
|
+
```
|
|
1145
|
+
|
|
1146
|
+
## Priority Buckets (use these exactly)
|
|
1147
|
+
- **P0**: Critical - security issues, blocking bugs, data loss risks
|
|
1148
|
+
- **P1**: High - important features, performance issues affecting users
|
|
1149
|
+
- **P2**: Medium - improvements, nice-to-haves, minor bugs
|
|
1150
|
+
- **P3**: Low - cleanup, documentation, cosmetic issues
|
|
1151
|
+
|
|
1152
|
+
## Dependencies (blocked_by)
|
|
1153
|
+
- If a ticket depends on another ticket being completed first, set `blocked_by` to that ticket's **exact title**
|
|
1154
|
+
- Example: A "Write unit tests for feature X" ticket should have `"blocked_by": "Implement feature X"`
|
|
1155
|
+
- The blocked ticket will NOT be executed until the blocking ticket is marked as DONE
|
|
1156
|
+
- Leave `blocked_by` as `null` if the ticket can be done independently
|
|
1157
|
+
|
|
1158
|
+
## Guidelines
|
|
1159
|
+
- Create 2-5 tickets that together achieve the goal
|
|
1160
|
+
- Each ticket should be independently implementable
|
|
1161
|
+
- Titles should be concise and action-oriented (start with a verb)
|
|
1162
|
+
- Descriptions should clearly explain the task and acceptance criteria
|
|
1163
|
+
- Be realistic with priorities - not everything is P0!
|
|
1164
|
+
- Reference actual files/paths when relevant
|
|
1165
|
+
|
|
1166
|
+
Now analyze the codebase and generate the tickets JSON."""
|
|
1167
|
+
|
|
1168
|
+
return prompt
|
|
1169
|
+
|
|
1170
|
+
def _build_agent_analysis_prompt(self, focus_areas: list[str] | None = None) -> str:
    """Build prompt for agent-based codebase analysis.

    The agent will analyze the codebase in the workspace and generate improvement tickets.

    Args:
        focus_areas: Optional list of topics to steer the analysis toward;
            when given they are appended to the prompt as a "Focus Areas" hint.

    Returns:
        The complete prompt string, ready to pass to the agent/LLM.
    """
    # Only inject the focus-areas line when the caller actually supplied some.
    focus_hint = ""
    if focus_areas:
        focus_hint = f"\n\n**Focus Areas**: {', '.join(focus_areas)}"

    # Doubled braces ({{ }}) render as literal braces inside this f-string,
    # so the JSON template below survives formatting intact.
    prompt = f"""# Task: Analyze Codebase and Generate Improvement Tickets

## Instructions

Analyze this codebase and identify improvement opportunities. Generate 3-7 actionable tickets.{focus_hint}

**IMPORTANT**: Your response MUST include a JSON code block with the analysis. Use this exact format:

```json
{{
  "summary": "Brief overall assessment of the codebase (2-3 sentences)",
  "tickets": [
    {{
      "title": "Short, action-oriented title",
      "description": "What needs to be done and why",
      "priority_bucket": "P0|P1|P2|P3",
      "priority_rationale": "Why this priority",
      "verification": ["shell command to verify"],
      "notes": "Optional notes",
      "blocked_by": "Title of another ticket in this list that must complete first (or null)"
    }}
  ]
}}
```

## Priority Buckets
- **P0**: Critical (security, data loss, blocking bugs)
- **P1**: High (performance, important features)
- **P2**: Medium (improvements, minor issues)
- **P3**: Low (cleanup, docs, cosmetic)

## Dependencies (blocked_by)
- If a ticket depends on another ticket being completed first, set `blocked_by` to that ticket's **exact title**
- Example: "Write unit tests for auth module" should have `"blocked_by": "Implement auth module"`
- Leave `blocked_by` as `null` if the ticket can be done independently

## What to Look For
- TODOs and FIXMEs in the code
- Missing tests or test coverage gaps
- Security issues or potential vulnerabilities
- Performance bottlenecks
- Code duplication or refactoring opportunities
- Missing documentation
- Outdated dependencies

## Guidelines
- Be specific - reference actual files/paths
- Don't generate vague tickets like "improve code quality"
- Be conservative with P0/P1 - most tickets should be P2/P3
- Each ticket should be independently implementable

Now analyze the codebase and generate the JSON."""

    return prompt
|
|
1233
|
+
|
|
1234
|
+
def _call_agent_for_tickets(
    self, prompt: str, repo_root: Path, stream_callback=None
) -> str:
    """Dispatch ticket generation to the CLI agent or the LLM API.

    Models named "cli/<tool>" (e.g. "cli/claude") go through the CLI
    executor; any other model name is treated as an API model and sent
    straight to the LLM API with no subprocess involved.

    Args:
        prompt: The prompt for ticket generation.
        repo_root: Path to the repository.
        stream_callback: Optional callback for streaming output.

    Returns:
        The agent's response text.

    Raises:
        ValueError: If neither CLI nor LLM API is available.
    """
    if self.config.model.startswith("cli/"):
        # CLI model: use the CLI only — no silent fallback to the API.
        return self._call_cli_for_tickets(prompt, repo_root, stream_callback)

    # API model: skip the CLI path entirely.
    logger.info(
        f"Model '{self.config.model}' is API-based, using LLM API directly"
    )
    return self._call_llm_for_tickets(prompt, repo_root, stream_callback)
|
|
1262
|
+
|
|
1263
|
+
def _get_llm_for_api_fallback(self) -> "LLMService":
    """Get an LLM service suitable for API calls.

    If the current model is CLI-based (cli/*), detects available API
    credentials and creates a temporary LLMService with a real model.

    Returns:
        LLMService configured for API calls.

    Raises:
        ValueError: If no API credentials are available.
    """
    import os

    # If model is already an API model, use existing LLM service
    if not self.config.model.startswith("cli/"):
        return self.llm

    # CLI model — detect available API keys and pick a model
    # Also try loading from .env file if not already in environment
    from pathlib import Path as _Path

    # NOTE(review): assumes this module lives three directories below the
    # backend root that holds .env — confirm if the file is ever moved.
    env_file = _Path(__file__).parent.parent.parent / ".env"
    if env_file.exists():
        # Minimal hand-rolled .env parser: KEY=VALUE lines, '#' comments,
        # optional single/double quotes around the value.
        for line in env_file.read_text().splitlines():
            line = line.strip()
            if line and not line.startswith("#") and "=" in line:
                key, _, value = line.partition("=")
                key = key.strip()
                value = value.strip().strip('"').strip("'")
                if (
                    key
                    in ("ANTHROPIC_API_KEY", "OPENAI_API_KEY", "AWS_ACCESS_KEY_ID")
                    and value
                ):
                    # setdefault: a key already exported in the real
                    # environment always wins over the .env file.
                    os.environ.setdefault(key, value)

    # Provider preference order: Anthropic, then OpenAI, then Bedrock.
    # NOTE(review): Bedrock detection checks only AWS_ACCESS_KEY_ID; a
    # missing AWS_SECRET_ACCESS_KEY would surface later at call time.
    if os.environ.get("ANTHROPIC_API_KEY"):
        api_model = "anthropic/claude-sonnet-4-5-20250929"
    elif os.environ.get("OPENAI_API_KEY"):
        api_model = "gpt-4o-mini"
    elif os.environ.get("AWS_ACCESS_KEY_ID"):
        api_model = "bedrock/anthropic.claude-sonnet-4-5-20250929-v1:0"
    else:
        raise ValueError(
            "CLI agent unavailable and no LLM API credentials found. "
            "Uncomment and set ANTHROPIC_API_KEY or OPENAI_API_KEY in backend/.env, "
            "or change planner model from 'cli/claude' to an API model in Settings."
        )

    from dataclasses import replace

    # Clone the current config with only the model swapped, so every other
    # setting (timeouts, temperature, etc.) carries over to the fallback.
    fallback_config = replace(self.config, model=api_model)
    logger.info(f"CLI fallback: using API model {api_model}")
    return LLMService(fallback_config)
|
|
1318
|
+
|
|
1319
|
+
def _call_llm_for_tickets(
    self, prompt: str, repo_root: Path, stream_callback=None
) -> str:
    """Generate tickets using LLM API (fallback when CLI unavailable).

    Args:
        prompt: The prompt for ticket generation.
        repo_root: Path to the repository.
        stream_callback: Optional callback for streaming output.

    Returns:
        The LLM's response text containing JSON tickets.

    Raises:
        ValueError: If LLM is not configured or API call fails.
    """
    # May raise ValueError when no API credentials exist for a cli/* model.
    llm = self._get_llm_for_api_fallback()

    if stream_callback:
        stream_callback(
            f"[DEBUG] Using LLM API model: {llm.model if hasattr(llm, 'model') else 'unknown'}"
        )
        stream_callback("[Generating tickets via LLM API...]")

    # Gather repo context for the LLM; context gathering is best-effort —
    # on failure we degrade to just naming the repository path.
    try:
        context = self.context_gatherer.gather(repo_root=repo_root)
        # Truncate to keep the context within a bounded prompt budget.
        context_summary = context.to_prompt_string()[:8000]
    except Exception as e:
        logger.warning(f"Failed to gather repo context: {e}")
        context_summary = f"Repository at: {repo_root}"

    system_prompt = self._build_goal_system_prompt()
    # Single user message: repo context first, then the task prompt.
    messages = [
        {"role": "user", "content": f"{context_summary}\n\n{prompt}"},
    ]

    try:
        response = llm.call_completion(
            messages=messages,
            max_tokens=4000,
            system_prompt=system_prompt,
            json_mode=True,  # ask the provider for a JSON-shaped response
            timeout=60,
        )
        logger.info(f"LLM API response length: {len(response.content)} chars")
        if stream_callback:
            stream_callback("[LLM API response received]")
        return response.content
    except Exception as e:
        # NOTE(review): consider `raise ... from e` to make the cause explicit;
        # implicit __context__ chaining still preserves the traceback today.
        raise ValueError(
            f"LLM API call failed: {e}. "
            "Please verify your LLM credentials in Settings."
        )
|
|
1373
|
+
|
|
1374
|
+
def _call_cli_for_tickets(
    self, prompt: str, repo_root: Path, stream_callback=None
) -> str:
    """Call the agent CLI to generate tickets.

    Args:
        prompt: The prompt for ticket generation.
        repo_root: Path to the repository.
        stream_callback: Optional callback for streaming output.

    Returns:
        The agent's response text.

    Raises:
        ValueError: If no agent is available or agent fails.
        FileNotFoundError: If agent command not found.
    """
    import os

    # Get agent path from config
    agent_path = self.config.get_agent_path()

    # Track whether we're using Claude CLI (for stream-json support)
    is_claude_cli = False

    if os.path.exists(agent_path):
        logger.info(f"Using agent from config: {agent_path}")
        # Determine if it's cursor-agent style (needs --workspace) or claude style
        if "cursor-agent" in agent_path:
            cmd = [agent_path, "--print", "--workspace", str(repo_root), prompt]
        else:
            # Claude-style
            is_claude_cli = True
            cmd = [agent_path, "--print", prompt]
    else:
        # Fall back to executor service detection
        logger.warning(
            f"Agent not found at configured path: {agent_path}, falling back to auto-detection"
        )
        try:
            # Prefer a headless-capable executor; fall back to any executor.
            executor = ExecutorService.detect_headless_executor(
                agent_path=agent_path
            )
            if not executor:
                executor = ExecutorService.detect_executor(agent_path=agent_path)
        except Exception as e:
            raise ValueError(
                f"No agent CLI available at {agent_path} and auto-detection failed: {e}"
            )

        logger.info(
            f"Using agent: {executor.executor_type.value} for ticket generation"
        )

        # Build command based on executor type
        if executor.executor_type == ExecutorType.CLAUDE:
            is_claude_cli = True
            cmd = [executor.command, "--print", prompt]
        elif executor.executor_type == ExecutorType.CURSOR_AGENT:
            cmd = [
                executor.command,
                "--print",
                "--workspace",
                str(repo_root),
                prompt,
            ]
        else:
            # Cursor (interactive) - not suitable for automated generation
            raise ValueError(
                f"Agent {executor.executor_type.value} is interactive only. "
                "Need cursor-agent or claude CLI for automated ticket generation."
            )

    # For Claude CLI with streaming: enable structured JSON output
    # so the normalizer can produce typed entries (thinking, tool_use, etc.)
    if is_claude_cli and stream_callback:
        cmd = [
            cmd[0],  # executable path
            "--print",
            "--output-format",
            "stream-json",
            "--verbose",
            "--include-partial-messages",
            "--no-session-persistence",
            prompt,
        ]

    # Run the agent — show executable + flags, truncate prompt arg
    cmd_display = cmd[0]
    if len(cmd) > 1:
        flags = [a for a in cmd[1:] if a.startswith("-")]
        cmd_display += " " + " ".join(flags) if flags else ""
        cmd_display += " <prompt>"
    logger.info(f"Running agent command: {cmd_display} (cwd={repo_root})")

    # Strip Claude Code session env vars to avoid "nested session" errors
    # when spawning claude CLI from within a Claude Code session
    clean_env = {
        k: v
        for k, v in os.environ.items()
        if k not in ("CLAUDECODE", "CLAUDE_CODE_ENTRYPOINT")
    }

    # Validate working directory exists before launching subprocess
    if not repo_root.exists():
        raise ValueError(
            f"Repository directory does not exist: {repo_root}. "
            "The board's repo_root may point to a deleted or moved directory."
        )

    try:
        if stream_callback:
            # Stream output line by line for real-time feedback
            # NOTE(review): stderr is PIPEd but only read after exit; a very
            # chatty agent could fill the stderr buffer and stall — confirm.
            process = subprocess.Popen(
                cmd,
                cwd=repo_root,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                bufsize=1,  # Line buffered
                env=clean_env,
            )

            output_lines = []
            result_text = None  # Extracted from 'result' JSON line (stream-json)

            # Read stdout line by line until EOF and process exit.
            while True:
                line = process.stdout.readline()
                if not line and process.poll() is not None:
                    break
                if line:
                    output_lines.append(line)
                    stripped = line.rstrip()
                    stream_callback(stripped)

                    # For stream-json mode: extract the result text
                    # from the final {"type":"result",...,"result":"..."} line
                    if is_claude_cli and stripped.startswith("{"):
                        try:
                            parsed = json.loads(stripped)
                            if (
                                isinstance(parsed, dict)
                                and parsed.get("type") == "result"
                                and isinstance(parsed.get("result"), str)
                            ):
                                result_text = parsed["result"]
                        except json.JSONDecodeError:
                            # Not every '{'-prefixed line is JSON; ignore.
                            pass

            logger.info(
                f"Agent subprocess completed. Total lines: {len(output_lines)}"
            )

            # Wait for process to complete
            try:
                process.wait(timeout=600)
            except subprocess.TimeoutExpired:
                process.kill()
                process.wait()
                raise ValueError("Agent timed out after 600 seconds")

            if process.returncode != 0:
                stderr = process.stderr.read()
                logger.error(
                    f"Agent failed with code {process.returncode}: {stderr}"
                )
                raise ValueError(f"Agent failed: {stderr[:500]}")

            # For stream-json mode: return extracted result text
            # For plain text mode: return raw output
            if result_text is not None:
                return result_text
            return "".join(output_lines)
        else:
            # Non-streaming mode (original behavior)
            result = subprocess.run(
                cmd,
                cwd=repo_root,
                capture_output=True,
                text=True,
                timeout=600,  # 10 minute timeout for ticket generation
                env=clean_env,
            )

            if result.returncode != 0:
                logger.error(
                    f"Agent failed with code {result.returncode}: {result.stderr}"
                )
                raise ValueError(f"Agent failed: {result.stderr[:500]}")

            logger.debug(f"Agent response length: {len(result.stdout)} chars")
            return result.stdout

    except subprocess.TimeoutExpired:
        raise ValueError("Agent timed out after 600 seconds")
    except FileNotFoundError as e:
        # Distinguish between missing command and missing cwd
        if not Path(cmd[0]).exists() and not shutil.which(cmd[0]):
            raise ValueError(f"Agent command not found: {cmd[0]}")
        if not repo_root.exists():
            raise ValueError(f"Repository directory does not exist: {repo_root}")
        raise ValueError(f"File not found during agent execution: {e}")
|
|
1577
|
+
|
|
1578
|
+
def _parse_agent_json_response(self, response: str) -> dict:
|
|
1579
|
+
"""Parse JSON from agent response.
|
|
1580
|
+
|
|
1581
|
+
The agent may include explanatory text around the JSON.
|
|
1582
|
+
This method extracts and parses the JSON block.
|
|
1583
|
+
|
|
1584
|
+
Args:
|
|
1585
|
+
response: The full agent response text.
|
|
1586
|
+
|
|
1587
|
+
Returns:
|
|
1588
|
+
Parsed JSON dict with tickets.
|
|
1589
|
+
"""
|
|
1590
|
+
# Try to find JSON in code blocks first
|
|
1591
|
+
json_block_pattern = r"```(?:json)?\s*(\{[\s\S]*?\})\s*```"
|
|
1592
|
+
matches = re.findall(json_block_pattern, response)
|
|
1593
|
+
|
|
1594
|
+
for match in matches:
|
|
1595
|
+
try:
|
|
1596
|
+
data = json.loads(match)
|
|
1597
|
+
if "tickets" in data and isinstance(data["tickets"], list):
|
|
1598
|
+
return data
|
|
1599
|
+
except json.JSONDecodeError:
|
|
1600
|
+
continue
|
|
1601
|
+
|
|
1602
|
+
# Try to find raw JSON object
|
|
1603
|
+
json_pattern = r"\{[\s\S]*\"tickets\"[\s\S]*\}"
|
|
1604
|
+
matches = re.findall(json_pattern, response)
|
|
1605
|
+
|
|
1606
|
+
for match in matches:
|
|
1607
|
+
try:
|
|
1608
|
+
data = json.loads(match)
|
|
1609
|
+
if "tickets" in data and isinstance(data["tickets"], list):
|
|
1610
|
+
return data
|
|
1611
|
+
except json.JSONDecodeError:
|
|
1612
|
+
continue
|
|
1613
|
+
|
|
1614
|
+
# Fallback: try to parse entire response as JSON
|
|
1615
|
+
try:
|
|
1616
|
+
data = json.loads(response)
|
|
1617
|
+
if "tickets" in data and isinstance(data["tickets"], list):
|
|
1618
|
+
return data
|
|
1619
|
+
except json.JSONDecodeError:
|
|
1620
|
+
pass
|
|
1621
|
+
|
|
1622
|
+
logger.warning(f"Could not parse JSON from agent response: {response[:500]}")
|
|
1623
|
+
return {"tickets": []}
|
|
1624
|
+
|
|
1625
|
+
# =========================================================================
|
|
1626
|
+
# HELPERS
|
|
1627
|
+
# =========================================================================
|
|
1628
|
+
|
|
1629
|
+
async def _get_existing_tickets(self, goal_id: str | None) -> list[tuple[str, str]]:
    """Get existing ticket (id, title) pairs for deduplication."""
    stmt = select(Ticket.id, Ticket.title)
    if goal_id:
        # Restrict to the goal's own tickets when a goal is specified.
        stmt = stmt.where(Ticket.goal_id == goal_id)
    rows = (await self.db.execute(stmt)).fetchall()
    return [(ticket_id, title) for ticket_id, title in rows]
|
|
1636
|
+
|
|
1637
|
+
def _check_duplicate(
|
|
1638
|
+
self, new_title: str, existing_tickets: list[tuple[str, str]]
|
|
1639
|
+
) -> tuple[str, str | None, str | None, float]:
|
|
1640
|
+
"""Check if a title is a duplicate.
|
|
1641
|
+
|
|
1642
|
+
Returns:
|
|
1643
|
+
Tuple of (status, existing_id, existing_title, similarity):
|
|
1644
|
+
- status: "exact" (hard block), "similar" (warning only), or "ok" (no match)
|
|
1645
|
+
- existing_id: ID of matching ticket if any
|
|
1646
|
+
- existing_title: Title of matching ticket if any
|
|
1647
|
+
- similarity: Similarity score (0-1)
|
|
1648
|
+
"""
|
|
1649
|
+
new_lower = new_title.lower().strip()
|
|
1650
|
+
new_tokens = set(new_lower.split())
|
|
1651
|
+
|
|
1652
|
+
best_match: tuple[str, str, float] | None = None
|
|
1653
|
+
|
|
1654
|
+
for existing_id, existing_title in existing_tickets:
|
|
1655
|
+
existing_lower = existing_title.lower().strip()
|
|
1656
|
+
|
|
1657
|
+
# Exact match = hard block
|
|
1658
|
+
if new_lower == existing_lower:
|
|
1659
|
+
return ("exact", existing_id, existing_title, 1.0)
|
|
1660
|
+
|
|
1661
|
+
# Token overlap for similarity
|
|
1662
|
+
existing_tokens = set(existing_lower.split())
|
|
1663
|
+
if not new_tokens or not existing_tokens:
|
|
1664
|
+
continue
|
|
1665
|
+
|
|
1666
|
+
overlap = len(new_tokens & existing_tokens)
|
|
1667
|
+
similarity = overlap / min(len(new_tokens), len(existing_tokens))
|
|
1668
|
+
|
|
1669
|
+
if similarity > DEDUP_SIMILARITY_THRESHOLD:
|
|
1670
|
+
if best_match is None or similarity > best_match[2]:
|
|
1671
|
+
best_match = (existing_id, existing_title, similarity)
|
|
1672
|
+
|
|
1673
|
+
if best_match:
|
|
1674
|
+
return ("similar", best_match[0], best_match[1], best_match[2])
|
|
1675
|
+
|
|
1676
|
+
return ("ok", None, None, 0.0)
|
|
1677
|
+
|
|
1678
|
+
def _get_git_head_sha(self, repo_root: Path) -> str | None:
|
|
1679
|
+
"""Get the current git HEAD SHA (full 40-char SHA) for cache invalidation.
|
|
1680
|
+
|
|
1681
|
+
We store the full SHA to avoid rare collision issues with short SHAs.
|
|
1682
|
+
"""
|
|
1683
|
+
try:
|
|
1684
|
+
result = subprocess.run(
|
|
1685
|
+
["git", "rev-parse", "HEAD"],
|
|
1686
|
+
cwd=repo_root,
|
|
1687
|
+
capture_output=True,
|
|
1688
|
+
text=True,
|
|
1689
|
+
timeout=5,
|
|
1690
|
+
)
|
|
1691
|
+
if result.returncode == 0:
|
|
1692
|
+
return result.stdout.strip() # Full SHA (40 chars)
|
|
1693
|
+
except Exception as e:
|
|
1694
|
+
logger.debug(f"Failed to get git HEAD: {e}")
|
|
1695
|
+
return None
|
|
1696
|
+
|
|
1697
|
+
def _get_workspace_head_sha(
|
|
1698
|
+
self, workspace_path: Path, repo_root: Path
|
|
1699
|
+
) -> str | None:
|
|
1700
|
+
"""Get the git HEAD SHA for a workspace path if different from repo root.
|
|
1701
|
+
|
|
1702
|
+
Worktrees may be at different SHAs than the main repo.
|
|
1703
|
+
Returns None if workspace_path is the same as repo_root or not a git dir.
|
|
1704
|
+
"""
|
|
1705
|
+
if workspace_path.resolve() == repo_root.resolve():
|
|
1706
|
+
return None # Same as repo root, no need for separate SHA
|
|
1707
|
+
|
|
1708
|
+
try:
|
|
1709
|
+
result = subprocess.run(
|
|
1710
|
+
["git", "rev-parse", "HEAD"],
|
|
1711
|
+
cwd=workspace_path,
|
|
1712
|
+
capture_output=True,
|
|
1713
|
+
text=True,
|
|
1714
|
+
timeout=5,
|
|
1715
|
+
)
|
|
1716
|
+
if result.returncode == 0:
|
|
1717
|
+
return result.stdout.strip() # Full SHA
|
|
1718
|
+
except Exception as e:
|
|
1719
|
+
logger.debug(f"Failed to get workspace HEAD: {e}")
|
|
1720
|
+
return None
|
|
1721
|
+
|
|
1722
|
+
def _compute_cache_key(
|
|
1723
|
+
self, repo_root: Path, focus_areas: list[str] | None, head_sha: str | None
|
|
1724
|
+
) -> str:
|
|
1725
|
+
"""Compute cache key for analysis results.
|
|
1726
|
+
|
|
1727
|
+
Includes git HEAD SHA so cache invalidates on new commits.
|
|
1728
|
+
"""
|
|
1729
|
+
key_parts = [str(repo_root)]
|
|
1730
|
+
if head_sha:
|
|
1731
|
+
key_parts.append(head_sha)
|
|
1732
|
+
if focus_areas:
|
|
1733
|
+
key_parts.extend(sorted(focus_areas))
|
|
1734
|
+
key_str = "|".join(key_parts)
|
|
1735
|
+
return hashlib.sha256(key_str.encode()).hexdigest()[:32]
|
|
1736
|
+
|
|
1737
|
+
async def _get_cached_analysis(self, cache_key: str) -> dict | None:
    """Get cached analysis result if valid.

    Looks up an AnalysisCache row by key, honoring the expiry timestamp;
    any lookup failure is treated as a cache miss (best-effort cache).

    Args:
        cache_key: Key previously produced by _compute_cache_key.

    Returns:
        The cached analysis dict, or None on miss/expiry/error.
    """
    try:
        result = await self.db.execute(
            select(AnalysisCache).where(
                and_(
                    AnalysisCache.id == cache_key,
                    # Expired rows are simply ignored (filtered here),
                    # not deleted — cleanup presumably happens elsewhere.
                    AnalysisCache.expires_at > datetime.now(UTC),
                )
            )
        )
        cached = result.scalar_one_or_none()
        if cached:
            return json.loads(cached.result_json)
    except Exception as e:
        # Cache problems must never break analysis; log at debug and miss.
        logger.debug(f"Cache lookup failed: {e}")
    return None
|
|
1754
|
+
|
|
1755
|
+
async def _cache_analysis(self, cache_key: str, result: dict) -> None:
    """Cache analysis result.

    Upserts an AnalysisCache row keyed by *cache_key* with a TTL of
    ANALYSIS_CACHE_TTL_MINUTES. Failures are logged and swallowed so a
    broken cache never fails the analysis itself.

    Args:
        cache_key: Key previously produced by _compute_cache_key.
        result: JSON-serializable analysis payload to store.
    """
    try:
        expires_at = datetime.now(UTC) + timedelta(
            minutes=ANALYSIS_CACHE_TTL_MINUTES
        )

        # Upsert cache entry: read-then-write rather than a DB-level
        # upsert, so this works across SQLAlchemy backends.
        existing = await self.db.execute(
            select(AnalysisCache).where(AnalysisCache.id == cache_key)
        )
        cache_entry = existing.scalar_one_or_none()

        if cache_entry:
            # Refresh payload and push the expiry window forward.
            cache_entry.result_json = json.dumps(result)
            cache_entry.expires_at = expires_at
        else:
            cache_entry = AnalysisCache(
                id=cache_key,
                result_json=json.dumps(result),
                expires_at=expires_at,
            )
            self.db.add(cache_entry)

        await self.db.commit()
    except Exception as e:
        # Best-effort cache: warn but never propagate.
        logger.warning(f"Failed to cache analysis: {e}")