ouroboros-ai 0.1.0__py3-none-any.whl
- ouroboros/__init__.py +15 -0
- ouroboros/__main__.py +9 -0
- ouroboros/bigbang/__init__.py +39 -0
- ouroboros/bigbang/ambiguity.py +464 -0
- ouroboros/bigbang/interview.py +530 -0
- ouroboros/bigbang/seed_generator.py +610 -0
- ouroboros/cli/__init__.py +9 -0
- ouroboros/cli/commands/__init__.py +7 -0
- ouroboros/cli/commands/config.py +79 -0
- ouroboros/cli/commands/init.py +425 -0
- ouroboros/cli/commands/run.py +201 -0
- ouroboros/cli/commands/status.py +85 -0
- ouroboros/cli/formatters/__init__.py +31 -0
- ouroboros/cli/formatters/panels.py +157 -0
- ouroboros/cli/formatters/progress.py +112 -0
- ouroboros/cli/formatters/tables.py +166 -0
- ouroboros/cli/main.py +60 -0
- ouroboros/config/__init__.py +81 -0
- ouroboros/config/loader.py +292 -0
- ouroboros/config/models.py +332 -0
- ouroboros/core/__init__.py +62 -0
- ouroboros/core/ac_tree.py +401 -0
- ouroboros/core/context.py +472 -0
- ouroboros/core/errors.py +246 -0
- ouroboros/core/seed.py +212 -0
- ouroboros/core/types.py +205 -0
- ouroboros/evaluation/__init__.py +110 -0
- ouroboros/evaluation/consensus.py +350 -0
- ouroboros/evaluation/mechanical.py +351 -0
- ouroboros/evaluation/models.py +235 -0
- ouroboros/evaluation/pipeline.py +286 -0
- ouroboros/evaluation/semantic.py +302 -0
- ouroboros/evaluation/trigger.py +278 -0
- ouroboros/events/__init__.py +5 -0
- ouroboros/events/base.py +80 -0
- ouroboros/events/decomposition.py +153 -0
- ouroboros/events/evaluation.py +248 -0
- ouroboros/execution/__init__.py +44 -0
- ouroboros/execution/atomicity.py +451 -0
- ouroboros/execution/decomposition.py +481 -0
- ouroboros/execution/double_diamond.py +1386 -0
- ouroboros/execution/subagent.py +275 -0
- ouroboros/observability/__init__.py +63 -0
- ouroboros/observability/drift.py +383 -0
- ouroboros/observability/logging.py +504 -0
- ouroboros/observability/retrospective.py +338 -0
- ouroboros/orchestrator/__init__.py +78 -0
- ouroboros/orchestrator/adapter.py +391 -0
- ouroboros/orchestrator/events.py +278 -0
- ouroboros/orchestrator/runner.py +597 -0
- ouroboros/orchestrator/session.py +486 -0
- ouroboros/persistence/__init__.py +23 -0
- ouroboros/persistence/checkpoint.py +511 -0
- ouroboros/persistence/event_store.py +183 -0
- ouroboros/persistence/migrations/__init__.py +1 -0
- ouroboros/persistence/migrations/runner.py +100 -0
- ouroboros/persistence/migrations/scripts/001_initial.sql +20 -0
- ouroboros/persistence/schema.py +56 -0
- ouroboros/persistence/uow.py +230 -0
- ouroboros/providers/__init__.py +28 -0
- ouroboros/providers/base.py +133 -0
- ouroboros/providers/claude_code_adapter.py +212 -0
- ouroboros/providers/litellm_adapter.py +316 -0
- ouroboros/py.typed +0 -0
- ouroboros/resilience/__init__.py +67 -0
- ouroboros/resilience/lateral.py +595 -0
- ouroboros/resilience/stagnation.py +727 -0
- ouroboros/routing/__init__.py +60 -0
- ouroboros/routing/complexity.py +272 -0
- ouroboros/routing/downgrade.py +664 -0
- ouroboros/routing/escalation.py +340 -0
- ouroboros/routing/router.py +204 -0
- ouroboros/routing/tiers.py +247 -0
- ouroboros/secondary/__init__.py +40 -0
- ouroboros/secondary/scheduler.py +467 -0
- ouroboros/secondary/todo_registry.py +483 -0
- ouroboros_ai-0.1.0.dist-info/METADATA +607 -0
- ouroboros_ai-0.1.0.dist-info/RECORD +81 -0
- ouroboros_ai-0.1.0.dist-info/WHEEL +4 -0
- ouroboros_ai-0.1.0.dist-info/entry_points.txt +2 -0
- ouroboros_ai-0.1.0.dist-info/licenses/LICENSE +21 -0
ouroboros/bigbang/interview.py
@@ -0,0 +1,530 @@
"""Interactive interview engine for requirement clarification.

This module implements the interview protocol that refines vague ideas into
clear requirements through iterative questioning (max 10 rounds).
"""

from collections.abc import Iterator
from contextlib import contextmanager
from dataclasses import dataclass, field
from datetime import UTC, datetime
from enum import StrEnum
import fcntl
from pathlib import Path
from typing import Any

from pydantic import BaseModel, Field
import structlog

from ouroboros.core.errors import ProviderError, ValidationError
from ouroboros.core.types import Result
from ouroboros.providers.base import (
    CompletionConfig,
    LLMAdapter,
    Message,
    MessageRole,
)

@contextmanager
def _file_lock(file_path: Path, exclusive: bool = True) -> Iterator[None]:
    """Context manager for file locking to prevent race conditions.

    Args:
        file_path: Path to the file to lock.
        exclusive: If True, use exclusive lock (for writes).
            If False, use shared lock (for reads).

    Yields:
        None when lock is acquired.
    """
    lock_path = file_path.with_suffix(file_path.suffix + ".lock")
    lock_path.parent.mkdir(parents=True, exist_ok=True)

    with open(lock_path, "w") as lock_file:
        lock_type = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
        try:
            fcntl.flock(lock_file.fileno(), lock_type)
            yield
        finally:
            fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)


log = structlog.get_logger()

MAX_INTERVIEW_ROUNDS = 10
# Default model moved to config.models.ClarificationConfig.default_model
_FALLBACK_MODEL = "openrouter/google/gemini-2.0-flash-001"


class InterviewStatus(StrEnum):
    """Status of the interview process."""

    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    ABORTED = "aborted"


class InterviewRound(BaseModel):
    """A single round of interview questions and responses.

    Attributes:
        round_number: 1-based round number (1 to MAX_INTERVIEW_ROUNDS).
        question: The question asked by the system.
        user_response: The user's response (None if not yet answered).
        timestamp: When this round was created.
    """

    round_number: int = Field(ge=1, le=MAX_INTERVIEW_ROUNDS)
    question: str
    user_response: str | None = None
    timestamp: datetime = Field(default_factory=lambda: datetime.now(UTC))


class InterviewState(BaseModel):
    """Persistent state of an interview session.

    Attributes:
        interview_id: Unique identifier for this interview.
        status: Current status of the interview.
        rounds: List of completed and current rounds.
        initial_context: The initial context provided by the user.
        created_at: When the interview was created.
        updated_at: When the interview was last updated.
    """

    interview_id: str
    status: InterviewStatus = InterviewStatus.IN_PROGRESS
    rounds: list[InterviewRound] = Field(default_factory=list)
    initial_context: str = ""
    created_at: datetime = Field(default_factory=lambda: datetime.now(UTC))
    updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC))

    @property
    def current_round_number(self) -> int:
        """Get the current round number (1-based)."""
        return len(self.rounds) + 1

    @property
    def is_complete(self) -> bool:
        """Check if interview has reached max rounds or is marked complete."""
        return (
            self.status == InterviewStatus.COMPLETED
            or len(self.rounds) >= MAX_INTERVIEW_ROUNDS
        )

    def mark_updated(self) -> None:
        """Update the updated_at timestamp."""
        self.updated_at = datetime.now(UTC)

@dataclass
class InterviewEngine:
    """Engine for conducting interactive requirement interviews.

    This engine orchestrates the interview process:
    1. Generates questions based on current context and ambiguity
    2. Collects user responses
    3. Persists state between sessions
    4. Tracks progress through rounds

    Example:
        engine = InterviewEngine(
            llm_adapter=LiteLLMAdapter(),
            state_dir=Path.home() / ".ouroboros" / "data",
        )

        # Start new interview
        result = await engine.start_interview(
            initial_context="I want to build a CLI tool for task management"
        )
        state = result.value

        # Ask questions in rounds
        while not state.is_complete:
            question_result = await engine.ask_next_question(state)
            if question_result.is_ok:
                question = question_result.value
                user_response = input(question)
                await engine.record_response(state, user_response, question)

        # Generate final seed (not implemented in this story)

    Note:
        The model can be configured via OuroborosConfig.clarification.default_model
        or passed directly to the constructor.
    """

    llm_adapter: LLMAdapter
    state_dir: Path = field(default_factory=lambda: Path.home() / ".ouroboros" / "data")
    model: str = _FALLBACK_MODEL
    temperature: float = 0.7
    max_tokens: int = 2048

    def __post_init__(self) -> None:
        """Ensure state directory exists."""
        self.state_dir.mkdir(parents=True, exist_ok=True)

    def _state_file_path(self, interview_id: str) -> Path:
        """Get the path to the state file for an interview.

        Args:
            interview_id: The interview ID.

        Returns:
            Path to the state file.
        """
        return self.state_dir / f"interview_{interview_id}.json"

    async def start_interview(
        self, initial_context: str, interview_id: str | None = None
    ) -> Result[InterviewState, ValidationError]:
        """Start a new interview session.

        Args:
            initial_context: The initial context or idea provided by the user.
            interview_id: Optional interview ID (generated if not provided).

        Returns:
            Result containing the new InterviewState or ValidationError.
        """
        if not initial_context.strip():
            return Result.err(
                ValidationError("Initial context cannot be empty", field="initial_context")
            )

        if interview_id is None:
            interview_id = f"interview_{datetime.now(UTC).strftime('%Y%m%d_%H%M%S')}"

        state = InterviewState(
            interview_id=interview_id,
            initial_context=initial_context,
        )

        log.info(
            "interview.started",
            interview_id=interview_id,
            initial_context_length=len(initial_context),
        )

        return Result.ok(state)

    async def ask_next_question(
        self, state: InterviewState
    ) -> Result[str, ProviderError | ValidationError]:
        """Generate the next question based on current state.

        Args:
            state: Current interview state.

        Returns:
            Result containing the next question or error.
        """
        if state.is_complete:
            return Result.err(
                ValidationError(
                    "Interview is already complete",
                    field="status",
                    value=state.status,
                )
            )

        # Build the context from previous rounds
        conversation_history = self._build_conversation_history(state)

        # Generate next question
        system_prompt = self._build_system_prompt(state)
        messages = [
            Message(role=MessageRole.SYSTEM, content=system_prompt),
            *conversation_history,
        ]

        config = CompletionConfig(
            model=self.model,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
        )

        log.debug(
            "interview.generating_question",
            interview_id=state.interview_id,
            round_number=state.current_round_number,
            message_count=len(messages),
        )

        result = await self.llm_adapter.complete(messages, config)

        if result.is_err:
            log.warning(
                "interview.question_generation_failed",
                interview_id=state.interview_id,
                round_number=state.current_round_number,
                error=str(result.error),
            )
            return Result.err(result.error)

        question = result.value.content.strip()

        log.info(
            "interview.question_generated",
            interview_id=state.interview_id,
            round_number=state.current_round_number,
            question_length=len(question),
        )

        return Result.ok(question)

    async def record_response(
        self, state: InterviewState, user_response: str, question: str
    ) -> Result[InterviewState, ValidationError]:
        """Record the user's response to the current question.

        Args:
            state: Current interview state.
            user_response: The user's response.
            question: The question that was asked.

        Returns:
            Result containing updated state or ValidationError.
        """
        if not user_response.strip():
            return Result.err(
                ValidationError("User response cannot be empty", field="user_response")
            )

        if state.is_complete:
            return Result.err(
                ValidationError(
                    "Cannot record response - interview is complete",
                    field="status",
                    value=state.status,
                )
            )

        # Create new round
        round_data = InterviewRound(
            round_number=state.current_round_number,
            question=question,
            user_response=user_response,
        )

        state.rounds.append(round_data)
        state.mark_updated()

        log.info(
            "interview.response_recorded",
            interview_id=state.interview_id,
            round_number=round_data.round_number,
            response_length=len(user_response),
        )

        # Check if we've reached max rounds
        if len(state.rounds) >= MAX_INTERVIEW_ROUNDS:
            state.status = InterviewStatus.COMPLETED
            log.info(
                "interview.max_rounds_reached",
                interview_id=state.interview_id,
                total_rounds=len(state.rounds),
            )

        return Result.ok(state)

    async def save_state(
        self, state: InterviewState
    ) -> Result[Path, ValidationError]:
        """Persist interview state to disk.

        Uses file locking to prevent race conditions during concurrent access.

        Args:
            state: The interview state to save.

        Returns:
            Result containing path to saved file or ValidationError.
        """
        try:
            file_path = self._state_file_path(state.interview_id)
            state.mark_updated()

            # Use file locking to prevent race conditions
            with _file_lock(file_path, exclusive=True):
                # Write state as JSON
                content = state.model_dump_json(indent=2)
                file_path.write_text(content, encoding="utf-8")

            log.info(
                "interview.state_saved",
                interview_id=state.interview_id,
                file_path=str(file_path),
            )

            return Result.ok(file_path)
        except (OSError, ValueError) as e:
            log.exception(
                "interview.state_save_failed",
                interview_id=state.interview_id,
                error=str(e),
            )
            return Result.err(
                ValidationError(
                    f"Failed to save interview state: {e}",
                    details={"interview_id": state.interview_id},
                )
            )

    async def load_state(
        self, interview_id: str
    ) -> Result[InterviewState, ValidationError]:
        """Load interview state from disk.

        Uses file locking to prevent race conditions during concurrent access.

        Args:
            interview_id: The interview ID to load.

        Returns:
            Result containing loaded state or ValidationError.
        """
        file_path = self._state_file_path(interview_id)

        if not file_path.exists():
            return Result.err(
                ValidationError(
                    f"Interview state not found: {interview_id}",
                    field="interview_id",
                    value=interview_id,
                )
            )

        try:
            # Use shared lock for reading
            with _file_lock(file_path, exclusive=False):
                content = file_path.read_text(encoding="utf-8")

            state = InterviewState.model_validate_json(content)

            log.info(
                "interview.state_loaded",
                interview_id=interview_id,
                rounds=len(state.rounds),
            )

            return Result.ok(state)
        except (OSError, ValueError) as e:
            log.exception(
                "interview.state_load_failed",
                interview_id=interview_id,
                error=str(e),
            )
            return Result.err(
                ValidationError(
                    f"Failed to load interview state: {e}",
                    field="interview_id",
                    value=interview_id,
                    details={"file_path": str(file_path)},
                )
            )

    def _build_system_prompt(self, state: InterviewState) -> str:
        """Build the system prompt for question generation.

        Args:
            state: Current interview state.

        Returns:
            The system prompt.
        """
        round_info = f"Round {state.current_round_number} of {MAX_INTERVIEW_ROUNDS}"

        return f"""You are an expert requirements engineer conducting an interview to refine vague ideas into clear, executable requirements.

This is {round_info}. Your goal is to reduce ambiguity and gather concrete details.

Initial context: {state.initial_context}

Guidelines:
- Ask ONE focused question per round
- Target the biggest source of ambiguity
- Build on previous responses
- Be specific and actionable
- Keep questions concise and clear

Generate the next question to reduce ambiguity."""

    def _build_conversation_history(
        self, state: InterviewState
    ) -> list[Message]:
        """Build conversation history from completed rounds.

        Args:
            state: Current interview state.

        Returns:
            List of messages representing the conversation.
        """
        messages: list[Message] = []

        for round_data in state.rounds:
            messages.append(
                Message(role=MessageRole.ASSISTANT, content=round_data.question)
            )
            if round_data.user_response:
                messages.append(
                    Message(role=MessageRole.USER, content=round_data.user_response)
                )

        return messages

    async def complete_interview(
        self, state: InterviewState
    ) -> Result[InterviewState, ValidationError]:
        """Mark the interview as completed.

        Args:
            state: Current interview state.

        Returns:
            Result containing updated state or ValidationError.
        """
        if state.status == InterviewStatus.COMPLETED:
            return Result.ok(state)

        state.status = InterviewStatus.COMPLETED
        state.mark_updated()

        log.info(
            "interview.completed",
            interview_id=state.interview_id,
            total_rounds=len(state.rounds),
        )

        return Result.ok(state)

    async def list_interviews(self) -> list[dict[str, Any]]:
        """List all interview sessions in the state directory.

        Returns:
            List of interview metadata dictionaries.
        """
        interviews = []

        for file_path in self.state_dir.glob("interview_*.json"):
            try:
                content = file_path.read_text(encoding="utf-8")
                state = InterviewState.model_validate_json(content)
                interviews.append(
                    {
                        "interview_id": state.interview_id,
                        "status": state.status,
                        "rounds": len(state.rounds),
                        "created_at": state.created_at,
                        "updated_at": state.updated_at,
                    }
                )
            except (OSError, ValueError) as e:
                log.warning(
                    "interview.list_failed_for_file",
                    file_path=str(file_path),
                    error=str(e),
                )
                continue

        return sorted(interviews, key=lambda x: x["updated_at"], reverse=True)
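
Usage sketch (editor's illustration, not code shipped in the wheel): the interview API above composes into a simple driver loop. This sketch assumes LiteLLMAdapter (from ouroboros/providers/litellm_adapter.py, referenced in the class docstring above) can be constructed with defaults, and it elides most Result error handling for brevity.

# Hypothetical driver script; not part of ouroboros-ai 0.1.0.
import asyncio
from pathlib import Path

from ouroboros.bigbang.interview import InterviewEngine
from ouroboros.providers.litellm_adapter import LiteLLMAdapter  # assumed constructible with defaults


async def main() -> None:
    engine = InterviewEngine(
        llm_adapter=LiteLLMAdapter(),
        state_dir=Path.home() / ".ouroboros" / "data",
    )

    started = await engine.start_interview(
        initial_context="I want to build a CLI tool for task management"
    )
    state = started.value  # assumes success; start_interview only rejects empty context

    # Loop until MAX_INTERVIEW_ROUNDS (10) is hit or the interview is completed.
    while not state.is_complete:
        asked = await engine.ask_next_question(state)
        if asked.is_err:
            break  # e.g. a ProviderError from the LLM adapter
        question = asked.value
        answer = input(f"{question}\n> ")
        await engine.record_response(state, answer, question)  # mutates state in place
        await engine.save_state(state)  # lock-guarded JSON write

    await engine.complete_interview(state)
    await engine.save_state(state)


if __name__ == "__main__":
    asyncio.run(main())

Because save_state serializes writes through _file_lock (an fcntl.flock advisory lock), concurrent runs pointed at the same state directory should not interleave partial JSON writes; note that advisory locks only protect cooperating processes, and fcntl is POSIX-only.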