cadence-skill-installer 0.2.44 → 0.2.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json
CHANGED
|
@@ -11,7 +11,14 @@ RESEARCH_SCHEMA_VERSION = 1
|
|
|
11
11
|
PRIORITY_LEVELS = {"low", "medium", "high"}
|
|
12
12
|
RESEARCH_EXECUTION_SCHEMA_VERSION = 1
|
|
13
13
|
RESEARCH_EXECUTION_STATUSES = {"pending", "in_progress", "complete"}
|
|
14
|
-
RESEARCH_TOPIC_STATUSES = {
|
|
14
|
+
RESEARCH_TOPIC_STATUSES = {
|
|
15
|
+
"pending",
|
|
16
|
+
"in_progress",
|
|
17
|
+
"needs_followup",
|
|
18
|
+
"complete",
|
|
19
|
+
"complete_with_caveats",
|
|
20
|
+
}
|
|
21
|
+
RESEARCH_TOPIC_COMPLETE_STATUSES = {"complete", "complete_with_caveats"}
|
|
15
22
|
DEFAULT_RESEARCH_HANDOFF_MESSAGE = 'Start a new chat and say "continue research".'
|
|
16
23
|
|
|
17
24
|
|
|
@@ -84,11 +91,14 @@ def default_research_execution() -> dict[str, Any]:
|
|
|
84
91
|
"planning": {
|
|
85
92
|
"target_effort_per_pass": 12,
|
|
86
93
|
"max_topics_per_pass": 4,
|
|
94
|
+
"max_passes_per_topic": 3,
|
|
95
|
+
"max_total_passes": 120,
|
|
87
96
|
"latest_round": 0,
|
|
88
97
|
},
|
|
89
98
|
"summary": {
|
|
90
99
|
"topic_total": 0,
|
|
91
100
|
"topic_complete": 0,
|
|
101
|
+
"topic_caveated": 0,
|
|
92
102
|
"topic_needs_followup": 0,
|
|
93
103
|
"topic_pending": 0,
|
|
94
104
|
"pass_pending": 0,
|
|
@@ -181,6 +191,20 @@ def _normalize_research_execution(agenda: dict[str, Any], raw_execution: Any) ->
|
|
|
181
191
|
if max_topics < 1:
|
|
182
192
|
max_topics = 1
|
|
183
193
|
|
|
194
|
+
try:
|
|
195
|
+
max_passes_per_topic = int(planning.get("max_passes_per_topic", 3))
|
|
196
|
+
except (TypeError, ValueError):
|
|
197
|
+
max_passes_per_topic = 3
|
|
198
|
+
if max_passes_per_topic < 1:
|
|
199
|
+
max_passes_per_topic = 1
|
|
200
|
+
|
|
201
|
+
try:
|
|
202
|
+
max_total_passes = int(planning.get("max_total_passes", 120))
|
|
203
|
+
except (TypeError, ValueError):
|
|
204
|
+
max_total_passes = 120
|
|
205
|
+
if max_total_passes < 1:
|
|
206
|
+
max_total_passes = 1
|
|
207
|
+
|
|
184
208
|
try:
|
|
185
209
|
latest_round = int(planning.get("latest_round", 0))
|
|
186
210
|
except (TypeError, ValueError):
|
|
@@ -191,6 +215,8 @@ def _normalize_research_execution(agenda: dict[str, Any], raw_execution: Any) ->
|
|
|
191
215
|
normalized["planning"] = {
|
|
192
216
|
"target_effort_per_pass": target_effort,
|
|
193
217
|
"max_topics_per_pass": max_topics,
|
|
218
|
+
"max_passes_per_topic": max_passes_per_topic,
|
|
219
|
+
"max_total_passes": max_total_passes,
|
|
194
220
|
"latest_round": latest_round,
|
|
195
221
|
}
|
|
196
222
|
|
|
@@ -305,7 +331,10 @@ def _normalize_research_execution(agenda: dict[str, Any], raw_execution: Any) ->
|
|
|
305
331
|
normalized["source_registry"] = source_registry
|
|
306
332
|
|
|
307
333
|
total_topics = len(topic_status)
|
|
308
|
-
topic_complete = len(
|
|
334
|
+
topic_complete = len(
|
|
335
|
+
[entry for entry in topic_status.values() if entry.get("status") in RESEARCH_TOPIC_COMPLETE_STATUSES]
|
|
336
|
+
)
|
|
337
|
+
topic_caveated = len([entry for entry in topic_status.values() if entry.get("status") == "complete_with_caveats"])
|
|
309
338
|
topic_needs_followup = len(
|
|
310
339
|
[entry for entry in topic_status.values() if entry.get("status") == "needs_followup"]
|
|
311
340
|
)
|
|
@@ -331,6 +360,7 @@ def _normalize_research_execution(agenda: dict[str, Any], raw_execution: Any) ->
|
|
|
331
360
|
normalized["summary"] = {
|
|
332
361
|
"topic_total": total_topics,
|
|
333
362
|
"topic_complete": topic_complete,
|
|
363
|
+
"topic_caveated": topic_caveated,
|
|
334
364
|
"topic_needs_followup": topic_needs_followup,
|
|
335
365
|
"topic_pending": max(topic_pending, 0),
|
|
336
366
|
"pass_pending": len(pass_queue),
|
|
@@ -405,6 +435,7 @@ def reset_research_execution(ideation: Any) -> dict[str, Any]:
|
|
|
405
435
|
execution["summary"] = {
|
|
406
436
|
"topic_total": len(topic_index),
|
|
407
437
|
"topic_complete": 0,
|
|
438
|
+
"topic_caveated": 0,
|
|
408
439
|
"topic_needs_followup": 0,
|
|
409
440
|
"topic_pending": len(topic_index),
|
|
410
441
|
"pass_pending": 0,
|
|
@@ -23,8 +23,10 @@ from workflow_state import reconcile_workflow_state
|
|
|
23
23
|
SCRIPT_DIR = Path(__file__).resolve().parent
|
|
24
24
|
ROUTE_GUARD_SCRIPT = SCRIPT_DIR / "assert-workflow-route.py"
|
|
25
25
|
CADENCE_JSON_REL = Path(".cadence") / "cadence.json"
|
|
26
|
-
PASS_RESULT_TOPIC_STATUSES = {"complete", "needs_followup"}
|
|
26
|
+
PASS_RESULT_TOPIC_STATUSES = {"complete", "complete_with_caveats", "needs_followup"}
|
|
27
27
|
PASS_RESULT_CONFIDENCE = {"low", "medium", "high"}
|
|
28
|
+
TOPIC_COMPLETE_STATUSES = {"complete", "complete_with_caveats"}
|
|
29
|
+
TOPIC_ACTIVE_STATUSES = {"pending", "in_progress", "needs_followup"}
|
|
28
30
|
|
|
29
31
|
|
|
30
32
|
def run_command(command: list[str]) -> subprocess.CompletedProcess[str]:
|
|
@@ -76,6 +78,33 @@ def coerce_string_list(value: Any) -> list[str]:
|
|
|
76
78
|
return items
|
|
77
79
|
|
|
78
80
|
|
|
81
|
+
def coerce_positive_int(value: Any, default: int, *, minimum: int = 1) -> int:
|
|
82
|
+
try:
|
|
83
|
+
number = int(value)
|
|
84
|
+
except (TypeError, ValueError):
|
|
85
|
+
number = default
|
|
86
|
+
if number < minimum:
|
|
87
|
+
return minimum
|
|
88
|
+
return number
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def planning_config(execution: dict[str, Any]) -> dict[str, int]:
|
|
92
|
+
planning = execution.get("planning")
|
|
93
|
+
if not isinstance(planning, dict):
|
|
94
|
+
planning = {}
|
|
95
|
+
execution["planning"] = planning
|
|
96
|
+
|
|
97
|
+
config = {
|
|
98
|
+
"target_effort_per_pass": coerce_positive_int(planning.get("target_effort_per_pass", 12), 12),
|
|
99
|
+
"max_topics_per_pass": coerce_positive_int(planning.get("max_topics_per_pass", 4), 4),
|
|
100
|
+
"max_passes_per_topic": coerce_positive_int(planning.get("max_passes_per_topic", 3), 3),
|
|
101
|
+
"max_total_passes": coerce_positive_int(planning.get("max_total_passes", 120), 120),
|
|
102
|
+
"latest_round": coerce_positive_int(planning.get("latest_round", 0), 0, minimum=0),
|
|
103
|
+
}
|
|
104
|
+
planning.update(config)
|
|
105
|
+
return config
|
|
106
|
+
|
|
107
|
+
|
|
79
108
|
def parse_args() -> argparse.Namespace:
|
|
80
109
|
parser = argparse.ArgumentParser(
|
|
81
110
|
description="Plan and persist Cadence ideation research passes.",
|
|
@@ -227,7 +256,7 @@ def unresolved_topics(execution: dict[str, Any]) -> list[str]:
|
|
|
227
256
|
[
|
|
228
257
|
topic_id
|
|
229
258
|
for topic_id, entry in topic_status.items()
|
|
230
|
-
if isinstance(entry, dict) and coerce_string(entry.get("status"), "pending")
|
|
259
|
+
if isinstance(entry, dict) and coerce_string(entry.get("status"), "pending") in TOPIC_ACTIVE_STATUSES
|
|
231
260
|
]
|
|
232
261
|
)
|
|
233
262
|
|
|
@@ -236,19 +265,22 @@ def sort_topics_for_planning(topic_ids: list[str], topic_map: dict[str, dict[str
|
|
|
236
265
|
status_map = execution.get("topic_status")
|
|
237
266
|
status_map = status_map if isinstance(status_map, dict) else {}
|
|
238
267
|
|
|
239
|
-
status_rank = {"
|
|
268
|
+
status_rank = {"pending": 0, "in_progress": 1, "needs_followup": 2}
|
|
240
269
|
priority_rank = {"high": 0, "medium": 1, "low": 2}
|
|
241
270
|
|
|
242
|
-
def sort_key(topic_id: str) -> tuple[int, int, int, str]:
|
|
271
|
+
def sort_key(topic_id: str) -> tuple[int, int, int, int, str]:
|
|
243
272
|
status = "pending"
|
|
273
|
+
passes_attempted = 0
|
|
244
274
|
entry = status_map.get(topic_id)
|
|
245
275
|
if isinstance(entry, dict):
|
|
246
276
|
status = coerce_string(entry.get("status"), "pending").lower()
|
|
277
|
+
passes_attempted = coerce_positive_int(entry.get("passes_attempted", 0), 0, minimum=0)
|
|
247
278
|
|
|
248
279
|
topic = topic_map.get(topic_id, {})
|
|
249
280
|
priority = coerce_string(topic.get("priority"), "medium").lower()
|
|
250
281
|
return (
|
|
251
282
|
status_rank.get(status, 2),
|
|
283
|
+
passes_attempted,
|
|
252
284
|
priority_rank.get(priority, 1),
|
|
253
285
|
-topic_effort(topic),
|
|
254
286
|
topic_id,
|
|
@@ -258,15 +290,24 @@ def sort_topics_for_planning(topic_ids: list[str], topic_map: dict[str, dict[str
|
|
|
258
290
|
|
|
259
291
|
|
|
260
292
|
def latest_round(execution: dict[str, Any]) -> int:
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
293
|
+
config = planning_config(execution)
|
|
294
|
+
round_candidates = [config.get("latest_round", 0)]
|
|
295
|
+
|
|
296
|
+
queue = execution.get("pass_queue")
|
|
297
|
+
if isinstance(queue, list):
|
|
298
|
+
for entry in queue:
|
|
299
|
+
if not isinstance(entry, dict):
|
|
300
|
+
continue
|
|
301
|
+
round_candidates.append(coerce_positive_int(entry.get("round", 0), 0, minimum=0))
|
|
302
|
+
|
|
303
|
+
history = execution.get("pass_history")
|
|
304
|
+
if isinstance(history, list):
|
|
305
|
+
for entry in history:
|
|
306
|
+
if not isinstance(entry, dict):
|
|
307
|
+
continue
|
|
308
|
+
round_candidates.append(coerce_positive_int(entry.get("round", 0), 0, minimum=0))
|
|
309
|
+
|
|
310
|
+
return max(round_candidates)
|
|
270
311
|
|
|
271
312
|
|
|
272
313
|
def rebuild_pass_queue(execution: dict[str, Any], topic_map: dict[str, dict[str, Any]], timestamp: str) -> None:
|
|
@@ -275,29 +316,11 @@ def rebuild_pass_queue(execution: dict[str, Any], topic_map: dict[str, dict[str,
|
|
|
275
316
|
execution["pass_queue"] = []
|
|
276
317
|
return
|
|
277
318
|
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
execution["planning"] = planning
|
|
282
|
-
|
|
283
|
-
try:
|
|
284
|
-
target_effort = int(planning.get("target_effort_per_pass", 12))
|
|
285
|
-
except (TypeError, ValueError):
|
|
286
|
-
target_effort = 12
|
|
287
|
-
if target_effort < 1:
|
|
288
|
-
target_effort = 1
|
|
289
|
-
|
|
290
|
-
try:
|
|
291
|
-
max_topics = int(planning.get("max_topics_per_pass", 4))
|
|
292
|
-
except (TypeError, ValueError):
|
|
293
|
-
max_topics = 4
|
|
294
|
-
if max_topics < 1:
|
|
295
|
-
max_topics = 1
|
|
296
|
-
|
|
319
|
+
config = planning_config(execution)
|
|
320
|
+
target_effort = config["target_effort_per_pass"]
|
|
321
|
+
max_topics = config["max_topics_per_pass"]
|
|
297
322
|
round_number = latest_round(execution) + 1
|
|
298
|
-
planning["latest_round"] = round_number
|
|
299
|
-
planning["target_effort_per_pass"] = target_effort
|
|
300
|
-
planning["max_topics_per_pass"] = max_topics
|
|
323
|
+
execution["planning"]["latest_round"] = round_number
|
|
301
324
|
|
|
302
325
|
ordered_topics = sort_topics_for_planning(topic_ids, topic_map, execution)
|
|
303
326
|
queue: list[dict[str, Any]] = []
|
|
@@ -351,6 +374,147 @@ def rebuild_pass_queue(execution: dict[str, Any], topic_map: dict[str, dict[str,
|
|
|
351
374
|
execution["pass_queue"] = queue
|
|
352
375
|
|
|
353
376
|
|
|
377
|
+
def append_unique_note(items: list[str], note: str) -> list[str]:
|
|
378
|
+
normalized = coerce_string_list(items)
|
|
379
|
+
text = coerce_string(note)
|
|
380
|
+
if text and text not in normalized:
|
|
381
|
+
normalized.append(text)
|
|
382
|
+
return normalized
|
|
383
|
+
|
|
384
|
+
|
|
385
|
+
def mark_topic_complete_with_caveats(
|
|
386
|
+
entry: dict[str, Any],
|
|
387
|
+
*,
|
|
388
|
+
topic_id: str,
|
|
389
|
+
topic_map: dict[str, dict[str, Any]],
|
|
390
|
+
timestamp: str,
|
|
391
|
+
note: str,
|
|
392
|
+
) -> None:
|
|
393
|
+
summary = coerce_string(entry.get("latest_summary"))
|
|
394
|
+
if not summary:
|
|
395
|
+
title = coerce_string(topic_map.get(topic_id, {}).get("title"), topic_id)
|
|
396
|
+
summary = f"Accepted with caveats after bounded research for {title}."
|
|
397
|
+
|
|
398
|
+
entry["status"] = "complete_with_caveats"
|
|
399
|
+
entry["latest_summary"] = summary
|
|
400
|
+
entry["unresolved_questions"] = append_unique_note(entry.get("unresolved_questions", []), note)
|
|
401
|
+
entry["updated_at"] = timestamp
|
|
402
|
+
|
|
403
|
+
|
|
404
|
+
def enforce_topic_retry_limits(execution: dict[str, Any], topic_map: dict[str, dict[str, Any]], timestamp: str) -> list[str]:
|
|
405
|
+
topic_status = execution.get("topic_status")
|
|
406
|
+
if not isinstance(topic_status, dict):
|
|
407
|
+
return []
|
|
408
|
+
|
|
409
|
+
config = planning_config(execution)
|
|
410
|
+
max_passes_per_topic = config["max_passes_per_topic"]
|
|
411
|
+
capped_topic_ids: list[str] = []
|
|
412
|
+
note = (
|
|
413
|
+
f"Pass cap reached ({max_passes_per_topic}); accepting current findings with caveats."
|
|
414
|
+
)
|
|
415
|
+
|
|
416
|
+
for topic_id, raw_entry in topic_status.items():
|
|
417
|
+
if not isinstance(raw_entry, dict):
|
|
418
|
+
continue
|
|
419
|
+
|
|
420
|
+
status = coerce_string(raw_entry.get("status"), "pending")
|
|
421
|
+
if status not in TOPIC_ACTIVE_STATUSES:
|
|
422
|
+
continue
|
|
423
|
+
|
|
424
|
+
passes_attempted = coerce_positive_int(raw_entry.get("passes_attempted", 0), 0, minimum=0)
|
|
425
|
+
if passes_attempted < max_passes_per_topic:
|
|
426
|
+
continue
|
|
427
|
+
|
|
428
|
+
mark_topic_complete_with_caveats(
|
|
429
|
+
raw_entry,
|
|
430
|
+
topic_id=topic_id,
|
|
431
|
+
topic_map=topic_map,
|
|
432
|
+
timestamp=timestamp,
|
|
433
|
+
note=note,
|
|
434
|
+
)
|
|
435
|
+
capped_topic_ids.append(topic_id)
|
|
436
|
+
|
|
437
|
+
execution["topic_status"] = topic_status
|
|
438
|
+
return sorted(set(capped_topic_ids))
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
def enforce_total_pass_limit(execution: dict[str, Any], topic_map: dict[str, dict[str, Any]], timestamp: str) -> list[str]:
|
|
442
|
+
history = execution.get("pass_history")
|
|
443
|
+
history = history if isinstance(history, list) else []
|
|
444
|
+
config = planning_config(execution)
|
|
445
|
+
max_total_passes = config["max_total_passes"]
|
|
446
|
+
if len(history) < max_total_passes:
|
|
447
|
+
return []
|
|
448
|
+
|
|
449
|
+
topic_status = execution.get("topic_status")
|
|
450
|
+
if not isinstance(topic_status, dict):
|
|
451
|
+
return []
|
|
452
|
+
|
|
453
|
+
capped_topic_ids: list[str] = []
|
|
454
|
+
note = (
|
|
455
|
+
f"Total pass cap reached ({max_total_passes}); accepting current findings with caveats."
|
|
456
|
+
)
|
|
457
|
+
for topic_id, raw_entry in topic_status.items():
|
|
458
|
+
if not isinstance(raw_entry, dict):
|
|
459
|
+
continue
|
|
460
|
+
status = coerce_string(raw_entry.get("status"), "pending")
|
|
461
|
+
if status not in TOPIC_ACTIVE_STATUSES:
|
|
462
|
+
continue
|
|
463
|
+
|
|
464
|
+
mark_topic_complete_with_caveats(
|
|
465
|
+
raw_entry,
|
|
466
|
+
topic_id=topic_id,
|
|
467
|
+
topic_map=topic_map,
|
|
468
|
+
timestamp=timestamp,
|
|
469
|
+
note=note,
|
|
470
|
+
)
|
|
471
|
+
capped_topic_ids.append(topic_id)
|
|
472
|
+
|
|
473
|
+
if capped_topic_ids:
|
|
474
|
+
execution["pass_queue"] = []
|
|
475
|
+
execution["topic_status"] = topic_status
|
|
476
|
+
return sorted(set(capped_topic_ids))
|
|
477
|
+
|
|
478
|
+
|
|
479
|
+
def prune_pass_queue(execution: dict[str, Any]) -> None:
|
|
480
|
+
queue = execution.get("pass_queue")
|
|
481
|
+
if not isinstance(queue, list):
|
|
482
|
+
execution["pass_queue"] = []
|
|
483
|
+
return
|
|
484
|
+
|
|
485
|
+
topic_status = execution.get("topic_status")
|
|
486
|
+
topic_status = topic_status if isinstance(topic_status, dict) else {}
|
|
487
|
+
active_topic_ids = {
|
|
488
|
+
topic_id
|
|
489
|
+
for topic_id, entry in topic_status.items()
|
|
490
|
+
if isinstance(entry, dict) and coerce_string(entry.get("status"), "pending") in TOPIC_ACTIVE_STATUSES
|
|
491
|
+
}
|
|
492
|
+
|
|
493
|
+
filtered_queue: list[dict[str, Any]] = []
|
|
494
|
+
for entry in queue:
|
|
495
|
+
if not isinstance(entry, dict):
|
|
496
|
+
continue
|
|
497
|
+
topic_ids = [topic_id for topic_id in coerce_string_list(entry.get("topic_ids")) if topic_id in active_topic_ids]
|
|
498
|
+
if not topic_ids:
|
|
499
|
+
continue
|
|
500
|
+
status = coerce_string(entry.get("status"), "pending")
|
|
501
|
+
if status not in {"pending", "in_progress"}:
|
|
502
|
+
status = "pending"
|
|
503
|
+
filtered_queue.append(
|
|
504
|
+
{
|
|
505
|
+
"pass_id": coerce_string(entry.get("pass_id")),
|
|
506
|
+
"round": coerce_positive_int(entry.get("round", 0), 0, minimum=0),
|
|
507
|
+
"status": status,
|
|
508
|
+
"topic_ids": topic_ids,
|
|
509
|
+
"planned_effort": coerce_positive_int(entry.get("planned_effort", 0), 0, minimum=0),
|
|
510
|
+
"created_at": coerce_string(entry.get("created_at")),
|
|
511
|
+
"started_at": coerce_string(entry.get("started_at")),
|
|
512
|
+
}
|
|
513
|
+
)
|
|
514
|
+
|
|
515
|
+
execution["pass_queue"] = filtered_queue
|
|
516
|
+
|
|
517
|
+
|
|
354
518
|
def recompute_execution_summary(execution: dict[str, Any]) -> None:
|
|
355
519
|
topic_status = execution.get("topic_status")
|
|
356
520
|
topic_status = topic_status if isinstance(topic_status, dict) else {}
|
|
@@ -360,7 +524,20 @@ def recompute_execution_summary(execution: dict[str, Any]) -> None:
|
|
|
360
524
|
history = history if isinstance(history, list) else []
|
|
361
525
|
|
|
362
526
|
total = len(topic_status)
|
|
363
|
-
complete = len(
|
|
527
|
+
complete = len(
|
|
528
|
+
[
|
|
529
|
+
entry
|
|
530
|
+
for entry in topic_status.values()
|
|
531
|
+
if isinstance(entry, dict) and coerce_string(entry.get("status"), "pending") in TOPIC_COMPLETE_STATUSES
|
|
532
|
+
]
|
|
533
|
+
)
|
|
534
|
+
caveated = len(
|
|
535
|
+
[
|
|
536
|
+
entry
|
|
537
|
+
for entry in topic_status.values()
|
|
538
|
+
if isinstance(entry, dict) and coerce_string(entry.get("status"), "pending") == "complete_with_caveats"
|
|
539
|
+
]
|
|
540
|
+
)
|
|
364
541
|
followup = len(
|
|
365
542
|
[
|
|
366
543
|
entry
|
|
@@ -408,6 +585,7 @@ def recompute_execution_summary(execution: dict[str, Any]) -> None:
|
|
|
408
585
|
execution["summary"] = {
|
|
409
586
|
"topic_total": total,
|
|
410
587
|
"topic_complete": complete,
|
|
588
|
+
"topic_caveated": caveated,
|
|
411
589
|
"topic_needs_followup": followup,
|
|
412
590
|
"topic_pending": pending,
|
|
413
591
|
"pass_pending": len(queue),
|
|
@@ -643,6 +821,10 @@ def handle_start(project_root: Path, args: argparse.Namespace) -> int:
|
|
|
643
821
|
)
|
|
644
822
|
return 2
|
|
645
823
|
|
|
824
|
+
enforce_topic_retry_limits(execution, topic_map, timestamp)
|
|
825
|
+
enforce_total_pass_limit(execution, topic_map, timestamp)
|
|
826
|
+
prune_pass_queue(execution)
|
|
827
|
+
|
|
646
828
|
queue = execution.get("pass_queue")
|
|
647
829
|
queue = queue if isinstance(queue, list) else []
|
|
648
830
|
current_pass = next(
|
|
@@ -655,8 +837,9 @@ def handle_start(project_root: Path, args: argparse.Namespace) -> int:
|
|
|
655
837
|
)
|
|
656
838
|
|
|
657
839
|
if not isinstance(current_pass, dict):
|
|
658
|
-
if not queue:
|
|
840
|
+
if not queue and unresolved_topics(execution):
|
|
659
841
|
rebuild_pass_queue(execution, topic_map, timestamp)
|
|
842
|
+
prune_pass_queue(execution)
|
|
660
843
|
queue = execution.get("pass_queue", [])
|
|
661
844
|
current_pass = next(
|
|
662
845
|
(
|
|
@@ -695,7 +878,7 @@ def handle_start(project_root: Path, args: argparse.Namespace) -> int:
|
|
|
695
878
|
entry = topic_status.get(topic_id)
|
|
696
879
|
if not isinstance(entry, dict):
|
|
697
880
|
continue
|
|
698
|
-
if coerce_string(entry.get("status"), "pending")
|
|
881
|
+
if coerce_string(entry.get("status"), "pending") not in TOPIC_COMPLETE_STATUSES:
|
|
699
882
|
if entry.get("status") != "needs_followup":
|
|
700
883
|
entry["status"] = "in_progress"
|
|
701
884
|
entry["last_pass_id"] = pass_id
|
|
@@ -810,7 +993,7 @@ def handle_complete(project_root: Path, args: argparse.Namespace) -> int:
|
|
|
810
993
|
|
|
811
994
|
unresolved = coerce_string_list(result.get("unresolved_questions"))
|
|
812
995
|
if status == "complete" and unresolved:
|
|
813
|
-
status = "
|
|
996
|
+
status = "complete_with_caveats"
|
|
814
997
|
|
|
815
998
|
summary = coerce_string(result.get("summary"))
|
|
816
999
|
confidence = coerce_string(result.get("confidence"), "medium").lower()
|
|
@@ -841,12 +1024,7 @@ def handle_complete(project_root: Path, args: argparse.Namespace) -> int:
|
|
|
841
1024
|
}
|
|
842
1025
|
topic_status[topic_id] = entry
|
|
843
1026
|
|
|
844
|
-
|
|
845
|
-
passes_attempted = int(entry.get("passes_attempted", 0))
|
|
846
|
-
except (TypeError, ValueError):
|
|
847
|
-
passes_attempted = 0
|
|
848
|
-
if passes_attempted < 0:
|
|
849
|
-
passes_attempted = 0
|
|
1027
|
+
passes_attempted = coerce_positive_int(entry.get("passes_attempted", 0), 0, minimum=0)
|
|
850
1028
|
entry["passes_attempted"] = passes_attempted + 1
|
|
851
1029
|
entry["status"] = status
|
|
852
1030
|
entry["last_pass_id"] = pass_id
|
|
@@ -894,13 +1072,17 @@ def handle_complete(project_root: Path, args: argparse.Namespace) -> int:
|
|
|
894
1072
|
)
|
|
895
1073
|
execution["pass_history"] = history
|
|
896
1074
|
|
|
897
|
-
# Remove the completed in-progress pass
|
|
1075
|
+
# Remove the completed in-progress pass. Re-plan only at round boundaries.
|
|
898
1076
|
execution["pass_queue"] = [
|
|
899
1077
|
entry
|
|
900
1078
|
for entry in queue
|
|
901
1079
|
if isinstance(entry, dict) and coerce_string(entry.get("pass_id")) != pass_id
|
|
902
1080
|
]
|
|
903
|
-
|
|
1081
|
+
enforce_topic_retry_limits(execution, topic_map, timestamp)
|
|
1082
|
+
enforce_total_pass_limit(execution, topic_map, timestamp)
|
|
1083
|
+
prune_pass_queue(execution)
|
|
1084
|
+
if not execution.get("pass_queue") and unresolved_topics(execution):
|
|
1085
|
+
rebuild_pass_queue(execution, topic_map, timestamp)
|
|
904
1086
|
|
|
905
1087
|
recompute_execution_summary(execution)
|
|
906
1088
|
summary = execution.get("summary", {})
|
|
@@ -26,7 +26,7 @@ description: Execute ideation research agenda topics through dynamic multi-pass
|
|
|
26
26
|
- `topics` (array)
|
|
27
27
|
- Each `topics[]` item must include:
|
|
28
28
|
- `topic_id` (string, must be in current pass topic_ids)
|
|
29
|
-
- `status` (`complete` or `needs_followup`)
|
|
29
|
+
- `status` (`complete`, `complete_with_caveats`, or `needs_followup`)
|
|
30
30
|
- `summary` (string)
|
|
31
31
|
- `confidence` (`low|medium|high`)
|
|
32
32
|
- `unresolved_questions` (array of strings)
|
|
@@ -7,7 +7,11 @@ SCRIPTS_DIR = Path(__file__).resolve().parents[1] / "scripts"
|
|
|
7
7
|
if str(SCRIPTS_DIR) not in sys.path:
|
|
8
8
|
sys.path.insert(0, str(SCRIPTS_DIR))
|
|
9
9
|
|
|
10
|
-
from ideation_research import
|
|
10
|
+
from ideation_research import (
|
|
11
|
+
ResearchAgendaValidationError,
|
|
12
|
+
ensure_ideation_research_defaults,
|
|
13
|
+
normalize_ideation_research,
|
|
14
|
+
)
|
|
11
15
|
|
|
12
16
|
|
|
13
17
|
def base_payload() -> dict:
|
|
@@ -141,6 +145,41 @@ class IdeationResearchNormalizationTests(unittest.TestCase):
|
|
|
141
145
|
|
|
142
146
|
self.assertEqual(block_b_topic["related_entities"], ["entity-marketplace-fees"])
|
|
143
147
|
|
|
148
|
+
def test_execution_normalization_supports_caveated_completion_and_planning_caps(self) -> None:
|
|
149
|
+
payload = base_payload()
|
|
150
|
+
payload["research_execution"] = {
|
|
151
|
+
"planning": {
|
|
152
|
+
"target_effort_per_pass": 10,
|
|
153
|
+
"max_topics_per_pass": 2,
|
|
154
|
+
"max_passes_per_topic": 0,
|
|
155
|
+
"max_total_passes": -1,
|
|
156
|
+
"latest_round": 4,
|
|
157
|
+
},
|
|
158
|
+
"topic_status": {
|
|
159
|
+
"topic-a1": {
|
|
160
|
+
"status": "complete_with_caveats",
|
|
161
|
+
"passes_attempted": 3,
|
|
162
|
+
"latest_summary": "Accepted with caveats.",
|
|
163
|
+
},
|
|
164
|
+
"topic-b1": {
|
|
165
|
+
"status": "pending",
|
|
166
|
+
"passes_attempted": 0,
|
|
167
|
+
},
|
|
168
|
+
},
|
|
169
|
+
"pass_queue": [],
|
|
170
|
+
"pass_history": [],
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
normalized_agenda = normalize_ideation_research(copy.deepcopy(payload), require_topics=True)
|
|
174
|
+
normalized = ensure_ideation_research_defaults(normalized_agenda)
|
|
175
|
+
execution = normalized["research_execution"]
|
|
176
|
+
|
|
177
|
+
self.assertEqual(execution["topic_status"]["topic-a1"]["status"], "complete_with_caveats")
|
|
178
|
+
self.assertEqual(execution["summary"]["topic_complete"], 1)
|
|
179
|
+
self.assertEqual(execution["summary"]["topic_caveated"], 1)
|
|
180
|
+
self.assertEqual(execution["planning"]["max_passes_per_topic"], 1)
|
|
181
|
+
self.assertEqual(execution["planning"]["max_total_passes"], 1)
|
|
182
|
+
|
|
144
183
|
|
|
145
184
|
if __name__ == "__main__":
|
|
146
185
|
unittest.main()
|
|
@@ -4,6 +4,7 @@ import sys
|
|
|
4
4
|
import tempfile
|
|
5
5
|
import unittest
|
|
6
6
|
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
7
8
|
|
|
8
9
|
SCRIPTS_DIR = Path(__file__).resolve().parents[1] / "scripts"
|
|
9
10
|
if str(SCRIPTS_DIR) not in sys.path:
|
|
@@ -15,7 +16,13 @@ from workflow_state import default_data, reconcile_workflow_state, set_workflow_
|
|
|
15
16
|
RUN_RESEARCH_PASS_SCRIPT = SCRIPTS_DIR / "run-research-pass.py"
|
|
16
17
|
|
|
17
18
|
|
|
18
|
-
def build_cadence_state(
|
|
19
|
+
def build_cadence_state(
|
|
20
|
+
*,
|
|
21
|
+
pass_queue: list[dict[str, Any]] | None = None,
|
|
22
|
+
planning_overrides: dict[str, Any] | None = None,
|
|
23
|
+
topic_status_overrides: dict[str, dict[str, Any]] | None = None,
|
|
24
|
+
pass_history: list[dict[str, Any]] | None = None,
|
|
25
|
+
) -> dict[str, Any]:
|
|
19
26
|
data = default_data()
|
|
20
27
|
data.setdefault("state", {})["project-mode"] = "greenfield"
|
|
21
28
|
for task_id in (
|
|
@@ -76,17 +83,32 @@ def build_cadence_state() -> dict:
|
|
|
76
83
|
ideation = reset_research_execution(ideation)
|
|
77
84
|
|
|
78
85
|
execution = ideation.get("research_execution", {})
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
86
|
+
if pass_queue is None:
|
|
87
|
+
pass_queue = [
|
|
88
|
+
{
|
|
89
|
+
"pass_id": "pass-r1-01",
|
|
90
|
+
"round": 1,
|
|
91
|
+
"status": "in_progress",
|
|
92
|
+
"topic_ids": ["topic-one", "topic-two"],
|
|
93
|
+
"planned_effort": 4,
|
|
94
|
+
"created_at": "2026-01-01T00:00:00Z",
|
|
95
|
+
"started_at": "2026-01-01T00:00:01Z",
|
|
96
|
+
}
|
|
97
|
+
]
|
|
98
|
+
execution["pass_queue"] = pass_queue
|
|
99
|
+
if planning_overrides:
|
|
100
|
+
execution.setdefault("planning", {}).update(planning_overrides)
|
|
101
|
+
if pass_history is not None:
|
|
102
|
+
execution["pass_history"] = pass_history
|
|
103
|
+
|
|
104
|
+
topic_status = execution.get("topic_status", {})
|
|
105
|
+
if topic_status_overrides:
|
|
106
|
+
for topic_id, updates in topic_status_overrides.items():
|
|
107
|
+
existing = topic_status.get(topic_id)
|
|
108
|
+
if not isinstance(existing, dict):
|
|
109
|
+
continue
|
|
110
|
+
existing.update(updates)
|
|
111
|
+
|
|
90
112
|
execution["status"] = "in_progress"
|
|
91
113
|
data["ideation"] = ideation
|
|
92
114
|
data.setdefault("state", {})["ideation-completed"] = True
|
|
@@ -94,14 +116,54 @@ def build_cadence_state() -> dict:
|
|
|
94
116
|
return reconcile_workflow_state(data, cadence_dir_exists=True)
|
|
95
117
|
|
|
96
118
|
|
|
119
|
+
def write_state(project_root: Path, state: dict[str, Any]) -> Path:
|
|
120
|
+
cadence_dir = project_root / ".cadence"
|
|
121
|
+
cadence_dir.mkdir(parents=True, exist_ok=True)
|
|
122
|
+
cadence_json = cadence_dir / "cadence.json"
|
|
123
|
+
cadence_json.write_text(json.dumps(state, indent=4) + "\n", encoding="utf-8")
|
|
124
|
+
return cadence_json
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def run_complete(project_root: Path, *, pass_id: str, payload: dict[str, Any]) -> subprocess.CompletedProcess[str]:
|
|
128
|
+
return subprocess.run(
|
|
129
|
+
[
|
|
130
|
+
sys.executable,
|
|
131
|
+
str(RUN_RESEARCH_PASS_SCRIPT),
|
|
132
|
+
"--project-root",
|
|
133
|
+
str(project_root),
|
|
134
|
+
"complete",
|
|
135
|
+
"--pass-id",
|
|
136
|
+
pass_id,
|
|
137
|
+
"--json",
|
|
138
|
+
json.dumps(payload),
|
|
139
|
+
],
|
|
140
|
+
capture_output=True,
|
|
141
|
+
text=True,
|
|
142
|
+
check=False,
|
|
143
|
+
)
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def run_start(project_root: Path) -> subprocess.CompletedProcess[str]:
|
|
147
|
+
return subprocess.run(
|
|
148
|
+
[
|
|
149
|
+
sys.executable,
|
|
150
|
+
str(RUN_RESEARCH_PASS_SCRIPT),
|
|
151
|
+
"--project-root",
|
|
152
|
+
str(project_root),
|
|
153
|
+
"start",
|
|
154
|
+
"--ack-handoff",
|
|
155
|
+
],
|
|
156
|
+
capture_output=True,
|
|
157
|
+
text=True,
|
|
158
|
+
check=False,
|
|
159
|
+
)
|
|
160
|
+
|
|
161
|
+
|
|
97
162
|
class RunResearchPassValidationTests(unittest.TestCase):
|
|
98
163
|
def test_complete_requires_result_for_every_topic_in_pass(self) -> None:
|
|
99
164
|
with tempfile.TemporaryDirectory() as tmp_dir:
|
|
100
165
|
project_root = Path(tmp_dir)
|
|
101
|
-
|
|
102
|
-
cadence_dir.mkdir(parents=True, exist_ok=True)
|
|
103
|
-
cadence_json = cadence_dir / "cadence.json"
|
|
104
|
-
cadence_json.write_text(json.dumps(build_cadence_state(), indent=4) + "\n", encoding="utf-8")
|
|
166
|
+
write_state(project_root, build_cadence_state())
|
|
105
167
|
|
|
106
168
|
payload = {
|
|
107
169
|
"topics": [
|
|
@@ -116,27 +178,293 @@ class RunResearchPassValidationTests(unittest.TestCase):
|
|
|
116
178
|
]
|
|
117
179
|
}
|
|
118
180
|
|
|
119
|
-
result =
|
|
120
|
-
[
|
|
121
|
-
sys.executable,
|
|
122
|
-
str(RUN_RESEARCH_PASS_SCRIPT),
|
|
123
|
-
"--project-root",
|
|
124
|
-
str(project_root),
|
|
125
|
-
"complete",
|
|
126
|
-
"--pass-id",
|
|
127
|
-
"pass-r1-01",
|
|
128
|
-
"--json",
|
|
129
|
-
json.dumps(payload),
|
|
130
|
-
],
|
|
131
|
-
capture_output=True,
|
|
132
|
-
text=True,
|
|
133
|
-
check=False,
|
|
134
|
-
)
|
|
181
|
+
result = run_complete(project_root, pass_id="pass-r1-01", payload=payload)
|
|
135
182
|
|
|
136
183
|
self.assertEqual(result.returncode, 2)
|
|
137
184
|
self.assertIn("PASS_RESULT_MISSING_TOPICS", result.stderr)
|
|
138
185
|
self.assertIn("topic-two", result.stderr)
|
|
139
186
|
|
|
187
|
+
def test_does_not_replan_queue_mid_round_after_followup(self) -> None:
|
|
188
|
+
with tempfile.TemporaryDirectory() as tmp_dir:
|
|
189
|
+
project_root = Path(tmp_dir)
|
|
190
|
+
write_state(
|
|
191
|
+
project_root,
|
|
192
|
+
build_cadence_state(
|
|
193
|
+
pass_queue=[
|
|
194
|
+
{
|
|
195
|
+
"pass_id": "pass-r1-01",
|
|
196
|
+
"round": 1,
|
|
197
|
+
"status": "in_progress",
|
|
198
|
+
"topic_ids": ["topic-one"],
|
|
199
|
+
"planned_effort": 2,
|
|
200
|
+
"created_at": "2026-01-01T00:00:00Z",
|
|
201
|
+
"started_at": "2026-01-01T00:00:01Z",
|
|
202
|
+
},
|
|
203
|
+
{
|
|
204
|
+
"pass_id": "pass-r1-02",
|
|
205
|
+
"round": 1,
|
|
206
|
+
"status": "pending",
|
|
207
|
+
"topic_ids": ["topic-two"],
|
|
208
|
+
"planned_effort": 2,
|
|
209
|
+
"created_at": "2026-01-01T00:00:00Z",
|
|
210
|
+
"started_at": "",
|
|
211
|
+
},
|
|
212
|
+
]
|
|
213
|
+
),
|
|
214
|
+
)
|
|
215
|
+
|
|
216
|
+
payload = {
|
|
217
|
+
"pass_summary": "Topic one needs one more pass.",
|
|
218
|
+
"topics": [
|
|
219
|
+
{
|
|
220
|
+
"topic_id": "topic-one",
|
|
221
|
+
"status": "needs_followup",
|
|
222
|
+
"summary": "Need one more clarification.",
|
|
223
|
+
"confidence": "medium",
|
|
224
|
+
"unresolved_questions": ["Open question"],
|
|
225
|
+
"sources": [{"url": "https://example.com/1"}],
|
|
226
|
+
}
|
|
227
|
+
],
|
|
228
|
+
}
|
|
229
|
+
result = run_complete(project_root, pass_id="pass-r1-01", payload=payload)
|
|
230
|
+
|
|
231
|
+
self.assertEqual(result.returncode, 0, msg=result.stderr or result.stdout)
|
|
232
|
+
output = json.loads(result.stdout)
|
|
233
|
+
self.assertEqual(output["summary"]["next_pass_id"], "pass-r1-02")
|
|
234
|
+
|
|
235
|
+
state = json.loads((project_root / ".cadence" / "cadence.json").read_text(encoding="utf-8"))
|
|
236
|
+
queue = state["ideation"]["research_execution"]["pass_queue"]
|
|
237
|
+
self.assertEqual(len(queue), 1)
|
|
238
|
+
self.assertEqual(queue[0]["pass_id"], "pass-r1-02")
|
|
239
|
+
self.assertEqual(queue[0]["topic_ids"], ["topic-two"])
|
|
240
|
+
|
|
241
|
+
def test_replans_when_round_queue_drains(self) -> None:
    """Once the round's queue empties, a needs_followup topic triggers a round-2 replan."""
    with tempfile.TemporaryDirectory() as workdir:
        root = Path(workdir)
        # Single in-flight pass; topic-two is already complete so only topic-one can replan.
        only_pass = {
            "pass_id": "pass-r1-01",
            "round": 1,
            "status": "in_progress",
            "topic_ids": ["topic-one"],
            "planned_effort": 2,
            "created_at": "2026-01-01T00:00:00Z",
            "started_at": "2026-01-01T00:00:01Z",
        }
        write_state(
            root,
            build_cadence_state(
                pass_queue=[only_pass],
                topic_status_overrides={"topic-two": {"status": "complete"}},
            ),
        )

        completion = {
            "pass_summary": "Topic one still needs follow-up.",
            "topics": [
                {
                    "topic_id": "topic-one",
                    "status": "needs_followup",
                    "summary": "Need one more clarification.",
                    "confidence": "medium",
                    "unresolved_questions": ["Open question"],
                    "sources": [{"url": "https://example.com/2"}],
                }
            ],
        }
        proc = run_complete(root, pass_id="pass-r1-01", payload=completion)

        self.assertEqual(proc.returncode, 0, msg=proc.stderr or proc.stdout)
        response = json.loads(proc.stdout)
        # Drained queue + outstanding follow-up => a freshly planned round-2 pass.
        self.assertEqual(response["summary"]["next_pass_id"], "pass-r2-01")

        state_path = root / ".cadence" / "cadence.json"
        persisted = json.loads(state_path.read_text(encoding="utf-8"))
        remaining = persisted["ideation"]["research_execution"]["pass_queue"]
        self.assertEqual(len(remaining), 1)
        self.assertEqual(remaining[0]["pass_id"], "pass-r2-01")
        self.assertEqual(remaining[0]["topic_ids"], ["topic-one"])
def test_complete_with_unresolved_questions_becomes_complete_with_caveats(self) -> None:
    """A 'complete' verdict carrying unresolved questions is stored as complete_with_caveats."""
    with tempfile.TemporaryDirectory() as workdir:
        root = Path(workdir)
        final_pass = {
            "pass_id": "pass-r1-01",
            "round": 1,
            "status": "in_progress",
            "topic_ids": ["topic-one"],
            "planned_effort": 2,
            "created_at": "2026-01-01T00:00:00Z",
            "started_at": "2026-01-01T00:00:01Z",
        }
        write_state(
            root,
            build_cadence_state(
                pass_queue=[final_pass],
                topic_status_overrides={"topic-two": {"status": "complete"}},
            ),
        )

        # Agent reports "complete" but still lists open questions — the caveat trigger.
        completion = {
            "pass_summary": "Topic one finalized with caveats.",
            "topics": [
                {
                    "topic_id": "topic-one",
                    "status": "complete",
                    "summary": "Primary recommendation stands.",
                    "confidence": "high",
                    "unresolved_questions": ["Need direct vendor confirmation"],
                    "sources": [{"url": "https://example.com/3"}],
                }
            ],
        }
        proc = run_complete(root, pass_id="pass-r1-01", payload=completion)

        self.assertEqual(proc.returncode, 0, msg=proc.stderr or proc.stdout)
        response = json.loads(proc.stdout)
        # Caveated completion still counts as done for overall research.
        self.assertTrue(response["research_complete"])

        state_path = root / ".cadence" / "cadence.json"
        persisted = json.loads(state_path.read_text(encoding="utf-8"))
        topic_record = persisted["ideation"]["research_execution"]["topic_status"]["topic-one"]
        self.assertEqual(topic_record["status"], "complete_with_caveats")
        self.assertTrue(persisted["state"]["research-completed"])
def test_topic_pass_cap_converts_followup_to_complete_with_caveats(self) -> None:
    """Hitting max_passes_per_topic forces a needs_followup topic to complete_with_caveats."""
    with tempfile.TemporaryDirectory() as workdir:
        root = Path(workdir)
        capped_pass = {
            "pass_id": "pass-r1-01",
            "round": 1,
            "status": "in_progress",
            "topic_ids": ["topic-one"],
            "planned_effort": 2,
            "created_at": "2026-01-01T00:00:00Z",
            "started_at": "2026-01-01T00:00:01Z",
        }
        write_state(
            root,
            build_cadence_state(
                pass_queue=[capped_pass],
                # Cap of 2 with one attempt already spent: this pass exhausts the budget.
                planning_overrides={"max_passes_per_topic": 2},
                topic_status_overrides={
                    "topic-one": {"passes_attempted": 1},
                    "topic-two": {"status": "complete"},
                },
            ),
        )

        completion = {
            "pass_summary": "Reached cap on topic one.",
            "topics": [
                {
                    "topic_id": "topic-one",
                    "status": "needs_followup",
                    "summary": "Best available recommendation captured.",
                    "confidence": "high",
                    "unresolved_questions": ["Need commercial confirmation"],
                    "sources": [{"url": "https://example.com/4"}],
                }
            ],
        }
        proc = run_complete(root, pass_id="pass-r1-01", payload=completion)

        self.assertEqual(proc.returncode, 0, msg=proc.stderr or proc.stdout)
        response = json.loads(proc.stdout)
        self.assertTrue(response["research_complete"])

        state_path = root / ".cadence" / "cadence.json"
        persisted = json.loads(state_path.read_text(encoding="utf-8"))
        topic_record = persisted["ideation"]["research_execution"]["topic_status"]["topic-one"]
        # Follow-up demoted to caveated completion instead of queueing another pass.
        self.assertEqual(topic_record["status"], "complete_with_caveats")
        self.assertGreaterEqual(topic_record["passes_attempted"], 2)
        self.assertEqual(persisted["ideation"]["research_execution"]["summary"]["pass_pending"], 0)
def test_total_pass_cap_finishes_remaining_topics(self) -> None:
    """Hitting max_total_passes closes out every remaining topic as complete_with_caveats."""

    def _queued_pass(pass_id: str, status: str, topics: list, started_at: str) -> dict:
        # Common shape for a seeded pass-queue entry.
        return {
            "pass_id": pass_id,
            "round": 1,
            "status": status,
            "topic_ids": topics,
            "planned_effort": 2,
            "created_at": "2026-01-01T00:00:00Z",
            "started_at": started_at,
        }

    with tempfile.TemporaryDirectory() as workdir:
        root = Path(workdir)
        write_state(
            root,
            build_cadence_state(
                pass_queue=[
                    _queued_pass("pass-r1-01", "in_progress", ["topic-one"], "2026-01-01T00:00:01Z"),
                    _queued_pass("pass-r1-02", "pending", ["topic-two"], ""),
                ],
                # One historical pass + a cap of 1 => the global budget is already spent.
                planning_overrides={"max_total_passes": 1},
                pass_history=[
                    {
                        "pass_id": "pass-r0-01",
                        "round": 0,
                        "completed_at": "2026-01-01T00:00:00Z",
                        "pass_summary": "Previous pass",
                        "topics": [],
                        "source_ids": [],
                    }
                ],
            ),
        )

        completion = {
            "pass_summary": "Hit global cap.",
            "topics": [
                {
                    "topic_id": "topic-one",
                    "status": "needs_followup",
                    "summary": "Sufficient for now.",
                    "confidence": "medium",
                    "unresolved_questions": ["Could investigate more"],
                    "sources": [{"url": "https://example.com/5"}],
                }
            ],
        }
        proc = run_complete(root, pass_id="pass-r1-01", payload=completion)

        self.assertEqual(proc.returncode, 0, msg=proc.stderr or proc.stdout)
        response = json.loads(proc.stdout)
        self.assertTrue(response["research_complete"])

        state_path = root / ".cadence" / "cadence.json"
        persisted = json.loads(state_path.read_text(encoding="utf-8"))
        execution = persisted["ideation"]["research_execution"]
        # Pending pass was dropped and both open topics were force-closed with caveats.
        self.assertEqual(execution["summary"]["pass_pending"], 0)
        self.assertEqual(execution["topic_status"]["topic-one"]["status"], "complete_with_caveats")
        self.assertEqual(execution["topic_status"]["topic-two"]["status"], "complete_with_caveats")
def test_start_prioritizes_pending_topics_before_repeat_followups(self) -> None:
    """Planning a new pass picks a never-attempted topic over a much-retried follow-up."""
    with tempfile.TemporaryDirectory() as workdir:
        root = Path(workdir)
        write_state(
            root,
            build_cadence_state(
                pass_queue=[],
                # One topic per pass with ample effort: selection order alone decides.
                planning_overrides={"max_topics_per_pass": 1, "target_effort_per_pass": 10},
                topic_status_overrides={
                    "topic-one": {"status": "needs_followup", "passes_attempted": 5},
                    "topic-two": {"status": "pending", "passes_attempted": 0},
                },
            ),
        )

        proc = run_start(root)

        self.assertEqual(proc.returncode, 0, msg=proc.stderr or proc.stdout)
        response = json.loads(proc.stdout)
        # The fresh (pending, zero-attempt) topic wins over the heavily retried one.
        self.assertEqual(response["pass"]["topic_ids"], ["topic-two"])
# Allow running this test module directly (e.g. `python <file>`) in addition
# to discovery via a test runner.
if __name__ == "__main__":
    unittest.main()
|