ouroboros-ai 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ouroboros-ai might be problematic. Click here for more details.
- ouroboros/__init__.py +15 -0
- ouroboros/__main__.py +9 -0
- ouroboros/bigbang/__init__.py +39 -0
- ouroboros/bigbang/ambiguity.py +464 -0
- ouroboros/bigbang/interview.py +530 -0
- ouroboros/bigbang/seed_generator.py +610 -0
- ouroboros/cli/__init__.py +9 -0
- ouroboros/cli/commands/__init__.py +7 -0
- ouroboros/cli/commands/config.py +79 -0
- ouroboros/cli/commands/init.py +425 -0
- ouroboros/cli/commands/run.py +201 -0
- ouroboros/cli/commands/status.py +85 -0
- ouroboros/cli/formatters/__init__.py +31 -0
- ouroboros/cli/formatters/panels.py +157 -0
- ouroboros/cli/formatters/progress.py +112 -0
- ouroboros/cli/formatters/tables.py +166 -0
- ouroboros/cli/main.py +60 -0
- ouroboros/config/__init__.py +81 -0
- ouroboros/config/loader.py +292 -0
- ouroboros/config/models.py +332 -0
- ouroboros/core/__init__.py +62 -0
- ouroboros/core/ac_tree.py +401 -0
- ouroboros/core/context.py +472 -0
- ouroboros/core/errors.py +246 -0
- ouroboros/core/seed.py +212 -0
- ouroboros/core/types.py +205 -0
- ouroboros/evaluation/__init__.py +110 -0
- ouroboros/evaluation/consensus.py +350 -0
- ouroboros/evaluation/mechanical.py +351 -0
- ouroboros/evaluation/models.py +235 -0
- ouroboros/evaluation/pipeline.py +286 -0
- ouroboros/evaluation/semantic.py +302 -0
- ouroboros/evaluation/trigger.py +278 -0
- ouroboros/events/__init__.py +5 -0
- ouroboros/events/base.py +80 -0
- ouroboros/events/decomposition.py +153 -0
- ouroboros/events/evaluation.py +248 -0
- ouroboros/execution/__init__.py +44 -0
- ouroboros/execution/atomicity.py +451 -0
- ouroboros/execution/decomposition.py +481 -0
- ouroboros/execution/double_diamond.py +1386 -0
- ouroboros/execution/subagent.py +275 -0
- ouroboros/observability/__init__.py +63 -0
- ouroboros/observability/drift.py +383 -0
- ouroboros/observability/logging.py +504 -0
- ouroboros/observability/retrospective.py +338 -0
- ouroboros/orchestrator/__init__.py +78 -0
- ouroboros/orchestrator/adapter.py +391 -0
- ouroboros/orchestrator/events.py +278 -0
- ouroboros/orchestrator/runner.py +597 -0
- ouroboros/orchestrator/session.py +486 -0
- ouroboros/persistence/__init__.py +23 -0
- ouroboros/persistence/checkpoint.py +511 -0
- ouroboros/persistence/event_store.py +183 -0
- ouroboros/persistence/migrations/__init__.py +1 -0
- ouroboros/persistence/migrations/runner.py +100 -0
- ouroboros/persistence/migrations/scripts/001_initial.sql +20 -0
- ouroboros/persistence/schema.py +56 -0
- ouroboros/persistence/uow.py +230 -0
- ouroboros/providers/__init__.py +28 -0
- ouroboros/providers/base.py +133 -0
- ouroboros/providers/claude_code_adapter.py +212 -0
- ouroboros/providers/litellm_adapter.py +316 -0
- ouroboros/py.typed +0 -0
- ouroboros/resilience/__init__.py +67 -0
- ouroboros/resilience/lateral.py +595 -0
- ouroboros/resilience/stagnation.py +727 -0
- ouroboros/routing/__init__.py +60 -0
- ouroboros/routing/complexity.py +272 -0
- ouroboros/routing/downgrade.py +664 -0
- ouroboros/routing/escalation.py +340 -0
- ouroboros/routing/router.py +204 -0
- ouroboros/routing/tiers.py +247 -0
- ouroboros/secondary/__init__.py +40 -0
- ouroboros/secondary/scheduler.py +467 -0
- ouroboros/secondary/todo_registry.py +483 -0
- ouroboros_ai-0.1.0.dist-info/METADATA +607 -0
- ouroboros_ai-0.1.0.dist-info/RECORD +81 -0
- ouroboros_ai-0.1.0.dist-info/WHEEL +4 -0
- ouroboros_ai-0.1.0.dist-info/entry_points.txt +2 -0
- ouroboros_ai-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,481 @@
|
|
|
1
|
+
"""AC decomposition for hierarchical task breakdown.
|
|
2
|
+
|
|
3
|
+
Decomposes non-atomic Acceptance Criteria into smaller, manageable child ACs.
|
|
4
|
+
Uses LLM to intelligently break down complex tasks based on:
|
|
5
|
+
- Insights from the Discover phase
|
|
6
|
+
- Parent AC context
|
|
7
|
+
- Domain-specific decomposition strategies
|
|
8
|
+
|
|
9
|
+
The decomposition follows these rules:
|
|
10
|
+
- Each decomposition produces 2-5 child ACs
|
|
11
|
+
- Max depth is 5 levels (NFR10)
|
|
12
|
+
- Context is compressed at depth 3+
|
|
13
|
+
- Cyclic decomposition is prevented
|
|
14
|
+
|
|
15
|
+
Usage:
|
|
16
|
+
from ouroboros.execution.decomposition import decompose_ac
|
|
17
|
+
|
|
18
|
+
result = await decompose_ac(
|
|
19
|
+
ac_content="Implement user authentication system",
|
|
20
|
+
ac_id="ac_123",
|
|
21
|
+
execution_id="exec_456",
|
|
22
|
+
depth=0,
|
|
23
|
+
llm_adapter=adapter,
|
|
24
|
+
discover_insights="User needs login, registration, password reset...",
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
if result.is_ok:
|
|
28
|
+
for child_ac in result.value.child_acs:
|
|
29
|
+
print(f"Child AC: {child_ac}")
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
from __future__ import annotations
|
|
33
|
+
|
|
34
|
+
from dataclasses import dataclass, field
|
|
35
|
+
import json
|
|
36
|
+
import re
|
|
37
|
+
from typing import TYPE_CHECKING, Any
|
|
38
|
+
from uuid import uuid4
|
|
39
|
+
|
|
40
|
+
from ouroboros.core.errors import ProviderError, ValidationError
|
|
41
|
+
from ouroboros.core.types import Result
|
|
42
|
+
from ouroboros.events.base import BaseEvent
|
|
43
|
+
from ouroboros.events.decomposition import (
|
|
44
|
+
create_ac_decomposed_event,
|
|
45
|
+
create_ac_decomposition_failed_event,
|
|
46
|
+
)
|
|
47
|
+
from ouroboros.observability.logging import get_logger
|
|
48
|
+
|
|
49
|
+
if TYPE_CHECKING:
|
|
50
|
+
from ouroboros.providers.litellm_adapter import LiteLLMAdapter
|
|
51
|
+
|
|
52
|
+
# Module-level structured logger (structlog-style: event name + kwargs).
log = get_logger(__name__)


# Decomposition constraints
MIN_CHILDREN = 2  # Minimum child ACs a decomposition must produce.
MAX_CHILDREN = 5  # Maximum child ACs a decomposition may produce.
MAX_DEPTH = 5  # Maximum AC tree depth (NFR10); decomposition refuses beyond this.
COMPRESSION_DEPTH = 3  # Depth at which discover insights start being truncated.
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
@dataclass(frozen=True, slots=True)
class DecompositionResult:
    """Result of AC decomposition.

    Attributes:
        parent_ac_id: ID of the parent AC that was decomposed.
        child_acs: Tuple of child AC content strings.
        child_ac_ids: Tuple of generated child AC IDs (parallel to child_acs).
        reasoning: LLM explanation of decomposition strategy.
        events: Events emitted during decomposition.

    NOTE(review): ``events`` is a mutable list on an otherwise frozen dataclass,
    so instances are not deeply immutable — confirm whether a tuple was intended.
    """

    parent_ac_id: str
    child_acs: tuple[str, ...]
    child_ac_ids: tuple[str, ...]
    reasoning: str
    events: list[BaseEvent] = field(default_factory=list)
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
class DecompositionError(ValidationError):
    """Error during AC decomposition.

    Extends ValidationError with decomposition-specific context: the AC being
    decomposed, its depth in the AC tree, and a machine-readable error type
    (e.g. "max_depth_reached", "cyclic_decomposition", "parse_failure").
    """

    def __init__(
        self,
        message: str,
        *,
        ac_id: str | None = None,
        depth: int | None = None,
        error_type: str = "decomposition_error",
        details: dict[str, Any] | None = None,
    ) -> None:
        # Map onto the generic ValidationError slots: the error type plays the
        # role of the failing "field" and the AC id of its "value".
        super().__init__(message, field=error_type, value=ac_id, details=details)
        # Keep decomposition context directly on the instance for structured
        # handling by callers (e.g. building failure events).
        self.ac_id = ac_id
        self.depth = depth
        self.error_type = error_type
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
# LLM prompts for decomposition
# System prompt: instructs the model on MECE-style decomposition principles
# and the 2-5 child constraint (mirrors MIN_CHILDREN/MAX_CHILDREN).
DECOMPOSITION_SYSTEM_PROMPT = """You are an expert at breaking down complex acceptance criteria into smaller, actionable tasks.

When decomposing an AC, follow these principles:
1. MECE (Mutually Exclusive, Collectively Exhaustive) - children should not overlap and should cover the full scope
2. Each child should be simpler than the parent
3. Each child should be independently executable
4. Use consistent granularity across children
5. Maintain clear boundaries between children

Produce 2-5 child ACs. Each should be:
- Specific and actionable
- Independently verifiable
- Clear about its scope"""

# User template: filled via str.format with ac_content, discover_insights,
# depth, and max_depth. Doubled braces ({{ }}) render as literal braces in
# the JSON example, not as format fields.
DECOMPOSITION_USER_TEMPLATE = """Parent Acceptance Criterion:
{ac_content}

Insights from Discovery Phase:
{discover_insights}

Current Depth: {depth} / {max_depth}

Decompose this AC into 2-5 smaller, focused child ACs.

Respond with a JSON object:
{{
  "children": [
    "Child AC 1: specific, actionable description",
    "Child AC 2: specific, actionable description",
    ...
  ],
  "reasoning": "Brief explanation of your decomposition strategy"
}}

Only respond with the JSON, no other text."""
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def _extract_json_from_response(response: str) -> dict[str, Any] | None:
|
|
141
|
+
"""Extract JSON from LLM response, handling various formats.
|
|
142
|
+
|
|
143
|
+
Args:
|
|
144
|
+
response: Raw LLM response text.
|
|
145
|
+
|
|
146
|
+
Returns:
|
|
147
|
+
Parsed JSON dict or None if parsing fails.
|
|
148
|
+
"""
|
|
149
|
+
# Try direct parsing first
|
|
150
|
+
try:
|
|
151
|
+
result = json.loads(response.strip())
|
|
152
|
+
if isinstance(result, dict):
|
|
153
|
+
return result
|
|
154
|
+
except json.JSONDecodeError:
|
|
155
|
+
pass
|
|
156
|
+
|
|
157
|
+
# Try to find JSON in markdown code blocks
|
|
158
|
+
json_pattern = r"```(?:json)?\s*(.*?)```"
|
|
159
|
+
matches = re.findall(json_pattern, response, re.DOTALL)
|
|
160
|
+
for match in matches:
|
|
161
|
+
try:
|
|
162
|
+
result = json.loads(match.strip())
|
|
163
|
+
if isinstance(result, dict):
|
|
164
|
+
return result
|
|
165
|
+
except json.JSONDecodeError:
|
|
166
|
+
continue
|
|
167
|
+
|
|
168
|
+
# Try to find JSON-like content with array
|
|
169
|
+
brace_pattern = r"\{[^{}]*\"children\"\s*:\s*\[[^\]]+\][^{}]*\}"
|
|
170
|
+
matches = re.findall(brace_pattern, response, re.DOTALL)
|
|
171
|
+
for match in matches:
|
|
172
|
+
try:
|
|
173
|
+
result = json.loads(match.strip())
|
|
174
|
+
if isinstance(result, dict):
|
|
175
|
+
return result
|
|
176
|
+
except json.JSONDecodeError:
|
|
177
|
+
continue
|
|
178
|
+
|
|
179
|
+
return None
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
def _validate_children(
    children: list[str],
    parent_content: str,
    ac_id: str,
    depth: int,
) -> Result[None, DecompositionError]:
    """Check that a proposed decomposition is structurally sound.

    Verifies, in order: the child count lies within
    [MIN_CHILDREN, MAX_CHILDREN]; no child merely restates the parent
    (which would decompose forever); and no child is blank.

    Args:
        children: List of child AC contents.
        parent_content: Parent AC content for cycle detection.
        ac_id: Parent AC ID.
        depth: Current depth.

    Returns:
        Result with None on success or DecompositionError on failure.
    """

    def _fail(message: str, error_type: str) -> Result[None, DecompositionError]:
        # Every failure shares the same AC/depth context; only the message
        # and machine-readable type differ.
        return Result.err(
            DecompositionError(
                message,
                ac_id=ac_id,
                depth=depth,
                error_type=error_type,
            )
        )

    count = len(children)
    if count < MIN_CHILDREN:
        return _fail(
            f"Decomposition produced only {count} children, minimum is {MIN_CHILDREN}",
            "insufficient_children",
        )
    if count > MAX_CHILDREN:
        return _fail(
            f"Decomposition produced {count} children, maximum is {MAX_CHILDREN}",
            "too_many_children",
        )

    # A child textually identical to its parent (case/whitespace-insensitive)
    # signals cyclic decomposition.
    parent_normalized = parent_content.strip().lower()
    for position, child in enumerate(children, start=1):
        if child.strip().lower() == parent_normalized:
            return _fail(
                f"Child {position} is identical to parent (cyclic decomposition)",
                "cyclic_decomposition",
            )

    for position, child in enumerate(children, start=1):
        if not child.strip():
            return _fail(f"Child {position} is empty", "empty_child")

    return Result.ok(None)
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
def _compress_context(discover_insights: str, depth: int, max_length: int = 500) -> str:
    """Compress discovery insights once the AC tree is deep enough.

    Below COMPRESSION_DEPTH the insights pass through untouched; at or beyond
    it they are truncated to *max_length* characters to keep prompts small.
    The truncation limit, previously a hard-coded 500, is now a parameter
    (default unchanged, so existing callers behave identically).

    Args:
        discover_insights: Original insights from Discover phase.
        depth: Current depth in AC tree.
        max_length: Maximum characters retained at COMPRESSION_DEPTH and beyond.

    Returns:
        Compressed or original insights string.
    """
    if depth < COMPRESSION_DEPTH:
        return discover_insights

    # At depth COMPRESSION_DEPTH+, keep only the leading max_length characters.
    if len(discover_insights) > max_length:
        compressed = discover_insights[:max_length] + "... [compressed for depth]"
        log.debug(
            "decomposition.context.compressed",
            original_length=len(discover_insights),
            compressed_length=len(compressed),
            depth=depth,
        )
        return compressed

    return discover_insights
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
async def decompose_ac(
    ac_content: str,
    ac_id: str,
    execution_id: str,
    depth: int,
    llm_adapter: LiteLLMAdapter,
    discover_insights: str = "",
    *,
    model: str = "openrouter/anthropic/claude-3.5-sonnet",
) -> Result[DecompositionResult, DecompositionError | ProviderError]:
    """Decompose a non-atomic AC into child ACs using an LLM.

    Uses the Discover phase insights to inform intelligent decomposition.
    Enforces max depth and prevents cyclic decomposition. All failures are
    reported through the returned ``Result`` rather than raised.

    Args:
        ac_content: The AC content to decompose.
        ac_id: Unique identifier for the parent AC.
        execution_id: Associated execution ID.
        depth: Current depth in AC tree (0-indexed).
        llm_adapter: LLM adapter for making completion requests.
        discover_insights: Insights from Discover phase (compressed at depth 3+).
        model: Model to use for decomposition.

    Returns:
        Result containing DecompositionResult on success; on the error side,
        a DecompositionError (max depth, parse failure, validation failure,
        processing error) or a ProviderError (LLM completion failure).
    """
    log.info(
        "decomposition.started",
        ac_id=ac_id,
        execution_id=execution_id,
        depth=depth,
        ac_length=len(ac_content),
    )

    # Check max depth (NFR10): refuse to decompose at or beyond MAX_DEPTH.
    if depth >= MAX_DEPTH:
        error = DecompositionError(
            f"Max depth {MAX_DEPTH} reached, cannot decompose further",
            ac_id=ac_id,
            depth=depth,
            error_type="max_depth_reached",
        )
        # NOTE(review): this failure event is constructed but never emitted,
        # stored, or attached to the returned error — confirm whether it
        # should be published (same pattern recurs on every failure path).
        _failed_event = create_ac_decomposition_failed_event(
            ac_id=ac_id,
            execution_id=execution_id,
            error_message=str(error),
            error_type="max_depth_reached",
            depth=depth,
        )
        log.warning(
            "decomposition.max_depth_reached",
            ac_id=ac_id,
            depth=depth,
        )
        return Result.err(error)

    # Compress context at depth 3+ to keep prompt size bounded.
    compressed_insights = _compress_context(discover_insights, depth)

    # Build LLM request.
    # Local import — presumably to avoid a circular import with the providers
    # package; confirm before hoisting to module level.
    from ouroboros.providers.base import CompletionConfig, Message, MessageRole

    messages = [
        Message(role=MessageRole.SYSTEM, content=DECOMPOSITION_SYSTEM_PROMPT),
        Message(
            role=MessageRole.USER,
            # Empty insights become a readable placeholder in the prompt.
            content=DECOMPOSITION_USER_TEMPLATE.format(
                ac_content=ac_content,
                discover_insights=compressed_insights or "No specific insights available.",
                depth=depth,
                max_depth=MAX_DEPTH,
            ),
        ),
    ]

    config = CompletionConfig(
        model=model,
        temperature=0.5,  # Balanced creativity and consistency
        max_tokens=1000,
    )

    llm_result = await llm_adapter.complete(messages, config)

    # LLM transport/completion failure -> ProviderError on the Result.
    if llm_result.is_err:
        llm_error = ProviderError(
            f"LLM decomposition failed: {llm_result.error}",
            provider="litellm",
        )
        # NOTE(review): event constructed but discarded (see above).
        _failed_event = create_ac_decomposition_failed_event(
            ac_id=ac_id,
            execution_id=execution_id,
            error_message=str(llm_error),
            error_type="llm_failure",
            depth=depth,
        )
        log.error(
            "decomposition.llm_failed",
            ac_id=ac_id,
            error=str(llm_result.error),
        )
        return Result.err(llm_error)

    # Parse LLM response (direct JSON, fenced block, or embedded object).
    response_text = llm_result.value.content
    parsed = _extract_json_from_response(response_text)

    if parsed is None:
        error = DecompositionError(
            "Failed to parse LLM decomposition response",
            ac_id=ac_id,
            depth=depth,
            error_type="parse_failure",
            details={"response_preview": response_text[:200]},
        )
        # NOTE(review): event constructed but discarded (see above).
        _failed_event = create_ac_decomposition_failed_event(
            ac_id=ac_id,
            execution_id=execution_id,
            error_message=str(error),
            error_type="parse_failure",
            depth=depth,
        )
        log.warning(
            "decomposition.parse_failed",
            ac_id=ac_id,
            response_preview=response_text[:200],
        )
        return Result.err(error)

    try:
        children = parsed.get("children", [])
        reasoning = parsed.get("reasoning", "LLM decomposition")

        # Ensure children is a list of strings; a non-list is routed to the
        # processing_error handler below via TypeError.
        if not isinstance(children, list):
            raise TypeError("children must be a list")
        children = [str(c) for c in children]

        # Validate children (count bounds, cycles, blanks).
        validation_result = _validate_children(children, ac_content, ac_id, depth)
        if validation_result.is_err:
            # NOTE(review): event constructed but discarded (see above).
            _failed_event = create_ac_decomposition_failed_event(
                ac_id=ac_id,
                execution_id=execution_id,
                error_message=str(validation_result.error),
                error_type=validation_result.error.error_type,
                depth=depth,
            )
            return Result.err(validation_result.error)

        # Generate child IDs (short random suffix keeps IDs log-friendly).
        child_ac_ids = tuple(f"ac_{uuid4().hex[:12]}" for _ in children)
        child_acs = tuple(children)

        # Create success event; this one IS propagated via the result's
        # events list, unlike the failure events above.
        decomposed_event = create_ac_decomposed_event(
            parent_ac_id=ac_id,
            execution_id=execution_id,
            child_ac_ids=list(child_ac_ids),
            child_contents=list(child_acs),
            depth=depth,
            reasoning=reasoning,
        )

        result = DecompositionResult(
            parent_ac_id=ac_id,
            child_acs=child_acs,
            child_ac_ids=child_ac_ids,
            reasoning=reasoning,
            events=[decomposed_event],
        )

        log.info(
            "decomposition.completed",
            ac_id=ac_id,
            child_count=len(child_acs),
            reasoning=reasoning[:100],
        )

        return Result.ok(result)

    except (ValueError, TypeError, KeyError) as e:
        # Malformed-but-parseable JSON (wrong types, missing pieces) lands here.
        error = DecompositionError(
            f"Failed to process decomposition response: {e}",
            ac_id=ac_id,
            depth=depth,
            error_type="processing_error",
            details={"exception": str(e)},
        )
        # NOTE(review): event constructed but discarded (see above).
        _failed_event = create_ac_decomposition_failed_event(
            ac_id=ac_id,
            execution_id=execution_id,
            error_message=str(error),
            error_type="processing_error",
            depth=depth,
        )
        log.error(
            "decomposition.processing_error",
            ac_id=ac_id,
            error=str(e),
        )
        return Result.err(error)
|