ouroboros_ai-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ouroboros-ai might be problematic.

Files changed (81)
  1. ouroboros/__init__.py +15 -0
  2. ouroboros/__main__.py +9 -0
  3. ouroboros/bigbang/__init__.py +39 -0
  4. ouroboros/bigbang/ambiguity.py +464 -0
  5. ouroboros/bigbang/interview.py +530 -0
  6. ouroboros/bigbang/seed_generator.py +610 -0
  7. ouroboros/cli/__init__.py +9 -0
  8. ouroboros/cli/commands/__init__.py +7 -0
  9. ouroboros/cli/commands/config.py +79 -0
  10. ouroboros/cli/commands/init.py +425 -0
  11. ouroboros/cli/commands/run.py +201 -0
  12. ouroboros/cli/commands/status.py +85 -0
  13. ouroboros/cli/formatters/__init__.py +31 -0
  14. ouroboros/cli/formatters/panels.py +157 -0
  15. ouroboros/cli/formatters/progress.py +112 -0
  16. ouroboros/cli/formatters/tables.py +166 -0
  17. ouroboros/cli/main.py +60 -0
  18. ouroboros/config/__init__.py +81 -0
  19. ouroboros/config/loader.py +292 -0
  20. ouroboros/config/models.py +332 -0
  21. ouroboros/core/__init__.py +62 -0
  22. ouroboros/core/ac_tree.py +401 -0
  23. ouroboros/core/context.py +472 -0
  24. ouroboros/core/errors.py +246 -0
  25. ouroboros/core/seed.py +212 -0
  26. ouroboros/core/types.py +205 -0
  27. ouroboros/evaluation/__init__.py +110 -0
  28. ouroboros/evaluation/consensus.py +350 -0
  29. ouroboros/evaluation/mechanical.py +351 -0
  30. ouroboros/evaluation/models.py +235 -0
  31. ouroboros/evaluation/pipeline.py +286 -0
  32. ouroboros/evaluation/semantic.py +302 -0
  33. ouroboros/evaluation/trigger.py +278 -0
  34. ouroboros/events/__init__.py +5 -0
  35. ouroboros/events/base.py +80 -0
  36. ouroboros/events/decomposition.py +153 -0
  37. ouroboros/events/evaluation.py +248 -0
  38. ouroboros/execution/__init__.py +44 -0
  39. ouroboros/execution/atomicity.py +451 -0
  40. ouroboros/execution/decomposition.py +481 -0
  41. ouroboros/execution/double_diamond.py +1386 -0
  42. ouroboros/execution/subagent.py +275 -0
  43. ouroboros/observability/__init__.py +63 -0
  44. ouroboros/observability/drift.py +383 -0
  45. ouroboros/observability/logging.py +504 -0
  46. ouroboros/observability/retrospective.py +338 -0
  47. ouroboros/orchestrator/__init__.py +78 -0
  48. ouroboros/orchestrator/adapter.py +391 -0
  49. ouroboros/orchestrator/events.py +278 -0
  50. ouroboros/orchestrator/runner.py +597 -0
  51. ouroboros/orchestrator/session.py +486 -0
  52. ouroboros/persistence/__init__.py +23 -0
  53. ouroboros/persistence/checkpoint.py +511 -0
  54. ouroboros/persistence/event_store.py +183 -0
  55. ouroboros/persistence/migrations/__init__.py +1 -0
  56. ouroboros/persistence/migrations/runner.py +100 -0
  57. ouroboros/persistence/migrations/scripts/001_initial.sql +20 -0
  58. ouroboros/persistence/schema.py +56 -0
  59. ouroboros/persistence/uow.py +230 -0
  60. ouroboros/providers/__init__.py +28 -0
  61. ouroboros/providers/base.py +133 -0
  62. ouroboros/providers/claude_code_adapter.py +212 -0
  63. ouroboros/providers/litellm_adapter.py +316 -0
  64. ouroboros/py.typed +0 -0
  65. ouroboros/resilience/__init__.py +67 -0
  66. ouroboros/resilience/lateral.py +595 -0
  67. ouroboros/resilience/stagnation.py +727 -0
  68. ouroboros/routing/__init__.py +60 -0
  69. ouroboros/routing/complexity.py +272 -0
  70. ouroboros/routing/downgrade.py +664 -0
  71. ouroboros/routing/escalation.py +340 -0
  72. ouroboros/routing/router.py +204 -0
  73. ouroboros/routing/tiers.py +247 -0
  74. ouroboros/secondary/__init__.py +40 -0
  75. ouroboros/secondary/scheduler.py +467 -0
  76. ouroboros/secondary/todo_registry.py +483 -0
  77. ouroboros_ai-0.1.0.dist-info/METADATA +607 -0
  78. ouroboros_ai-0.1.0.dist-info/RECORD +81 -0
  79. ouroboros_ai-0.1.0.dist-info/WHEEL +4 -0
  80. ouroboros_ai-0.1.0.dist-info/entry_points.txt +2 -0
  81. ouroboros_ai-0.1.0.dist-info/licenses/LICENSE +21 -0
ouroboros/providers/claude_code_adapter.py ADDED
@@ -0,0 +1,212 @@
+ """Claude Code adapter for LLM completion using Claude Agent SDK.
+
+ This adapter uses the Claude Agent SDK to make completion requests,
+ leveraging the user's Claude Code Max Plan authentication instead of
+ requiring separate API keys.
+
+ Usage:
+     adapter = ClaudeCodeAdapter()
+     result = await adapter.complete(
+         messages=[Message(role=MessageRole.USER, content="Hello!")],
+         config=CompletionConfig(model="claude-sonnet-4-20250514"),
+     )
+ """
+
+ from __future__ import annotations
+
+ import os
+
+ import structlog
+
+ from ouroboros.core.errors import ProviderError
+ from ouroboros.core.types import Result
+ from ouroboros.providers.base import (
+     CompletionConfig,
+     CompletionResponse,
+     Message,
+     MessageRole,
+     UsageInfo,
+ )
+
+ log = structlog.get_logger(__name__)
+
+
+ class ClaudeCodeAdapter:
+     """LLM adapter using Claude Agent SDK (Claude Code Max Plan).
+
+     This adapter provides the same interface as LiteLLMAdapter but uses
+     the Claude Agent SDK under the hood. This allows users to leverage
+     their Claude Code Max Plan subscription without needing separate API keys.
+
+     Example:
+         adapter = ClaudeCodeAdapter()
+         result = await adapter.complete(
+             messages=[Message(role=MessageRole.USER, content="Hello!")],
+             config=CompletionConfig(model="claude-sonnet-4-20250514"),
+         )
+         if result.is_ok:
+             print(result.value.content)
+     """
+
+     def __init__(
+         self,
+         permission_mode: str = "default",
+     ) -> None:
+         """Initialize Claude Code adapter.
+
+         Args:
+             permission_mode: Permission mode for SDK operations.
+                 - "default": Standard permissions
+                 - "acceptEdits": Auto-approve edits (not needed for interview)
+         """
+         self._permission_mode: str = permission_mode
+         log.info(
+             "claude_code_adapter.initialized",
+             permission_mode=permission_mode,
+         )
+
+     async def complete(
+         self,
+         messages: list[Message],
+         config: CompletionConfig,
+     ) -> Result[CompletionResponse, ProviderError]:
+         """Make a completion request via Claude Agent SDK.
+
+         Args:
+             messages: The conversation messages to send.
+             config: Configuration for the completion request.
+
+         Returns:
+             Result containing either the completion response or a ProviderError.
+         """
+         try:
+             # Lazy import to avoid loading SDK at module import time
+             from claude_agent_sdk import ClaudeAgentOptions, query
+         except ImportError as e:
+             log.error("claude_code_adapter.sdk_not_installed", error=str(e))
+             return Result.err(
+                 ProviderError(
+                     message="Claude Agent SDK is not installed. Run: pip install claude-agent-sdk",
+                     details={"import_error": str(e)},
+                 )
+             )
+
+         # Build prompt from messages
+         prompt = self._build_prompt(messages)
+
+         log.debug(
+             "claude_code_adapter.request_started",
+             prompt_preview=prompt[:100],
+             message_count=len(messages),
+         )
+
+         try:
+             # Build options - no tools needed for interview (just conversation)
+             # Type ignore needed because SDK uses Literal type but we store as str
+             options = ClaudeAgentOptions(
+                 allowed_tools=[],  # No tools - pure conversation
+                 permission_mode=self._permission_mode,  # type: ignore[arg-type]
+                 cwd=os.getcwd(),
+             )
+
+             # Collect the response
+             content = ""
+             session_id = None
+
+             async for sdk_message in query(prompt=prompt, options=options):
+                 class_name = type(sdk_message).__name__
+
+                 if class_name == "SystemMessage":
+                     # Capture session ID from init
+                     msg_data = getattr(sdk_message, "data", {})
+                     session_id = msg_data.get("session_id")
+
+                 elif class_name == "AssistantMessage":
+                     # Extract text content
+                     content_blocks = getattr(sdk_message, "content", [])
+                     for block in content_blocks:
+                         if type(block).__name__ == "TextBlock":
+                             content += getattr(block, "text", "")
+
+                 elif class_name == "ResultMessage":
+                     # Final result - use result content if we don't have content yet
+                     if not content:
+                         content = getattr(sdk_message, "result", "") or ""
+
+                     # Check for errors
+                     is_error = getattr(sdk_message, "is_error", False)
+                     if is_error:
+                         error_msg = content or "Unknown error from Claude Agent SDK"
+                         log.warning(
+                             "claude_code_adapter.sdk_error",
+                             error=error_msg,
+                         )
+                         return Result.err(
+                             ProviderError(
+                                 message=error_msg,
+                                 details={"session_id": session_id},
+                             )
+                         )
+
+             log.info(
+                 "claude_code_adapter.request_completed",
+                 content_length=len(content),
+                 session_id=session_id,
+             )
+
+             # Build response
+             response = CompletionResponse(
+                 content=content,
+                 model=config.model,
+                 usage=UsageInfo(
+                     prompt_tokens=0,  # SDK doesn't expose token counts
+                     completion_tokens=0,
+                     total_tokens=0,
+                 ),
+                 finish_reason="stop",
+                 raw_response={"session_id": session_id},
+             )
+
+             return Result.ok(response)
+
+         except Exception as e:
+             log.exception(
+                 "claude_code_adapter.request_failed",
+                 error=str(e),
+             )
+             return Result.err(
+                 ProviderError(
+                     message=f"Claude Agent SDK request failed: {e}",
+                     details={"error_type": type(e).__name__},
+                 )
+             )
+
+     def _build_prompt(self, messages: list[Message]) -> str:
+         """Build a single prompt string from messages.
+
+         The Claude Agent SDK expects a single prompt string, so we combine
+         the conversation history into a formatted prompt.
+
+         Args:
+             messages: List of conversation messages.
+
+         Returns:
+             Formatted prompt string.
+         """
+         parts: list[str] = []
+
+         for msg in messages:
+             if msg.role == MessageRole.SYSTEM:
+                 parts.append(f"<system>\n{msg.content}\n</system>\n")
+             elif msg.role == MessageRole.USER:
+                 parts.append(f"User: {msg.content}\n")
+             elif msg.role == MessageRole.ASSISTANT:
+                 parts.append(f"Assistant: {msg.content}\n")
+
+         # Add instruction to respond
+         parts.append("\nPlease respond to the above conversation.")
+
+         return "\n".join(parts)
+
+
+ __all__ = ["ClaudeCodeAdapter"]
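
For reference, a short sketch (not part of the diff) of the flattened prompt that _build_prompt produces. It only uses names visible above; Message and MessageRole are the types ouroboros/providers/base.py exports, and _build_prompt is called directly here purely for illustration even though it is a private method.

from ouroboros.providers.base import Message, MessageRole
from ouroboros.providers.claude_code_adapter import ClaudeCodeAdapter

# Flatten a two-message conversation exactly as complete() does before
# handing the string to the SDK's query() call.
prompt = ClaudeCodeAdapter()._build_prompt(
    [
        Message(role=MessageRole.SYSTEM, content="You are terse."),
        Message(role=MessageRole.USER, content="Hello!"),
    ]
)
# prompt now reads:
#   <system>
#   You are terse.
#   </system>
#
#   User: Hello!
#
#
#   Please respond to the above conversation.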
ouroboros/providers/litellm_adapter.py ADDED
@@ -0,0 +1,316 @@
+ """LiteLLM adapter for unified LLM provider access.
+
+ This module provides the LiteLLMAdapter class that implements the LLMAdapter
+ protocol using LiteLLM for multi-provider support including OpenRouter.
+ """
+
+ import os
+ from typing import Any
+
+ import litellm
+ import stamina
+ import structlog
+
+ from ouroboros.core.errors import ProviderError
+ from ouroboros.core.types import Result
+ from ouroboros.providers.base import (
+     CompletionConfig,
+     CompletionResponse,
+     Message,
+     UsageInfo,
+ )
+
+ log = structlog.get_logger()
+
+ # LiteLLM exceptions that should trigger retries
+ RETRIABLE_EXCEPTIONS = (
+     litellm.RateLimitError,
+     litellm.ServiceUnavailableError,
+     litellm.Timeout,
+     litellm.APIConnectionError,
+ )
+
+
+ class LiteLLMAdapter:
+     """LLM adapter using LiteLLM for unified provider access.
+
+     This adapter supports multiple LLM providers through LiteLLM's unified
+     interface, including OpenRouter for model routing.
+
+     API keys are resolved with the following priority:
+     1. Explicit api_key parameter (overrides environment)
+     2. Environment variables: OPENROUTER_API_KEY, OPENAI_API_KEY, ANTHROPIC_API_KEY
+
+     Example:
+         # Using environment variables (recommended)
+         adapter = LiteLLMAdapter()
+
+         # Or with explicit API key
+         adapter = LiteLLMAdapter(api_key="sk-...")
+
+         result = await adapter.complete(
+             messages=[Message(role=MessageRole.USER, content="Hello!")],
+             config=CompletionConfig(model="openrouter/openai/gpt-4"),
+         )
+     """
+
+     def __init__(
+         self,
+         *,
+         api_key: str | None = None,
+         api_base: str | None = None,
+         timeout: float = 60.0,
+         max_retries: int = 3,
+     ) -> None:
+         """Initialize the LiteLLM adapter.
+
+         Args:
+             api_key: Optional API key (overrides environment variables).
+             api_base: Optional API base URL for custom endpoints.
+             timeout: Request timeout in seconds. Default 60.0.
+             max_retries: Maximum number of retries for transient errors. Default 3.
+         """
+         self._api_key = api_key
+         self._api_base = api_base
+         self._timeout = timeout
+         self._max_retries = max_retries
+
+     def _get_api_key(self, model: str) -> str | None:
+         """Get the appropriate API key for the model.
+
+         Priority:
+         1. Explicit api_key from constructor
+         2. Environment variables based on model prefix
+
+         Args:
+             model: The model identifier.
+
+         Returns:
+             The API key or None if not found.
+         """
+         if self._api_key:
+             return self._api_key
+
+         # Check environment variables based on model prefix
+         if model.startswith("openrouter/"):
+             return os.environ.get("OPENROUTER_API_KEY")
+         if model.startswith("anthropic/") or model.startswith("claude"):
+             return os.environ.get("ANTHROPIC_API_KEY")
+         if model.startswith("openai/") or model.startswith("gpt"):
+             return os.environ.get("OPENAI_API_KEY")
+
+         # Default to OpenRouter for unknown models
+         return os.environ.get("OPENROUTER_API_KEY")
+
+     def _build_completion_kwargs(
+         self,
+         messages: list[Message],
+         config: CompletionConfig,
+     ) -> dict[str, Any]:
+         """Build the kwargs for litellm.acompletion.
+
+         Args:
+             messages: The conversation messages.
+             config: The completion configuration.
+
+         Returns:
+             Dictionary of kwargs for litellm.acompletion.
+         """
+         kwargs: dict[str, Any] = {
+             "model": config.model,
+             "messages": [m.to_dict() for m in messages],
+             "temperature": config.temperature,
+             "max_tokens": config.max_tokens,
+             "top_p": config.top_p,
+             "timeout": self._timeout,
+         }
+
+         if config.stop:
+             kwargs["stop"] = config.stop
+
+         api_key = self._get_api_key(config.model)
+         if api_key:
+             kwargs["api_key"] = api_key
+
+         if self._api_base:
+             kwargs["api_base"] = self._api_base
+
+         return kwargs
+
+     async def _raw_complete(
+         self,
+         messages: list[Message],
+         config: CompletionConfig,
+     ) -> litellm.ModelResponse:
+         """Make the raw completion call.
+
+         This method is wrapped with stamina retry (see complete()) for
+         transient errors; exceptions bubble up for stamina to handle.
+
+         Args:
+             messages: The conversation messages.
+             config: The completion configuration.
+
+         Returns:
+             The raw LiteLLM response.
+
+         Raises:
+             litellm exceptions for API errors.
+         """
+         kwargs = self._build_completion_kwargs(messages, config)
+
+         log.debug(
+             "llm.request.started",
+             model=config.model,
+             message_count=len(messages),
+             temperature=config.temperature,
+             max_tokens=config.max_tokens,
+         )
+
+         response = await litellm.acompletion(**kwargs)
+
+         log.debug(
+             "llm.request.completed",
+             model=config.model,
+             finish_reason=response.choices[0].finish_reason,
+         )
+
+         return response
+
+     def _parse_response(
+         self,
+         response: litellm.ModelResponse,
+         config: CompletionConfig,
+     ) -> CompletionResponse:
+         """Parse the LiteLLM response into a CompletionResponse.
+
+         Args:
+             response: The raw LiteLLM response.
+             config: The completion configuration.
+
+         Returns:
+             Parsed CompletionResponse.
+         """
+         choice = response.choices[0]
+         usage = response.usage
+
+         return CompletionResponse(
+             content=choice.message.content or "",
+             model=response.model or config.model,
+             usage=UsageInfo(
+                 prompt_tokens=usage.prompt_tokens if usage else 0,
+                 completion_tokens=usage.completion_tokens if usage else 0,
+                 total_tokens=usage.total_tokens if usage else 0,
+             ),
+             finish_reason=choice.finish_reason or "stop",
+             raw_response=response.model_dump() if hasattr(response, "model_dump") else {},
+         )
+
+     async def complete(
+         self,
+         messages: list[Message],
+         config: CompletionConfig,
+     ) -> Result[CompletionResponse, ProviderError]:
+         """Make a completion request to the LLM provider.
+
+         This method handles retries internally using stamina and converts
+         all expected failures to Result.err(ProviderError).
+
+         Args:
+             messages: The conversation messages to send.
+             config: Configuration for the completion request.
+
+         Returns:
+             Result containing either the completion response or a ProviderError.
+         """
+         # Create the retry-decorated function with the instance's max_retries
+         @stamina.retry(
+             on=RETRIABLE_EXCEPTIONS,
+             attempts=self._max_retries,
+             wait_initial=1.0,
+             wait_max=10.0,
+             wait_jitter=1.0,
+         )
+         async def _with_retry() -> litellm.ModelResponse:
+             return await self._raw_complete(messages, config)
+
+         try:
+             response = await _with_retry()
+             return Result.ok(self._parse_response(response, config))
+         except RETRIABLE_EXCEPTIONS as e:
+             # All retries exhausted
+             log.warning(
+                 "llm.request.failed.retries_exhausted",
+                 model=config.model,
+                 error=str(e),
+                 max_retries=self._max_retries,
+             )
+             return Result.err(
+                 ProviderError.from_exception(e, provider=self._extract_provider(config.model))
+             )
+         except litellm.APIError as e:
+             # Non-retriable API error
+             log.warning(
+                 "llm.request.failed.api_error",
+                 model=config.model,
+                 error=str(e),
+                 status_code=getattr(e, "status_code", None),
+             )
+             return Result.err(
+                 ProviderError.from_exception(e, provider=self._extract_provider(config.model))
+             )
+         except litellm.AuthenticationError as e:
+             log.warning(
+                 "llm.request.failed.auth_error",
+                 model=config.model,
+                 error=str(e),
+             )
+             return Result.err(
+                 ProviderError(
+                     "Authentication failed - check API key",
+                     provider=self._extract_provider(config.model),
+                     status_code=401,
+                     details={"original_exception": type(e).__name__},
+                 )
+             )
+         except litellm.BadRequestError as e:
+             log.warning(
+                 "llm.request.failed.bad_request",
+                 model=config.model,
+                 error=str(e),
+             )
+             return Result.err(
+                 ProviderError.from_exception(e, provider=self._extract_provider(config.model))
+             )
+         except Exception as e:
+             # Unexpected error - log and convert to ProviderError
+             log.exception(
+                 "llm.request.failed.unexpected",
+                 model=config.model,
+                 error=str(e),
+             )
+             return Result.err(
+                 ProviderError(
+                     f"Unexpected error: {e!s}",
+                     provider=self._extract_provider(config.model),
+                     details={"original_exception": type(e).__name__},
+                 )
+             )
+
+     def _extract_provider(self, model: str) -> str:
+         """Extract the provider name from a model string.
+
+         Args:
+             model: The model identifier (e.g., 'openrouter/openai/gpt-4').
+
+         Returns:
+             The provider name (e.g., 'openrouter').
+         """
+         if "/" in model:
+             return model.split("/")[0]
+         # Common model prefixes
+         if model.startswith("gpt"):
+             return "openai"
+         if model.startswith("claude"):
+             return "anthropic"
+         return "unknown"
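
A hedged usage sketch (not part of the diff): driving LiteLLMAdapter through OpenRouter, assuming OPENROUTER_API_KEY is set in the environment. The is_ok/value accessors on Result are taken from the adapter docstrings above; the error accessor on the failure branch is an assumption about ouroboros.core.types.Result.

import asyncio

from ouroboros.providers.base import CompletionConfig, Message, MessageRole
from ouroboros.providers.litellm_adapter import LiteLLMAdapter

async def main() -> None:
    # Tighter timeout/retry budget than the defaults (60.0 s / 3 attempts).
    adapter = LiteLLMAdapter(timeout=30.0, max_retries=2)
    result = await adapter.complete(
        messages=[Message(role=MessageRole.USER, content="Hello!")],
        config=CompletionConfig(model="openrouter/openai/gpt-4"),
    )
    if result.is_ok:
        print(result.value.content)             # assistant text
        print(result.value.usage.total_tokens)  # parsed from the LiteLLM usage block
    else:
        print(result.error)  # assumed accessor for the ProviderError

asyncio.run(main())

Note that retries happen inside complete(): transient failures (rate limits, timeouts, connection errors) are retried with exponential backoff and jitter via stamina, and only surface as a ProviderError once all attempts are exhausted.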
ouroboros/py.typed ADDED
Empty file (PEP 561 marker indicating the package ships type information).
ouroboros/resilience/__init__.py ADDED
@@ -0,0 +1,67 @@
+ """Resilience module for stagnation detection and recovery.
+
+ This module implements Epic 4: Resilience & Stagnation Recovery.
+
+ Components:
+     - StagnationDetector: Detects 4 stagnation patterns
+     - StagnationPattern: Enum of pattern types
+     - ExecutionHistory: Tracks execution state for detection
+     - LateralThinker: Generates alternative approaches via personas
+     - ThinkingPersona: 5 personas for lateral thinking
+     - Events: Stagnation and lateral thinking event types
+
+ Story 4.1: Stagnation Detection (4 Patterns)
+     - Spinning: Same output repeated
+     - Oscillation: A→B→A→B alternating pattern
+     - No Drift: No progress toward goal
+     - Diminishing Returns: Progress slowing
+
+ Story 4.2: Lateral Thinking Personas
+     - Hacker: Unconventional workarounds
+     - Researcher: Seeks additional information
+     - Simplifier: Reduces complexity
+     - Architect: Restructures the approach
+     - Contrarian: Challenges assumptions
+ """
+
+ from ouroboros.resilience.lateral import (
+     AllPersonasExhaustedEvent,
+     LateralThinker,
+     LateralThinkingActivatedEvent,
+     LateralThinkingFailedEvent,
+     LateralThinkingResult,
+     LateralThinkingSucceededEvent,
+     PersonaStrategy,
+     ThinkingPersona,
+ )
+ from ouroboros.resilience.stagnation import (
+     DiminishingReturnsDetectedEvent,
+     ExecutionHistory,
+     NoDriftDetectedEvent,
+     OscillationDetectedEvent,
+     SpinningDetectedEvent,
+     StagnationDetection,
+     StagnationDetector,
+     StagnationPattern,
+ )
+
+ __all__ = [
+     # Story 4.1: Stagnation Detection
+     "StagnationDetector",
+     "StagnationPattern",
+     "StagnationDetection",
+     "ExecutionHistory",
+     "SpinningDetectedEvent",
+     "OscillationDetectedEvent",
+     "NoDriftDetectedEvent",
+     "DiminishingReturnsDetectedEvent",
+     # Story 4.2: Lateral Thinking
+     "LateralThinker",
+     "ThinkingPersona",
+     "PersonaStrategy",
+     "LateralThinkingResult",
+     "LateralThinkingActivatedEvent",
+     "LateralThinkingSucceededEvent",
+     "LateralThinkingFailedEvent",
+     "AllPersonasExhaustedEvent",
+ ]
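
For orientation, a purely illustrative consumer sketch follows. Only the import names are taken from the __init__ above; every constructor signature, method name (record, detect, propose), and enum member name below is a hypothetical guess, since this diff does not show the bodies of stagnation.py or lateral.py.

# Hypothetical wiring sketch - import names come from __all__ above, but the
# constructors, methods, and enum members used here are assumptions.
from ouroboros.resilience import (
    ExecutionHistory,
    LateralThinker,
    StagnationDetector,
    StagnationPattern,
)

history = ExecutionHistory()      # hypothetical no-arg constructor
detector = StagnationDetector()   # hypothetical no-arg constructor

for output in ["step A", "step B", "step A", "step B"]:
    history.record(output)                # hypothetical method
    detection = detector.detect(history)  # hypothetical method
    if detection and detection.pattern is StagnationPattern.OSCILLATION:
        # A→B→A→B alternation detected: ask a lateral-thinking persona
        # (Hacker, Researcher, Simplifier, Architect, or Contrarian)
        # for an alternative approach.
        alternative = LateralThinker().propose(detection)  # hypothetical
        break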