ouroboros-ai 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ouroboros-ai might be problematic; see the registry's advisory for details.

Files changed (81)
  1. ouroboros/__init__.py +15 -0
  2. ouroboros/__main__.py +9 -0
  3. ouroboros/bigbang/__init__.py +39 -0
  4. ouroboros/bigbang/ambiguity.py +464 -0
  5. ouroboros/bigbang/interview.py +530 -0
  6. ouroboros/bigbang/seed_generator.py +610 -0
  7. ouroboros/cli/__init__.py +9 -0
  8. ouroboros/cli/commands/__init__.py +7 -0
  9. ouroboros/cli/commands/config.py +79 -0
  10. ouroboros/cli/commands/init.py +425 -0
  11. ouroboros/cli/commands/run.py +201 -0
  12. ouroboros/cli/commands/status.py +85 -0
  13. ouroboros/cli/formatters/__init__.py +31 -0
  14. ouroboros/cli/formatters/panels.py +157 -0
  15. ouroboros/cli/formatters/progress.py +112 -0
  16. ouroboros/cli/formatters/tables.py +166 -0
  17. ouroboros/cli/main.py +60 -0
  18. ouroboros/config/__init__.py +81 -0
  19. ouroboros/config/loader.py +292 -0
  20. ouroboros/config/models.py +332 -0
  21. ouroboros/core/__init__.py +62 -0
  22. ouroboros/core/ac_tree.py +401 -0
  23. ouroboros/core/context.py +472 -0
  24. ouroboros/core/errors.py +246 -0
  25. ouroboros/core/seed.py +212 -0
  26. ouroboros/core/types.py +205 -0
  27. ouroboros/evaluation/__init__.py +110 -0
  28. ouroboros/evaluation/consensus.py +350 -0
  29. ouroboros/evaluation/mechanical.py +351 -0
  30. ouroboros/evaluation/models.py +235 -0
  31. ouroboros/evaluation/pipeline.py +286 -0
  32. ouroboros/evaluation/semantic.py +302 -0
  33. ouroboros/evaluation/trigger.py +278 -0
  34. ouroboros/events/__init__.py +5 -0
  35. ouroboros/events/base.py +80 -0
  36. ouroboros/events/decomposition.py +153 -0
  37. ouroboros/events/evaluation.py +248 -0
  38. ouroboros/execution/__init__.py +44 -0
  39. ouroboros/execution/atomicity.py +451 -0
  40. ouroboros/execution/decomposition.py +481 -0
  41. ouroboros/execution/double_diamond.py +1386 -0
  42. ouroboros/execution/subagent.py +275 -0
  43. ouroboros/observability/__init__.py +63 -0
  44. ouroboros/observability/drift.py +383 -0
  45. ouroboros/observability/logging.py +504 -0
  46. ouroboros/observability/retrospective.py +338 -0
  47. ouroboros/orchestrator/__init__.py +78 -0
  48. ouroboros/orchestrator/adapter.py +391 -0
  49. ouroboros/orchestrator/events.py +278 -0
  50. ouroboros/orchestrator/runner.py +597 -0
  51. ouroboros/orchestrator/session.py +486 -0
  52. ouroboros/persistence/__init__.py +23 -0
  53. ouroboros/persistence/checkpoint.py +511 -0
  54. ouroboros/persistence/event_store.py +183 -0
  55. ouroboros/persistence/migrations/__init__.py +1 -0
  56. ouroboros/persistence/migrations/runner.py +100 -0
  57. ouroboros/persistence/migrations/scripts/001_initial.sql +20 -0
  58. ouroboros/persistence/schema.py +56 -0
  59. ouroboros/persistence/uow.py +230 -0
  60. ouroboros/providers/__init__.py +28 -0
  61. ouroboros/providers/base.py +133 -0
  62. ouroboros/providers/claude_code_adapter.py +212 -0
  63. ouroboros/providers/litellm_adapter.py +316 -0
  64. ouroboros/py.typed +0 -0
  65. ouroboros/resilience/__init__.py +67 -0
  66. ouroboros/resilience/lateral.py +595 -0
  67. ouroboros/resilience/stagnation.py +727 -0
  68. ouroboros/routing/__init__.py +60 -0
  69. ouroboros/routing/complexity.py +272 -0
  70. ouroboros/routing/downgrade.py +664 -0
  71. ouroboros/routing/escalation.py +340 -0
  72. ouroboros/routing/router.py +204 -0
  73. ouroboros/routing/tiers.py +247 -0
  74. ouroboros/secondary/__init__.py +40 -0
  75. ouroboros/secondary/scheduler.py +467 -0
  76. ouroboros/secondary/todo_registry.py +483 -0
  77. ouroboros_ai-0.1.0.dist-info/METADATA +607 -0
  78. ouroboros_ai-0.1.0.dist-info/RECORD +81 -0
  79. ouroboros_ai-0.1.0.dist-info/WHEEL +4 -0
  80. ouroboros_ai-0.1.0.dist-info/entry_points.txt +2 -0
  81. ouroboros_ai-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,100 @@
1
+ """Simple migration runner for SQLite.
2
+
3
+ This module provides a basic migration system for applying SQL scripts
4
+ to the database in order. Tracks applied migrations to avoid re-running.
5
+ """
6
+
7
+ import asyncio
8
+ from pathlib import Path
9
+
10
+ from sqlalchemy import text
11
+ from sqlalchemy.ext.asyncio import AsyncEngine
12
+
13
# Directory holding the numbered *.sql migration scripts, resolved
# relative to this module so it works regardless of CWD.
MIGRATIONS_DIR = Path(__file__).parent / "scripts"

# SQL for migration tracking table
# Each applied script's filename is recorded here so reruns are
# idempotent (see run_migrations).
CREATE_MIGRATIONS_TABLE = """
CREATE TABLE IF NOT EXISTS _migrations (
    name TEXT PRIMARY KEY,
    applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
"""
22
+
23
+
24
async def _get_applied_migrations(engine: AsyncEngine) -> set[str]:
    """Return the names of migrations that have already been applied.

    Ensures the tracking table exists first, so the query below always
    has something to read (even on a brand-new database).

    Args:
        engine: SQLAlchemy async engine.

    Returns:
        Set of applied migration names.
    """
    async with engine.begin() as conn:
        # Create the tracking table on first use.
        await conn.execute(text(CREATE_MIGRATIONS_TABLE))

        rows = await conn.execute(text("SELECT name FROM _migrations"))
        names: set[str] = set()
        for record in rows.fetchall():
            names.add(record[0])
        return names
40
+
41
+
42
+ async def _read_migration_file(migration_file: Path) -> str:
43
+ """Read migration file content using asyncio.to_thread to avoid blocking.
44
+
45
+ Args:
46
+ migration_file: Path to the migration SQL file.
47
+
48
+ Returns:
49
+ Content of the migration file.
50
+ """
51
+ return await asyncio.to_thread(migration_file.read_text)
52
+
53
+
54
async def run_migrations(engine: AsyncEngine) -> list[str]:
    """Run all pending migrations.

    Migrations are SQL files in the scripts/ directory, named with a
    numeric prefix (e.g., 001_initial.sql). They are executed in order.
    Already applied migrations are tracked and skipped.

    Note: This is a simple migration system. For production, consider
    using Alembic. Statements are split on ";", which breaks if a
    migration embeds semicolons inside string literals or triggers.

    Args:
        engine: SQLAlchemy async engine.

    Returns:
        List of newly applied migration names.
    """
    applied: list[str] = []

    # Get already applied migrations
    already_applied = await _get_applied_migrations(engine)

    # All .sql files, sorted so the numeric prefixes run in order.
    migration_files = sorted(MIGRATIONS_DIR.glob("*.sql"))

    for migration_file in migration_files:
        if migration_file.name in already_applied:
            continue

        # Read file content off the event loop to avoid blocking.
        sql_content = await _read_migration_file(migration_file)

        async with engine.begin() as conn:
            # Split by semicolon and execute each statement. Line
            # comments are stripped *inside* each chunk first: a chunk
            # that merely begins with a "--" banner (as 001_initial.sql
            # does before its CREATE TABLE) still contains real SQL and
            # must be executed — checking only startswith("--") on the
            # whole chunk silently skipped such statements.
            for statement in sql_content.split(";"):
                lines = [
                    line
                    for line in statement.splitlines()
                    if not line.strip().startswith("--")
                ]
                sql = "\n".join(lines).strip()
                if sql:
                    await conn.execute(text(sql))

            # Record this migration as applied, in the same transaction
            # as its statements so tracking and schema stay consistent.
            await conn.execute(
                text("INSERT INTO _migrations (name) VALUES (:name)"),
                {"name": migration_file.name},
            )

        applied.append(migration_file.name)

    return applied
@@ -0,0 +1,20 @@
1
-- Migration: 001_initial
-- Description: Create initial events table for event sourcing
-- Created: 2026-01-16

-- Single append-only table for all event types. Mirrors the SQLAlchemy
-- Core definition in ouroboros/persistence/schema.py.
CREATE TABLE IF NOT EXISTS events (
    id VARCHAR(36) PRIMARY KEY,                                -- event UUID as string
    aggregate_type VARCHAR(100) NOT NULL,                      -- aggregate kind, for replay
    aggregate_id VARCHAR(36) NOT NULL,                         -- aggregate UUID as string
    event_type VARCHAR(200) NOT NULL,                          -- dotted event-type name
    payload JSON NOT NULL,                                     -- event body
    timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,    -- server-side insert time
    consensus_id VARCHAR(36)                                   -- optional multi-model consensus link
);

-- Indexes for efficient queries
CREATE INDEX IF NOT EXISTS ix_events_aggregate_type ON events (aggregate_type);
CREATE INDEX IF NOT EXISTS ix_events_aggregate_id ON events (aggregate_id);
CREATE INDEX IF NOT EXISTS ix_events_aggregate_type_id ON events (aggregate_type, aggregate_id);
CREATE INDEX IF NOT EXISTS ix_events_event_type ON events (event_type);
CREATE INDEX IF NOT EXISTS ix_events_timestamp ON events (timestamp);
@@ -0,0 +1,56 @@
1
+ """Database schema definitions using SQLAlchemy Core.
2
+
3
+ This module defines the events table schema for event sourcing.
4
+ SQLAlchemy Core is used (not ORM) for flexibility and explicit control.
5
+
6
+ Table: events
7
+ Single unified table for all event types following event sourcing pattern.
8
+ """
9
+
10
+ from datetime import UTC, datetime
11
+
12
+ from sqlalchemy import (
13
+ JSON,
14
+ Column,
15
+ DateTime,
16
+ Index,
17
+ MetaData,
18
+ String,
19
+ Table,
20
+ text,
21
+ )
22
+
23
# Global metadata instance for all tables
metadata = MetaData()

# Events table - single unified table for event sourcing.
# Mirrors scripts/001_initial.sql; keep the two in sync.
events_table = Table(
    "events",
    metadata,
    # Primary key - UUID as string
    Column("id", String(36), primary_key=True),
    # Aggregate identification for event replay
    Column("aggregate_type", String(100), nullable=False),
    Column("aggregate_id", String(36), nullable=False),
    # Event type following dot.notation.past_tense convention
    # e.g., "ontology.concept.added", "execution.ac.completed"
    Column("event_type", String(200), nullable=False),
    # Event payload as JSON
    Column("payload", JSON, nullable=False),
    # Timestamp with timezone, defaults to UTC now
    Column(
        "timestamp",
        DateTime(timezone=True),
        nullable=False,
        # Python-side default for inserts through SQLAlchemy...
        default=lambda: datetime.now(UTC),
        # ...and a server-side fallback for rows inserted outside it.
        server_default=text("CURRENT_TIMESTAMP"),
    ),
    # Optional consensus ID for multi-model consensus events
    Column("consensus_id", String(36), nullable=True),
    # Indexes for efficient queries
    Index("ix_events_aggregate_type", "aggregate_type"),
    Index("ix_events_aggregate_id", "aggregate_id"),
    Index("ix_events_aggregate_type_id", "aggregate_type", "aggregate_id"),
    Index("ix_events_event_type", "event_type"),
    Index("ix_events_timestamp", "timestamp"),
)
@@ -0,0 +1,230 @@
1
+ """Unit of Work pattern for phase-based persistence.
2
+
3
+ This module provides:
4
+ - UnitOfWork: Accumulate events and persist at phase boundaries
5
+ - Transactional coordination between EventStore and CheckpointStore
6
+
7
+ The UnitOfWork pattern ensures that all related persistence operations
8
+ (events and checkpoints) are committed atomically at phase boundaries.
9
+ """
10
+
11
+ from collections.abc import Sequence
12
+
13
+ from ouroboros.core.errors import PersistenceError
14
+ from ouroboros.core.types import Result
15
+ from ouroboros.events.base import BaseEvent
16
+ from ouroboros.persistence.checkpoint import CheckpointData, CheckpointStore
17
+ from ouroboros.persistence.event_store import EventStore
18
+
19
+
20
class UnitOfWork:
    """Unit of Work for coordinating event and checkpoint persistence.

    Accumulates events during a phase and persists both events and checkpoints
    atomically at phase boundaries. Provides transactional semantics for
    persistence operations.

    Usage:
        uow = UnitOfWork(event_store, checkpoint_store)

        # Accumulate events during phase
        uow.add_event(event1)
        uow.add_event(event2)

        # Commit at phase boundary
        checkpoint = CheckpointData.create("seed-123", "planning", state)
        result = await uow.commit(checkpoint)
        if result.is_ok:
            # All events and checkpoint persisted
            pass
    """

    def __init__(
        self, event_store: EventStore, checkpoint_store: CheckpointStore
    ) -> None:
        """Initialize unit of work.

        Args:
            event_store: EventStore for persisting events.
            checkpoint_store: CheckpointStore for persisting checkpoints.
        """
        self._event_store = event_store
        self._checkpoint_store = checkpoint_store
        # Events accumulated during the current phase, awaiting commit().
        self._pending_events: list[BaseEvent] = []

    def add_event(self, event: BaseEvent) -> None:
        """Add event to pending events for later commit.

        Args:
            event: Event to add to the unit of work.
        """
        self._pending_events.append(event)

    def add_events(self, events: Sequence[BaseEvent]) -> None:
        """Add multiple events to pending events.

        Args:
            events: Sequence of events to add.
        """
        self._pending_events.extend(events)

    async def commit(
        self, checkpoint: CheckpointData | None = None
    ) -> Result[None, PersistenceError]:
        """Commit all pending events and optional checkpoint.

        Persists all accumulated events to EventStore and optionally saves
        a checkpoint. Operations are performed in order:
        1. Persist all events (the pending list is cleared as soon as the
           append succeeds)
        2. Save checkpoint (if provided)

        On failure, the operation stops and returns an error. Already-persisted
        events remain in the store (event sourcing is append-only). Clearing
        the pending list right after the append — rather than at the end —
        guarantees that a retried commit() after a checkpoint failure cannot
        append the same events a second time.

        Args:
            checkpoint: Optional checkpoint to save after events.

        Returns:
            Result.ok(None) on success,
            Result.err(PersistenceError) on failure.
        """
        # Snapshot the batch size for error reporting; the list may be
        # cleared before an exception surfaces.
        batch_size = len(self._pending_events)
        try:
            # Persist all pending events atomically in a single batch
            if self._pending_events:
                await self._event_store.append_batch(self._pending_events)
                # Clear immediately: these events now live in the
                # append-only store, so they must never be re-appended
                # even if the checkpoint save below fails.
                self._pending_events.clear()

            # Save checkpoint if provided
            if checkpoint is not None:
                # NOTE(review): save() is not awaited here — presumably a
                # synchronous, Result-returning API; confirm against
                # CheckpointStore.
                checkpoint_result = self._checkpoint_store.save(checkpoint)
                if checkpoint_result.is_err:
                    return checkpoint_result

            return Result.ok(None)

        except PersistenceError as e:
            # Expected persistence failure from the event store.
            return Result.err(e)
        except Exception as e:
            # Unexpected error: wrap with commit context for the caller.
            return Result.err(
                PersistenceError(
                    f"Unit of work commit failed: {e}",
                    operation="commit",
                    details={"pending_events": batch_size},
                )
            )

    def rollback(self) -> None:
        """Rollback by discarding all pending events.

        This is useful when an error occurs during phase execution
        and you want to discard uncommitted events.

        Note: This only affects pending events. Already-committed events
        cannot be rolled back (event sourcing is append-only).
        """
        self._pending_events.clear()

    @property
    def pending_event_count(self) -> int:
        """Get count of pending events awaiting commit.

        Returns:
            Number of events in the unit of work.
        """
        return len(self._pending_events)

    def has_pending_events(self) -> bool:
        """Check if there are pending events.

        Returns:
            True if there are uncommitted events.
        """
        return len(self._pending_events) > 0
147
+
148
+
149
class PhaseTransaction:
    """Async context manager wrapping a UnitOfWork for one phase.

    On a clean exit, the accumulated events are committed together with a
    checkpoint built from the seed/phase/state supplied at construction.
    If the body raises, pending events are rolled back and the exception
    propagates unchanged.

    Usage:
        async with PhaseTransaction(uow, seed_id, "planning", state) as tx:
            # Execute phase logic
            tx.add_event(event1)
            tx.add_event(event2)
            # Auto-commits on success, rolls back on exception
    """

    def __init__(
        self,
        uow: UnitOfWork,
        seed_id: str,
        phase: str,
        state: dict,
    ) -> None:
        """Initialize phase transaction.

        Args:
            uow: UnitOfWork instance to use.
            seed_id: Seed identifier for checkpoint.
            phase: Current phase name.
            state: State data for checkpoint.
        """
        self._uow = uow
        self._seed_id = seed_id
        self._phase = phase
        self._state = state
        self._committed = False

    def add_event(self, event: BaseEvent) -> None:
        """Forward a single event to the underlying unit of work.

        Args:
            event: Event to add.
        """
        self._uow.add_event(event)

    def add_events(self, events: Sequence[BaseEvent]) -> None:
        """Forward a batch of events to the underlying unit of work.

        Args:
            events: Sequence of events to add.
        """
        self._uow.add_events(events)

    async def __aenter__(self) -> "PhaseTransaction":
        """Enter the context; the transaction itself is the target."""
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> bool:
        """Commit on success, roll back on error; never suppress exceptions.

        Args:
            exc_type: Exception type if an exception occurred.
            exc_val: Exception value if an exception occurred.
            exc_tb: Exception traceback if an exception occurred.

        Returns:
            False to propagate exceptions (we don't suppress them).
        """
        if exc_type is not None:
            # The phase body failed: drop uncommitted events and let the
            # original exception propagate.
            self._uow.rollback()
            return False

        if not self._committed:
            # Clean exit: persist accumulated events plus a checkpoint
            # snapshot of the current phase state.
            snapshot = CheckpointData.create(
                self._seed_id, self._phase, self._state
            )
            outcome = await self._uow.commit(snapshot)
            if outcome.is_err:
                # Surface the persistence failure to the caller.
                raise outcome.error
            self._committed = True

        return False
@@ -0,0 +1,28 @@
1
+ """LLM provider adapters for Ouroboros.
2
+
3
+ This module provides unified access to LLM providers through the LLMAdapter
4
+ protocol and LiteLLMAdapter implementation.
5
+ """
6
+
7
+ from ouroboros.providers.base import (
8
+ CompletionConfig,
9
+ CompletionResponse,
10
+ LLMAdapter,
11
+ Message,
12
+ MessageRole,
13
+ UsageInfo,
14
+ )
15
+ from ouroboros.providers.litellm_adapter import LiteLLMAdapter
16
+
17
# Public API of the providers package; all names are re-exported from
# ouroboros.providers.base and ouroboros.providers.litellm_adapter above.
__all__ = [
    # Protocol
    "LLMAdapter",
    # Models
    "Message",
    "MessageRole",
    "CompletionConfig",
    "CompletionResponse",
    "UsageInfo",
    # Implementations
    "LiteLLMAdapter",
]
@@ -0,0 +1,133 @@
1
+ """Base protocol and models for LLM provider adapters.
2
+
3
+ This module defines the LLMAdapter protocol and associated data models for
4
+ communicating with LLM providers in a unified way.
5
+ """
6
+
7
+ from dataclasses import dataclass, field
8
+ from enum import StrEnum
9
+ from typing import Protocol
10
+
11
+ from ouroboros.core.errors import ProviderError
12
+ from ouroboros.core.types import Result
13
+
14
+
15
class MessageRole(StrEnum):
    """Role of a message in the conversation.

    String-valued so members serialize directly into chat-style API
    payloads (see Message.to_dict, which uses ``role.value``).
    """

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
21
+
22
+
23
@dataclass(frozen=True, slots=True)
class Message:
    """One immutable turn of a conversation.

    Attributes:
        role: The role of the message sender.
        content: The text content of the message.
    """

    role: MessageRole
    content: str

    def to_dict(self) -> dict[str, str]:
        """Convert message to dict format for LLM API calls.

        Returns:
            Dictionary with 'role' and 'content' keys.
        """
        payload: dict[str, str] = {"role": self.role.value}
        payload["content"] = self.content
        return payload
42
+
43
+
44
@dataclass(frozen=True, slots=True)
class CompletionConfig:
    """Configuration for LLM completion requests.

    Frozen and slotted: instances are immutable value objects and can be
    shared safely between requests.

    Attributes:
        model: The model identifier (e.g., 'openrouter/openai/gpt-4').
        temperature: Sampling temperature (0.0-2.0). Default 0.7.
        max_tokens: Maximum tokens to generate. Default 4096.
        stop: Optional stop sequences.
        top_p: Nucleus sampling parameter. Default 1.0.
    """

    model: str
    temperature: float = 0.7
    max_tokens: int = 4096
    stop: list[str] | None = None
    top_p: float = 1.0
61
+
62
+
63
@dataclass(frozen=True, slots=True)
class UsageInfo:
    """Token usage information from a completion.

    Attributes:
        prompt_tokens: Number of tokens in the prompt.
        completion_tokens: Number of tokens in the completion.
        total_tokens: Total tokens used (prompt + completion).
    """

    # NOTE: total_tokens is stored rather than derived; it is presumably
    # taken verbatim from the provider's usage block — confirm in adapters.
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
76
+
77
+
78
@dataclass(frozen=True, slots=True)
class CompletionResponse:
    """Response from an LLM completion request.

    Attributes:
        content: The generated text content.
        model: The model that generated the response.
        usage: Token usage information.
        finish_reason: Why the generation stopped (e.g., 'stop', 'length').
        raw_response: Optional raw response from the provider for debugging.
    """

    content: str
    model: str
    usage: UsageInfo
    finish_reason: str = "stop"
    # default_factory keeps the mutable dict default per-instance.
    raw_response: dict[str, object] = field(default_factory=dict)
95
+
96
+
97
class LLMAdapter(Protocol):
    """Protocol for LLM provider adapters.

    All LLM adapters must implement this protocol to provide a unified
    interface for making completion requests. As a typing.Protocol,
    conformance is structural: implementations need not inherit from it.

    Example:
        adapter: LLMAdapter = LiteLLMAdapter(api_key="...")
        result = await adapter.complete(
            messages=[Message(role=MessageRole.USER, content="Hello!")],
            config=CompletionConfig(model="openrouter/openai/gpt-4"),
        )
        if result.is_ok:
            print(result.value.content)
        else:
            log.error("LLM call failed", error=result.error)
    """

    async def complete(
        self,
        messages: list[Message],
        config: CompletionConfig,
    ) -> Result[CompletionResponse, ProviderError]:
        """Make a completion request to the LLM provider.

        This method handles retries internally and converts all expected
        failures to Result.err(ProviderError). Exceptions should only
        occur for programming errors (bugs).

        Args:
            messages: The conversation messages to send.
            config: Configuration for the completion request.

        Returns:
            Result containing either the completion response or a ProviderError.
        """
        # Protocol stub: no implementation by design.
        ...