ouroboros-ai 0.1.1__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ouroboros-ai might be problematic.
- ouroboros/__init__.py +15 -2
- ouroboros/bigbang/ambiguity.py +109 -48
- ouroboros/bigbang/interview.py +9 -4
- ouroboros/cli/commands/run.py +9 -1
- ouroboros/core/__init__.py +11 -0
- ouroboros/core/security.py +327 -0
- ouroboros/observability/logging.py +70 -0
- ouroboros/providers/litellm_adapter.py +15 -1
- {ouroboros_ai-0.1.1.dist-info → ouroboros_ai-0.2.0.dist-info}/METADATA +32 -2
- {ouroboros_ai-0.1.1.dist-info → ouroboros_ai-0.2.0.dist-info}/RECORD +13 -12
- {ouroboros_ai-0.1.1.dist-info → ouroboros_ai-0.2.0.dist-info}/WHEEL +0 -0
- {ouroboros_ai-0.1.1.dist-info → ouroboros_ai-0.2.0.dist-info}/entry_points.txt +0 -0
- {ouroboros_ai-0.1.1.dist-info → ouroboros_ai-0.2.0.dist-info}/licenses/LICENSE +0 -0
ouroboros/__init__.py
CHANGED
@@ -1,6 +1,19 @@
-"""Ouroboros - Self-Improving AI Workflow System.
+"""Ouroboros - Self-Improving AI Workflow System.
 
-
+A workflow system that uses Socratic questioning and ontological analysis
+to transform ambiguous requirements into executable specifications.
+
+Example:
+    # Using CLI
+    ouroboros init start "I want to build a task management CLI"
+    ouroboros run workflow seed.yaml
+
+    # Using Python
+    from ouroboros.core import Result, ValidationError
+    from ouroboros.bigbang import InterviewEngine
+"""
+
+__version__ = "0.2.0"
 
 __all__ = ["__version__", "main"]
 
ouroboros/bigbang/ambiguity.py
CHANGED
@@ -36,6 +36,9 @@ DEFAULT_MODEL = "openrouter/google/gemini-2.0-flash-001"
 # Temperature for reproducible scoring
 SCORING_TEMPERATURE = 0.1
 
+# Maximum token limit to prevent cost explosion
+MAX_TOKEN_LIMIT = 8192
+
 
 class ComponentScore(BaseModel):
     """Individual component score with justification.
@@ -106,6 +109,17 @@ class AmbiguityScorer:
     Uses LLM to evaluate clarity of goals, constraints, and success criteria
     from interview conversation, producing reproducible scores.
 
+    Uses adaptive token allocation: starts with `initial_max_tokens` and
+    doubles on truncation up to `MAX_TOKEN_LIMIT`. Retries up to `max_retries`
+    times on both provider errors and parse failures.
+
+    Attributes:
+        llm_adapter: The LLM adapter for completions.
+        model: Model identifier to use.
+        temperature: Temperature for reproducibility (default 0.1).
+        initial_max_tokens: Starting token limit (default 2048).
+        max_retries: Maximum retry attempts (default 3).
+
     Example:
         scorer = AmbiguityScorer(llm_adapter=LiteLLMAdapter())
 
@@ -123,7 +137,8 @@ class AmbiguityScorer:
     llm_adapter: LiteLLMAdapter
     model: str = DEFAULT_MODEL
     temperature: float = SCORING_TEMPERATURE
-
+    initial_max_tokens: int = 2048
+    max_retries: int = 3
 
     async def score(
         self, state: InterviewState
@@ -135,6 +150,9 @@ class AmbiguityScorer:
        - Constraints (30% weight)
        - Success criteria (30% weight)
 
+        Uses adaptive token allocation: starts with initial_max_tokens and
+        doubles on parse failure, up to max_retries attempts.
+
        Args:
            state: The interview state to score.
 
@@ -159,57 +177,98 @@
            Message(role=MessageRole.USER, content=user_prompt),
        ]
 
-
-
-
-            max_tokens=self.max_tokens,
-        )
-
-        result = await self.llm_adapter.complete(messages, config)
+        current_max_tokens = self.initial_max_tokens
+        last_error: Exception | ProviderError | None = None
+        last_response: str = ""
 
-
-
-
-
-
+        for attempt in range(self.max_retries):
+            config = CompletionConfig(
+                model=self.model,
+                temperature=self.temperature,
+                max_tokens=current_max_tokens,
            )
-            return Result.err(result.error)
-
-        # Parse the LLM response into scores
-        try:
-            breakdown = self._parse_scoring_response(result.value.content)
-            overall_score = self._calculate_overall_score(breakdown)
 
-
-
-
-
+            result = await self.llm_adapter.complete(messages, config)
+
+            # Fix #3: Retry on provider errors (rate limits, transient failures)
+            if result.is_err:
+                last_error = result.error
+                log.warning(
+                    "ambiguity.scoring.provider_error_retrying",
+                    interview_id=state.interview_id,
+                    error=str(result.error),
+                    attempt=attempt + 1,
+                    max_retries=self.max_retries,
+                )
+                continue
 
-
-
-
-                overall_score=
-                is_ready_for_seed=ambiguity_score.is_ready_for_seed,
-                goal_clarity=breakdown.goal_clarity.clarity_score,
-                constraint_clarity=breakdown.constraint_clarity.clarity_score,
-                success_criteria_clarity=breakdown.success_criteria_clarity.clarity_score,
-            )
+            # Parse the LLM response into scores
+            try:
+                breakdown = self._parse_scoring_response(result.value.content)
+                overall_score = self._calculate_overall_score(breakdown)
 
-
+                ambiguity_score = AmbiguityScore(
+                    overall_score=overall_score,
+                    breakdown=breakdown,
+                )
 
-
-
-
-
-
-
-
-
-
-
-                details={"response_preview": result.value.content[:200]},
+                log.info(
+                    "ambiguity.scoring.completed",
+                    interview_id=state.interview_id,
+                    overall_score=overall_score,
+                    is_ready_for_seed=ambiguity_score.is_ready_for_seed,
+                    goal_clarity=breakdown.goal_clarity.clarity_score,
+                    constraint_clarity=breakdown.constraint_clarity.clarity_score,
+                    success_criteria_clarity=breakdown.success_criteria_clarity.clarity_score,
+                    tokens_used=current_max_tokens,
+                    attempt=attempt + 1,
                )
+
+                return Result.ok(ambiguity_score)
+
+            except (ValueError, KeyError) as e:
+                last_error = e
+                last_response = result.value.content
+
+                # Fix #2: Only increase tokens if response was truncated
+                is_truncated = result.value.finish_reason == "length"
+
+                if is_truncated:
+                    # Fix #1: Cap token growth with MAX_TOKEN_LIMIT
+                    next_tokens = min(current_max_tokens * 2, MAX_TOKEN_LIMIT)
+                    log.warning(
+                        "ambiguity.scoring.truncated_retrying",
+                        interview_id=state.interview_id,
+                        error=str(e),
+                        attempt=attempt + 1,
+                        current_tokens=current_max_tokens,
+                        next_tokens=next_tokens,
+                    )
+                    current_max_tokens = next_tokens
+                else:
+                    # Format error without truncation - retry with same tokens
+                    log.warning(
+                        "ambiguity.scoring.format_error_retrying",
+                        interview_id=state.interview_id,
+                        error=str(e),
+                        attempt=attempt + 1,
+                        finish_reason=result.value.finish_reason,
+                    )
+
+        # All retries exhausted
+        log.warning(
+            "ambiguity.scoring.failed",
+            interview_id=state.interview_id,
+            error=str(last_error),
+            response=last_response[:500] if last_response else None,
+            max_retries_exhausted=True,
+        )
+        return Result.err(
+            ProviderError(
+                f"Failed to parse scoring response after {self.max_retries} attempts: {last_error}",
+                details={"response_preview": last_response[:200] if last_response else None},
            )
+        )
 
     def _build_interview_context(self, state: InterviewState) -> str:
         """Build context string from interview state.
@@ -254,15 +313,17 @@ Evaluate three components:
 
 For each component, provide:
 - A clarity score between 0.0 (completely unclear) and 1.0 (perfectly clear)
-- A brief justification explaining the score
+- A brief justification (1-2 sentences max) explaining the score
+
+IMPORTANT: You MUST provide ALL six fields below. Keep justifications concise.
 
 Respond in this exact format:
 GOAL_CLARITY_SCORE: <score>
-GOAL_CLARITY_JUSTIFICATION: <justification>
+GOAL_CLARITY_JUSTIFICATION: <justification in 1-2 sentences>
 CONSTRAINT_CLARITY_SCORE: <score>
-CONSTRAINT_CLARITY_JUSTIFICATION: <justification>
+CONSTRAINT_CLARITY_JUSTIFICATION: <justification in 1-2 sentences>
 SUCCESS_CRITERIA_CLARITY_SCORE: <score>
-SUCCESS_CRITERIA_CLARITY_JUSTIFICATION: <justification>
+SUCCESS_CRITERIA_CLARITY_JUSTIFICATION: <justification in 1-2 sentences>
 
 Be strict in your evaluation. Scores above 0.8 require very specific, measurable requirements."""
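The retry loop above applies two distinct policies: provider errors are simply retried, while parse failures only get a larger token budget when the response was actually truncated (`finish_reason == "length"`), and growth is capped at `MAX_TOKEN_LIMIT`. A minimal, self-contained sketch of that budget policy, with the constant and the 2048-token starting value copied from the diff and the helper name purely illustrative:

```python
# Illustrative sketch of the capped token-doubling policy; only MAX_TOKEN_LIMIT
# and the 2048-token default come from the diff above.
MAX_TOKEN_LIMIT = 8192


def next_token_budget(current: int, truncated: bool) -> int:
    """Return the token budget for the next retry attempt."""
    if not truncated:
        # Format error without truncation: retry with the same budget.
        return current
    # Truncated response: double the budget, but never exceed the cap.
    return min(current * 2, MAX_TOKEN_LIMIT)


budget = 2048
for was_truncated in (True, True, True):
    budget = next_token_budget(budget, was_truncated)
    print(budget)  # 4096, 8192, 8192
```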
ouroboros/bigbang/interview.py
CHANGED
@@ -17,6 +17,7 @@ from pydantic import BaseModel, Field
 import structlog
 
 from ouroboros.core.errors import ProviderError, ValidationError
+from ouroboros.core.security import InputValidator
 from ouroboros.core.types import Result
 from ouroboros.providers.base import (
     CompletionConfig,
@@ -186,9 +187,11 @@ class InterviewEngine:
        Returns:
            Result containing the new InterviewState or ValidationError.
        """
-
+        # Validate initial context with security limits
+        is_valid, error_msg = InputValidator.validate_initial_context(initial_context)
+        if not is_valid:
            return Result.err(
-                ValidationError(
+                ValidationError(error_msg, field="initial_context")
            )
 
        if interview_id is None:
@@ -285,9 +288,11 @@ class InterviewEngine:
        Returns:
            Result containing updated state or ValidationError.
        """
-
+        # Validate user response with security limits
+        is_valid, error_msg = InputValidator.validate_user_response(user_response)
+        if not is_valid:
            return Result.err(
-                ValidationError(
+                ValidationError(error_msg, field="user_response")
            )
 
        if state.is_complete:
ouroboros/cli/commands/run.py
CHANGED
@@ -15,6 +15,7 @@ import yaml
 
 from ouroboros.cli.formatters import console
 from ouroboros.cli.formatters.panels import print_error, print_info, print_success
+from ouroboros.core.security import InputValidator
 
 app = typer.Typer(
     name="run",
@@ -33,8 +34,15 @@ def _load_seed_from_yaml(seed_file: Path) -> dict:
        Seed configuration dictionary.
 
    Raises:
-        typer.Exit: If file cannot be loaded.
+        typer.Exit: If file cannot be loaded or exceeds size limit.
    """
+    # Security: Validate file size to prevent DoS
+    file_size = seed_file.stat().st_size
+    is_valid, error_msg = InputValidator.validate_seed_file_size(file_size)
+    if not is_valid:
+        print_error(f"Seed file validation failed: {error_msg}")
+        raise typer.Exit(1)
+
    try:
        with open(seed_file) as f:
            return yaml.safe_load(f)
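The size check runs on `seed_file.stat().st_size` before the YAML parser ever sees the file, so an oversized seed is rejected cheaply. A standalone sketch of the same guard (the 1MB limit mirrors `ouroboros.core.security`; the function name here is illustrative):

```python
from pathlib import Path

import yaml

MAX_SEED_FILE_SIZE = 1_000_000  # 1MB, matching the limit in ouroboros.core.security


def load_seed_checked(seed_file: Path) -> dict:
    """Load a seed YAML file, rejecting empty or oversized files before parsing."""
    file_size = seed_file.stat().st_size
    if file_size <= 0:
        raise ValueError("Seed file is empty")
    if file_size > MAX_SEED_FILE_SIZE:
        raise ValueError(f"Seed file exceeds maximum size ({MAX_SEED_FILE_SIZE // 1024}KB)")
    with open(seed_file) as f:
        return yaml.safe_load(f)
```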
ouroboros/core/__init__.py
CHANGED
@@ -27,6 +27,12 @@ from ouroboros.core.seed import (
     Seed,
     SeedMetadata,
 )
+from ouroboros.core.security import (
+    InputValidator,
+    mask_api_key,
+    sanitize_for_logging,
+    validate_api_key_format,
+)
 from ouroboros.core.types import CostUnits, DriftScore, EventPayload, Result
 
 __all__ = [
@@ -59,4 +65,9 @@ __all__ = [
     "compress_context",
     "compress_context_with_llm",
     "create_filtered_context",
+    # Security utilities
+    "InputValidator",
+    "mask_api_key",
+    "validate_api_key_format",
+    "sanitize_for_logging",
 ]
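With these re-exports, callers can import the security helpers directly from `ouroboros.core` instead of the `ouroboros.core.security` submodule. For example, assuming the 0.2.0 wheel is installed:

```python
from ouroboros.core import InputValidator, mask_api_key

print(mask_api_key("sk-1234567890abcdef"))           # sk-...cdef
print(InputValidator.validate_user_response("   "))  # (False, 'Response cannot be only whitespace')
```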
ouroboros/core/security.py
ADDED
@@ -0,0 +1,327 @@
+"""Security utilities for Ouroboros.
+
+This module provides security-related utilities including:
+- API key validation and masking
+- Input sanitization
+- Size limits for external inputs
+
+Security Level: MEDIUM
+- API keys are masked in logs and error messages
+- Basic format validation for API keys
+- Size limits to prevent DoS attacks
+"""
+
+import re
+from typing import Any
+
+# Maximum sizes for external inputs (DoS prevention)
+MAX_INITIAL_CONTEXT_LENGTH = 50_000  # 50KB for initial interview context
+MAX_USER_RESPONSE_LENGTH = 10_000  # 10KB for interview responses
+MAX_SEED_FILE_SIZE = 1_000_000  # 1MB for seed YAML files
+MAX_LLM_RESPONSE_LENGTH = 100_000  # 100KB for LLM responses
+
+# API key patterns for validation (not exhaustive, basic format check)
+_API_KEY_PATTERNS: dict[str, re.Pattern[str]] = {
+    "openai": re.compile(r"^sk-[a-zA-Z0-9_-]{20,}$"),
+    "anthropic": re.compile(r"^sk-ant-[a-zA-Z0-9_-]{20,}$"),
+    "openrouter": re.compile(r"^sk-or-[a-zA-Z0-9_-]{20,}$"),
+    "google": re.compile(r"^AIza[a-zA-Z0-9_-]{35}$"),
+}
+
+# Sensitive field names that should be masked
+SENSITIVE_FIELD_NAMES = frozenset(
+    {
+        "password",
+        "api_key",
+        "apikey",
+        "api-key",
+        "secret",
+        "token",
+        "credential",
+        "auth",
+        "key",
+        "private",
+        "bearer",
+        "authorization",
+    }
+)
+
+# Sensitive value prefixes that indicate secrets
+SENSITIVE_PREFIXES = (
+    "sk-",
+    "pk-",
+    "api-",
+    "bearer ",
+    "token ",
+    "secret_",
+    "AIza",
+)
+
+
+def mask_api_key(api_key: str, visible_chars: int = 4) -> str:
+    """Mask an API key for safe logging/display.
+
+    Shows only the last few characters to help identify which key is being used.
+
+    Args:
+        api_key: The API key to mask.
+        visible_chars: Number of characters to show at the end (default 4).
+
+    Returns:
+        Masked API key like "sk-...xxxx" or "<empty>" if key is empty.
+
+    Example:
+        >>> mask_api_key("sk-1234567890abcdef")
+        'sk-...cdef'
+    """
+    if not api_key:
+        return "<empty>"
+
+    if len(api_key) <= visible_chars + 4:
+        # Key is too short to meaningfully mask
+        return "*" * len(api_key)
+
+    # Show prefix (like "sk-") and last few chars
+    if "-" in api_key[:6]:
+        prefix_end = api_key.index("-") + 1
+        prefix = api_key[:prefix_end]
+        return f"{prefix}...{api_key[-visible_chars:]}"
+
+    return f"...{api_key[-visible_chars:]}"
+
+
+def validate_api_key_format(api_key: str, provider: str | None = None) -> bool:
+    """Validate API key format (basic check, not authorization).
+
+    This performs a basic format validation. It does NOT verify that the key
+    is actually valid or authorized - that requires an API call.
+
+    Args:
+        api_key: The API key to validate.
+        provider: Optional provider name for specific validation.
+
+    Returns:
+        True if the key has a valid format.
+
+    Note:
+        This is a security convenience check, not a comprehensive validation.
+        Keys may be properly formatted but still invalid/expired.
+    """
+    if not api_key or len(api_key) < 10:
+        return False
+
+    # If provider specified, use specific pattern
+    if provider and provider.lower() in _API_KEY_PATTERNS:
+        pattern = _API_KEY_PATTERNS[provider.lower()]
+        return bool(pattern.match(api_key))
+
+    # Generic validation: must look like an API key
+    # Should have letters, numbers, possibly dashes/underscores
+    if not re.match(r"^[a-zA-Z0-9_-]{10,}$", api_key):
+        # Check if it's a prefixed key
+        return any(pattern.match(api_key) for pattern in _API_KEY_PATTERNS.values())
+
+    return True
+
+
+def is_sensitive_field(field_name: str) -> bool:
+    """Check if a field name indicates sensitive data.
+
+    Args:
+        field_name: The field name to check.
+
+    Returns:
+        True if the field likely contains sensitive data.
+    """
+    if not field_name:
+        return False
+
+    field_lower = field_name.lower()
+    return any(sensitive in field_lower for sensitive in SENSITIVE_FIELD_NAMES)
+
+
+def is_sensitive_value(value: Any) -> bool:
+    """Check if a value looks like sensitive data.
+
+    Args:
+        value: The value to check.
+
+    Returns:
+        True if the value appears to be sensitive (API key, token, etc).
+    """
+    if not isinstance(value, str):
+        return False
+
+    value_lower = value.lower()
+    return any(value_lower.startswith(prefix.lower()) for prefix in SENSITIVE_PREFIXES)
+
+
+def mask_sensitive_value(value: Any, field_name: str | None = None) -> str:
+    """Mask a potentially sensitive value for safe logging.
+
+    Args:
+        value: The value to potentially mask.
+        field_name: Optional field name for context.
+
+    Returns:
+        Masked string if sensitive, otherwise string representation.
+    """
+    if value is None:
+        return "<None>"
+
+    # Check if field name indicates sensitivity
+    if field_name and is_sensitive_field(field_name):
+        return "<REDACTED>"
+
+    # Check if value looks sensitive
+    if isinstance(value, str):
+        if is_sensitive_value(value):
+            return mask_api_key(value)
+
+        # Truncate long strings
+        if len(value) > 100:
+            return f"{value[:50]}...({len(value)} chars)"
+
+        return value
+
+    # For other types, show type info
+    if isinstance(value, (dict, list)):
+        return f"<{type(value).__name__} with {len(value)} items>"
+
+    return str(value)
+
+
+def sanitize_for_logging(data: dict[str, Any]) -> dict[str, Any]:
+    """Create a copy of data with sensitive values masked.
+
+    Use this before logging dictionaries that might contain sensitive data.
+
+    Args:
+        data: Dictionary that might contain sensitive data.
+
+    Returns:
+        New dictionary with sensitive values masked.
+
+    Example:
+        >>> sanitize_for_logging({"api_key": "sk-secret123", "name": "test"})
+        {'api_key': '<REDACTED>', 'name': 'test'}
+    """
+    result = {}
+    for key, value in data.items():
+        if is_sensitive_field(key):
+            result[key] = "<REDACTED>"
+        elif isinstance(value, str) and is_sensitive_value(value):
+            result[key] = mask_api_key(value)
+        elif isinstance(value, dict):
+            result[key] = sanitize_for_logging(value)
+        else:
+            result[key] = value
+    return result
+
+
+def truncate_input(text: str, max_length: int, suffix: str = "...") -> str:
+    """Truncate text to maximum length with suffix.
+
+    Args:
+        text: Text to truncate.
+        max_length: Maximum length including suffix.
+        suffix: Suffix to add if truncated (default "...").
+
+    Returns:
+        Truncated text or original if within limit.
+    """
+    if len(text) <= max_length:
+        return text
+
+    return text[: max_length - len(suffix)] + suffix
+
+
+class InputValidator:
+    """Validator for external inputs with size limits.
+
+    Provides validation methods for different types of external inputs
+    to prevent DoS attacks and ensure data quality.
+    """
+
+    @staticmethod
+    def validate_initial_context(context: str) -> tuple[bool, str]:
+        """Validate initial interview context.
+
+        Args:
+            context: The initial context string.
+
+        Returns:
+            Tuple of (is_valid, error_message). error_message is empty if valid.
+        """
+        if not context:
+            return False, "Initial context cannot be empty"
+
+        stripped = context.strip()
+        if not stripped:
+            return False, "Initial context cannot be only whitespace"
+
+        if len(stripped) > MAX_INITIAL_CONTEXT_LENGTH:
+            return (
+                False,
+                f"Initial context exceeds maximum length ({MAX_INITIAL_CONTEXT_LENGTH} chars)",
+            )
+
+        return True, ""
+
+    @staticmethod
+    def validate_user_response(response: str) -> tuple[bool, str]:
+        """Validate user response in interview.
+
+        Args:
+            response: The user's response string.
+
+        Returns:
+            Tuple of (is_valid, error_message). error_message is empty if valid.
+        """
+        if not response:
+            return False, "Response cannot be empty"
+
+        stripped = response.strip()
+        if not stripped:
+            return False, "Response cannot be only whitespace"
+
+        if len(stripped) > MAX_USER_RESPONSE_LENGTH:
+            return False, f"Response exceeds maximum length ({MAX_USER_RESPONSE_LENGTH} chars)"
+
+        return True, ""
+
+    @staticmethod
+    def validate_seed_file_size(file_size: int) -> tuple[bool, str]:
+        """Validate seed file size.
+
+        Args:
+            file_size: Size of the seed file in bytes.
+
+        Returns:
+            Tuple of (is_valid, error_message). error_message is empty if valid.
+        """
+        if file_size <= 0:
+            return False, "Seed file is empty"
+
+        if file_size > MAX_SEED_FILE_SIZE:
+            return False, f"Seed file exceeds maximum size ({MAX_SEED_FILE_SIZE // 1024}KB)"
+
+        return True, ""
+
+    @staticmethod
+    def validate_llm_response(response: str) -> tuple[bool, str]:
+        """Validate LLM response length.
+
+        Args:
+            response: The LLM response content.
+
+        Returns:
+            Tuple of (is_valid, error_message). error_message is empty if valid.
+        """
+        if not response:
+            return True, ""  # Empty response is valid (model may return empty)
+
+        if len(response) > MAX_LLM_RESPONSE_LENGTH:
+            return False, f"LLM response exceeds maximum length ({MAX_LLM_RESPONSE_LENGTH} chars)"
+
+        return True, ""
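The module boils down to three small building blocks: masking for display, basic format checks for keys, and `(is_valid, error_message)` validators for size limits. The expected behaviour below follows from the code and docstrings above, assuming the module is importable as `ouroboros.core.security`:

```python
from ouroboros.core.security import (
    InputValidator,
    mask_api_key,
    sanitize_for_logging,
    validate_api_key_format,
)

# Masking keeps the provider prefix and the last four characters.
assert mask_api_key("sk-1234567890abcdef") == "sk-...cdef"

# Format validation is a shape check only; it cannot prove the key works.
assert validate_api_key_format("sk-or-abcdefghijklmnopqrstuv", provider="openrouter")
assert not validate_api_key_format("short", provider="openai")

# Size validators return a tuple instead of raising.
ok, msg = InputValidator.validate_seed_file_size(2_000_000)
assert not ok and "maximum size" in msg

# Nested dictionaries are sanitized recursively before logging.
print(sanitize_for_logging({"api_key": "sk-secret123", "nested": {"token": "abc"}}))
# {'api_key': '<REDACTED>', 'nested': {'token': '<REDACTED>'}}
```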
ouroboros/observability/logging.py
CHANGED
@@ -50,6 +50,12 @@ from typing import Any
 from pydantic import BaseModel, Field
 import structlog
 
+from ouroboros.core.security import (
+    is_sensitive_field,
+    is_sensitive_value,
+    mask_api_key,
+)
+
 
 class LogMode(str, Enum):
     """Logging output mode."""
@@ -159,6 +165,68 @@ def _setup_file_handler(config: LoggingConfig) -> TimedRotatingFileHandler | Non
     return handler
 
 
+def _mask_sensitive_data(
+    _logger: Any,
+    _method_name: str,
+    event_dict: dict[str, Any],
+) -> dict[str, Any]:
+    """Structlog processor that masks sensitive data in log entries.
+
+    Automatically detects and masks API keys, tokens, and other sensitive
+    values to prevent accidental exposure in logs.
+
+    Args:
+        _logger: The logger instance (unused).
+        _method_name: The log method name (unused).
+        event_dict: The event dictionary to process.
+
+    Returns:
+        Event dictionary with sensitive values masked.
+    """
+    for key, value in list(event_dict.items()):
+        # Skip standard structlog keys
+        if key in ("event", "level", "timestamp", "filename", "lineno"):
+            continue
+
+        # Check if field name indicates sensitivity
+        if is_sensitive_field(key):
+            event_dict[key] = "<REDACTED>"
+            continue
+
+        # Check if value looks sensitive
+        if isinstance(value, str) and is_sensitive_value(value):
+            event_dict[key] = mask_api_key(value)
+            continue
+
+        # Recursively handle nested dicts
+        if isinstance(value, dict):
+            event_dict[key] = _mask_dict_sensitive_data(value)
+
+    return event_dict
+
+
+def _mask_dict_sensitive_data(data: dict[str, Any]) -> dict[str, Any]:
+    """Recursively mask sensitive data in a dictionary.
+
+    Args:
+        data: Dictionary to process.
+
+    Returns:
+        Dictionary with sensitive values masked.
+    """
+    result = {}
+    for key, value in data.items():
+        if is_sensitive_field(key):
+            result[key] = "<REDACTED>"
+        elif isinstance(value, str) and is_sensitive_value(value):
+            result[key] = mask_api_key(value)
+        elif isinstance(value, dict):
+            result[key] = _mask_dict_sensitive_data(value)
+        else:
+            result[key] = value
+    return result
+
+
 def _get_shared_processors() -> list[Any]:
     """Get the shared processor chain for structlog.
 
@@ -170,6 +238,8 @@ def _get_shared_processors() -> list[Any]:
     return [
         # Merge contextvars into event dict (for cross-async context)
         structlog.contextvars.merge_contextvars,
+        # Mask sensitive data (API keys, tokens, etc.) - SECURITY
+        _mask_sensitive_data,
         # Add log level to all entries
         structlog.processors.add_log_level,
         # Add timestamp in ISO 8601 format
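`_mask_sensitive_data` follows structlog's processor protocol: a callable that receives the logger, the method name, and the event dict, and returns the (possibly modified) event dict. That is why it can be dropped into the shared chain ahead of the other processors. A toy, self-contained illustration of the same wiring (the `redact_keys` processor below is illustrative, not the package's; `structlog.configure` and the built-in processors are standard structlog API):

```python
import structlog


def redact_keys(_logger, _method_name, event_dict):
    """Toy structlog processor: redact any field whose name contains 'key'."""
    for field in list(event_dict):
        if "key" in field.lower():
            event_dict[field] = "<REDACTED>"
    return event_dict


structlog.configure(
    processors=[
        redact_keys,                         # runs first, like _mask_sensitive_data above
        structlog.processors.add_log_level,
        structlog.processors.JSONRenderer(),
    ]
)

structlog.get_logger().info("provider.call", api_key="sk-1234567890abcdef")
# {"api_key": "<REDACTED>", "event": "provider.call", "level": "info"}  (key order may vary)
```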
ouroboros/providers/litellm_adapter.py
CHANGED
@@ -12,6 +12,7 @@ import stamina
 import structlog
 
 from ouroboros.core.errors import ProviderError
+from ouroboros.core.security import InputValidator, MAX_LLM_RESPONSE_LENGTH
 from ouroboros.core.types import Result
 from ouroboros.providers.base import (
     CompletionConfig,
@@ -193,9 +194,22 @@ class LiteLLMAdapter:
        """
        choice = response.choices[0]
        usage = response.usage
+        content = choice.message.content or ""
+
+        # Security: Validate LLM response length to prevent DoS
+        is_valid, error_msg = InputValidator.validate_llm_response(content)
+        if not is_valid:
+            log.warning(
+                "llm.response.truncated",
+                model=config.model,
+                original_length=len(content),
+                max_length=MAX_LLM_RESPONSE_LENGTH,
+            )
+            # Truncate oversized responses instead of failing
+            content = content[:MAX_LLM_RESPONSE_LENGTH]
 
        return CompletionResponse(
-            content=
+            content=content,
            model=response.model or config.model,
            usage=UsageInfo(
                prompt_tokens=usage.prompt_tokens if usage else 0,
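Instead of failing the request, an oversized completion is clipped to `MAX_LLM_RESPONSE_LENGTH` after a warning is logged. The guard itself is a few lines of plain Python (the constant is copied from the diff; the function name is illustrative):

```python
MAX_LLM_RESPONSE_LENGTH = 100_000  # 100KB, matching ouroboros.core.security


def clip_llm_content(content: str | None) -> str:
    """Normalize a completion's content and clip it to the configured limit."""
    text = content or ""
    if len(text) > MAX_LLM_RESPONSE_LENGTH:
        # Mirrors the adapter: truncate oversized responses instead of erroring out.
        text = text[:MAX_LLM_RESPONSE_LENGTH]
    return text


assert clip_llm_content(None) == ""
assert len(clip_llm_content("x" * 150_000)) == MAX_LLM_RESPONSE_LENGTH
```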
{ouroboros_ai-0.1.1.dist-info → ouroboros_ai-0.2.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ouroboros-ai
-Version: 0.1.1
+Version: 0.2.0
 Summary: Self-Improving AI Workflow System
 Author-email: Q00 <jqyu.lee@gmail.com>
 License-File: LICENSE
@@ -52,7 +52,7 @@ Description-Content-Type: text/markdown
 <br/>
 
 <p align="center">
-  <code>
+  <code>74 modules</code> · <code>1,341 tests</code> · <code>97%+ coverage</code>
 </p>
 
 <br/>
@@ -506,6 +506,36 @@ uv run ouroboros status health
 
 <br/>
 
+## ◈ Security
+
+<br/>
+
+Ouroboros includes built-in security features:
+
+| Feature | Description |
+|---------|-------------|
+| **API Key Masking** | Keys are automatically masked in logs (`sk-...xxxx`) |
+| **Log Sanitization** | Sensitive fields (password, token, secret) are redacted |
+| **Input Validation** | Size limits prevent DoS attacks (50KB context, 1MB seed files) |
+| **Credentials Protection** | `credentials.yaml` uses chmod 600 permissions |
+
+```python
+from ouroboros.core import mask_api_key, sanitize_for_logging
+
+# Mask API keys for display
+masked = mask_api_key("sk-1234567890abcdef")  # "sk-...cdef"
+
+# Sanitize dicts before logging
+safe_data = sanitize_for_logging({"api_key": "sk-secret", "name": "test"})
+# {"api_key": "<REDACTED>", "name": "test"}
+```
+
+<br/>
+
+---
+
+<br/>
+
 ## ◈ Development
 
 <br/>
{ouroboros_ai-0.1.1.dist-info → ouroboros_ai-0.2.0.dist-info}/RECORD
CHANGED
@@ -1,16 +1,16 @@
-ouroboros/__init__.py,sha256=
+ouroboros/__init__.py,sha256=lmQgHmNOWxGlmwayNvp1ckCuJycL8WzX5Y-7IzrFaVM,701
 ouroboros/__main__.py,sha256=f_qnL0zPJwh9kfQqynX5adpqzj8ilj94zW5Q2loqGxE,168
 ouroboros/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ouroboros/bigbang/__init__.py,sha256=9xGqOYwMKBifb7QVwonc_wndNLMZb7ZH7xgMHaz_70A,951
-ouroboros/bigbang/ambiguity.py,sha256=
-ouroboros/bigbang/interview.py,sha256=
+ouroboros/bigbang/ambiguity.py,sha256=hm-6LeuD_j14uzgZ2wnbBYq4Q24J7kEk4ag0DO0JtAU,18516
+ouroboros/bigbang/interview.py,sha256=zm1VrDNqE8ouGG62h8qnNkIpnUf3HHv4NjzMKDIaWcY,17147
 ouroboros/bigbang/seed_generator.py,sha256=7MY9a7Eua_zVGDWIVDlzOZJjeAwz0DRatXJg0PvMgiY,20082
 ouroboros/cli/__init__.py,sha256=CRpxsqJadZL7bCS-yrULWC51tqPKfPsxQLgt0JiwP4g,225
 ouroboros/cli/main.py,sha256=ldvqtVpw2xZwE8G7M34qY_7qg0RuNiydjdmmU-hdJvM,1485
 ouroboros/cli/commands/__init__.py,sha256=LZpEvU80R4Cq0LwgkwOluEGNsmmJ9K7roeDQ6bsbbDc,193
 ouroboros/cli/commands/config.py,sha256=kcqi0Wo09oo1MMyZIX4k2IDICV1SAX6HzAXZaIJGdKY,2100
 ouroboros/cli/commands/init.py,sha256=HmXwTLyso6p8Df5aAguxh-XTIYZGkzGltGXqJvDxI78,13536
-ouroboros/cli/commands/run.py,sha256=
+ouroboros/cli/commands/run.py,sha256=DnxfbSdATDIaNYJXLcwAcR9NqNVGkVlHgYJImaSVn4I,6328
 ouroboros/cli/commands/status.py,sha256=Bnqpj1UkqhpBPYA11DV-Z63Bz8pjrebhlzeMKwz3_Ps,2217
 ouroboros/cli/formatters/__init__.py,sha256=-Ik7KXajaIExBxSAp5iYp8gO9SfXudGjyDe2nm2_msw,691
 ouroboros/cli/formatters/panels.py,sha256=d5TANIZy6FEEdpfnZaZ0epe-qIHJbh13qTCt23ur1jA,3388
@@ -19,10 +19,11 @@ ouroboros/cli/formatters/tables.py,sha256=XDzeew8d7_b-cQ54QH16fljR-lmwwo94-9Gbpr
 ouroboros/config/__init__.py,sha256=rQv4ph9qv1jP6YmIOOFBM-pjDR5br9RcW693mr0Hj_U,2006
 ouroboros/config/loader.py,sha256=yqHdrQs3bHbpp49jbjctRDx-zfFNI2rLco8JX44Awr0,8907
 ouroboros/config/models.py,sha256=d12m7-pCTQQASFfBTShIRS0zStn1gpzRWYe68Aky9T0,11740
-ouroboros/core/__init__.py,sha256=
+ouroboros/core/__init__.py,sha256=VHGSB01i56Rncbx-vfKqpvZ1oXDJUu0xp0kQ6tuwRqw,1622
 ouroboros/core/ac_tree.py,sha256=GNyeWB3GVrQhYI83_g2ISYoviKnUf-U6vTY9p6xkklM,11949
 ouroboros/core/context.py,sha256=A5WVPgsJlK-CDnDJx-_Tcfh_lE2AE3EYud45NKnYI2E,15675
 ouroboros/core/errors.py,sha256=e4kiduueE3e2HvNyOJLnFRGFoue2vfW8JTerzxjp5TM,8057
+ouroboros/core/security.py,sha256=LJq5FJzWdUIcjZGujI9xv1k3sFaD3XArBOlmArW-brg,9594
 ouroboros/core/seed.py,sha256=OIO4p1evYqpIrt841LVDVMaBZq9RCkM-e78MOb94BS8,7114
 ouroboros/core/types.py,sha256=SIc7XSIRizkeQU0kq4U02mZFsLtqVLmAo3ANypyUUfQ,6137
 ouroboros/evaluation/__init__.py,sha256=rwNeCtbFvDmq2Ad3YXj2n1tz2i9fESHQKwgjIyCZtCs,3067
@@ -43,7 +44,7 @@ ouroboros/execution/double_diamond.py,sha256=lbk9cY3Awd0h_YFp1G5OJnDpkV8htanSDhQ
 ouroboros/execution/subagent.py,sha256=_0-Ayz1p4r-cJP6kAYQP-bf9g2yLKXV81wffurBK9YM,8727
 ouroboros/observability/__init__.py,sha256=jgLIxPgBPJgSLCUjxR28tO3gkOuknbnb0H87NwkCl6Q,1654
 ouroboros/observability/drift.py,sha256=1BxZq-XIfhOJpTiBzbqgMpxziiJsb9KcLg_F5QKBIeM,11361
-ouroboros/observability/logging.py,sha256=
+ouroboros/observability/logging.py,sha256=MC_VzyAyJtTq_3iv7uLxvlO9eCxtdY2ZyJ7ObwDds98,16994
 ouroboros/observability/retrospective.py,sha256=FH_9UC20RnH7OHNXMVIbsqC74B_4KIUy0UjtK-rguXU,11177
 ouroboros/orchestrator/__init__.py,sha256=g1aZSEM9gbl12mHINYoS93X1gacqePwrK4ElujoN0Uk,2130
 ouroboros/orchestrator/adapter.py,sha256=TpvgVMNfvNqvuffn41JDMYjWt2MFCLqTW1MtwOEZ-6E,13152
@@ -61,7 +62,7 @@ ouroboros/persistence/migrations/scripts/001_initial.sql,sha256=ZkABj9VKEyvwYwCm
 ouroboros/providers/__init__.py,sha256=sFQ049Gizx2GxWUTlsCLZHaskV8NVwPDdkXiLEWhrbc,583
 ouroboros/providers/base.py,sha256=u86bWAXtNIVCL1SxqXFK9sqpL6SZOc9h2vxAuVh7mxo,3823
 ouroboros/providers/claude_code_adapter.py,sha256=rVz_5eYRPL9SMt5PQBIbYGHLkRymTCXjCwZ6oZwMrCM,7285
-ouroboros/providers/litellm_adapter.py,sha256=
+ouroboros/providers/litellm_adapter.py,sha256=ljl1SywN1QXEy6LrLhsUYvh9qc0RUuKIG8XFCRtU4yg,10761
 ouroboros/resilience/__init__.py,sha256=jcMdyk5WwaIh7iFVQ5rwaexCnnVpnumJUgWf4GO6w_4,1980
 ouroboros/resilience/lateral.py,sha256=Z4B7pOrD93D6bXu8BqrUvibqYSGyjv8Ubp6nWfLipjM,21582
 ouroboros/resilience/stagnation.py,sha256=k9tiAm__CzclpfRB6Z-8jZdfRwvr2la-BsqDJmEq8Ao,25659
@@ -74,8 +75,8 @@ ouroboros/routing/tiers.py,sha256=QhBQUOo2-h5Z3dEtC0lcOzkRnqTi2W7Jl46750AVNig,73
 ouroboros/secondary/__init__.py,sha256=kYQ7C4bnBzwDlPrU8qZrOPr2ZuTBaftGktOXl5WZl5Q,1123
 ouroboros/secondary/scheduler.py,sha256=sPVVWJ1q0yewRAM-Rm1j_HMerSe4cavIvP9z4xlUuL4,13737
 ouroboros/secondary/todo_registry.py,sha256=4W3C9Uro29VrVLCPKUlpH_BYpzQSbRNW1oMnDYyEhEw,13880
-ouroboros_ai-0.
-ouroboros_ai-0.
-ouroboros_ai-0.
-ouroboros_ai-0.
-ouroboros_ai-0.
+ouroboros_ai-0.2.0.dist-info/METADATA,sha256=-znRAEKqEghugiU67FXrH52Hyt4kBtigrQvwXW-3J_E,19661
+ouroboros_ai-0.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ouroboros_ai-0.2.0.dist-info/entry_points.txt,sha256=MoETHup6rVkR6AsyjoRzAgIuvVtYYm3Jw40itV3_VyI,53
+ouroboros_ai-0.2.0.dist-info/licenses/LICENSE,sha256=n2X-q26TqpXnoBo0t_WouhFxWw663_q5FmbYDZayoHo,1060
+ouroboros_ai-0.2.0.dist-info/RECORD,,
{ouroboros_ai-0.1.1.dist-info → ouroboros_ai-0.2.0.dist-info}/WHEEL
File without changes
{ouroboros_ai-0.1.1.dist-info → ouroboros_ai-0.2.0.dist-info}/entry_points.txt
File without changes
{ouroboros_ai-0.1.1.dist-info → ouroboros_ai-0.2.0.dist-info}/licenses/LICENSE
File without changes