ouroboros-ai 0.2.2__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -9,6 +9,8 @@ The scoring algorithm evaluates three key components:
 - Success Criteria Clarity (30%): How measurable the success criteria are
 """
 
+import json
+import re
 from dataclasses import dataclass
 from typing import Any
 
@@ -139,6 +141,7 @@ class AmbiguityScorer:
     temperature: float = SCORING_TEMPERATURE
     initial_max_tokens: int = 2048
     max_retries: int | None = None  # None = unlimited retries
+    max_format_error_retries: int = 5  # Stop after N format errors (non-truncation)
 
     async def score(
         self, state: InterviewState
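
The new `max_format_error_retries` field caps how many malformed (non-truncation) responses the scorer tolerates before giving up, per its inline comment; this hunk does not show where the field is read. A minimal, purely illustrative sketch of that kind of cap, with the canned replies and the `json.loads` step standing in for the real scoring call:

```python
import json
from itertools import count

# Purely illustrative: only max_format_error_retries comes from the diff; the
# canned replies and the json.loads step stand in for the real scoring call.
max_format_error_retries = 5
replies = iter(["garbled", "still not json", '{"goal_clarity_score": 0.6}'])

format_errors = 0
for attempt in count(1):
    try:
        data = json.loads(next(replies))  # malformed output raises ValueError
        break
    except ValueError:
        format_errors += 1
        if format_errors >= max_format_error_retries:
            raise
print(f"parsed {data} after {attempt} attempts")  # succeeds on attempt 3
```

Presumably truncation is excluded because a truncated reply can be retried (for instance with a larger token budget), whereas repeated format errors suggest the prompt is simply not being followed and the loop should abort.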
@@ -303,38 +306,19 @@ class AmbiguityScorer:
         Returns:
             System prompt string.
         """
-        return """You are an expert requirements analyst evaluating the clarity of software requirements.
-
-Your task is to assess how clear and unambiguous the requirements are based on an interview conversation.
+        return """You are an expert requirements analyst. Evaluate the clarity of software requirements.
 
 Evaluate three components:
-1. Goal Clarity (40% weight): Is the goal statement specific and well-defined?
-   - Clear: "Build a CLI tool for task management with project grouping"
-   - Unclear: "Build something useful for productivity"
-
-2. Constraint Clarity (30% weight): Are constraints and limitations specified?
-   - Clear: "Must use Python 3.14+, no external database dependencies"
-   - Unclear: No mention of technical constraints or limitations
-
-3. Success Criteria Clarity (30% weight): Are success criteria measurable?
-   - Clear: "Tasks can be created, edited, deleted; supports filtering by status"
-   - Unclear: "The tool should be easy to use"
-
-For each component, provide:
-- A clarity score between 0.0 (completely unclear) and 1.0 (perfectly clear)
-- A brief justification (1-2 sentences max) explaining the score
+1. Goal Clarity (40%): Is the goal specific and well-defined?
+2. Constraint Clarity (30%): Are constraints and limitations specified?
+3. Success Criteria Clarity (30%): Are success criteria measurable?
 
-IMPORTANT: You MUST provide ALL six fields below. Keep justifications concise.
+Score each from 0.0 (unclear) to 1.0 (perfectly clear). Scores above 0.8 require very specific requirements.
 
-Respond in this exact format:
-GOAL_CLARITY_SCORE: <score>
-GOAL_CLARITY_JUSTIFICATION: <justification in 1-2 sentences>
-CONSTRAINT_CLARITY_SCORE: <score>
-CONSTRAINT_CLARITY_JUSTIFICATION: <justification in 1-2 sentences>
-SUCCESS_CRITERIA_CLARITY_SCORE: <score>
-SUCCESS_CRITERIA_CLARITY_JUSTIFICATION: <justification in 1-2 sentences>
+RESPOND ONLY WITH VALID JSON. No other text before or after.
 
-Be strict in your evaluation. Scores above 0.8 require very specific, measurable requirements."""
+Required JSON format:
+{"goal_clarity_score": 0.0, "goal_clarity_justification": "string", "constraint_clarity_score": 0.0, "constraint_clarity_justification": "string", "success_criteria_clarity_score": 0.0, "success_criteria_clarity_justification": "string"}"""
 
     def _build_scoring_user_prompt(self, context: str) -> str:
         """Build user prompt with interview context.
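
Net effect of the prompt rewrite: the model is now told to answer with a single JSON object instead of the old `GOAL_CLARITY_SCORE:`-style labeled lines. A quick sketch of a reply that satisfies the new contract and carries the six keys the parser checks for (values invented for illustration):

```python
import json

# Hypothetical model reply matching the "Required JSON format" in the new prompt.
reply = """{
  "goal_clarity_score": 0.7,
  "goal_clarity_justification": "Names a concrete CLI task manager.",
  "constraint_clarity_score": 0.4,
  "constraint_clarity_justification": "Only the Python version is pinned.",
  "success_criteria_clarity_score": 0.5,
  "success_criteria_clarity_justification": "Features listed, but nothing measurable."
}"""

data = json.loads(reply)
assert set(data) == {
    "goal_clarity_score", "goal_clarity_justification",
    "constraint_clarity_score", "constraint_clarity_justification",
    "success_criteria_clarity_score", "success_criteria_clarity_justification",
}
```

A reply missing any of the six keys still fails the required-field validation further down in `ambiguity.py`.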
@@ -365,27 +349,23 @@ Analyze each component and provide scores with justifications."""
         Raises:
             ValueError: If response cannot be parsed.
         """
-        lines = response.strip().split("\n")
-        scores: dict[str, Any] = {}
-
-        for line in lines:
-            line = line.strip()
-            if not line:
-                continue
-
-            for prefix in [
-                "GOAL_CLARITY_SCORE:",
-                "GOAL_CLARITY_JUSTIFICATION:",
-                "CONSTRAINT_CLARITY_SCORE:",
-                "CONSTRAINT_CLARITY_JUSTIFICATION:",
-                "SUCCESS_CRITERIA_CLARITY_SCORE:",
-                "SUCCESS_CRITERIA_CLARITY_JUSTIFICATION:",
-            ]:
-                if line.startswith(prefix):
-                    key = prefix[:-1].lower()  # Remove colon and lowercase
-                    value = line[len(prefix) :].strip()
-                    scores[key] = value
-                    break
+        # Extract JSON from response (handle markdown code blocks)
+        text = response.strip()
+
+        # Try to find JSON in markdown code block
+        json_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, re.DOTALL)
+        if json_match:
+            text = json_match.group(1)
+        else:
+            # Try to find raw JSON object
+            json_match = re.search(r"\{.*\}", text, re.DOTALL)
+            if json_match:
+                text = json_match.group(0)
+
+        try:
+            data = json.loads(text)
+        except json.JSONDecodeError as e:
+            raise ValueError(f"Invalid JSON response: {e}") from e
 
         # Validate all required fields are present
         required_fields = [
@@ -398,35 +378,32 @@ Analyze each component and provide scores with justifications."""
         ]
 
         for field_name in required_fields:
-            if field_name not in scores:
+            if field_name not in data:
                 raise ValueError(f"Missing required field: {field_name}")
 
-        # Parse scores to float
-        def parse_score(value: str) -> float:
-            try:
-                score = float(value)
-                return max(0.0, min(1.0, score))  # Clamp to [0, 1]
-            except ValueError as e:
-                raise ValueError(f"Invalid score value: {value}") from e
+        # Parse and clamp scores
+        def clamp_score(value: Any) -> float:
+            score = float(value)
+            return max(0.0, min(1.0, score))
 
         return ScoreBreakdown(
             goal_clarity=ComponentScore(
                 name="Goal Clarity",
-                clarity_score=parse_score(scores["goal_clarity_score"]),
+                clarity_score=clamp_score(data["goal_clarity_score"]),
                 weight=GOAL_CLARITY_WEIGHT,
-                justification=scores["goal_clarity_justification"],
+                justification=str(data["goal_clarity_justification"]),
             ),
             constraint_clarity=ComponentScore(
                 name="Constraint Clarity",
-                clarity_score=parse_score(scores["constraint_clarity_score"]),
+                clarity_score=clamp_score(data["constraint_clarity_score"]),
                 weight=CONSTRAINT_CLARITY_WEIGHT,
-                justification=scores["constraint_clarity_justification"],
+                justification=str(data["constraint_clarity_justification"]),
             ),
             success_criteria_clarity=ComponentScore(
                 name="Success Criteria Clarity",
-                clarity_score=parse_score(scores["success_criteria_clarity_score"]),
+                clarity_score=clamp_score(data["success_criteria_clarity_score"]),
                 weight=SUCCESS_CRITERIA_CLARITY_WEIGHT,
-                justification=scores["success_criteria_clarity_justification"],
+                justification=str(data["success_criteria_clarity_justification"]),
             ),
         )
 
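
The replacement parser first tries to lift the JSON object out of a markdown code fence, falls back to the first-to-last brace span, and only then decodes. A self-contained sketch of that extraction path, reusing the two regexes added above (the `extract_json` helper name is ours, not the package's):

```python
import json
import re

def extract_json(response: str) -> dict:
    """Pull a JSON object out of a raw or markdown-fenced model reply."""
    text = response.strip()
    # Prefer a fenced ```json block if one is present.
    match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, re.DOTALL)
    if match:
        text = match.group(1)
    else:
        # Otherwise take the span from the first "{" to the last "}".
        match = re.search(r"\{.*\}", text, re.DOTALL)
        if match:
            text = match.group(0)
    return json.loads(text)  # still raises ValueError if the payload is malformed

fenced = 'Sure!\n```json\n{"goal_clarity_score": 0.8}\n```'
bare = '{"goal_clarity_score": 0.8}'
assert extract_json(fenced) == extract_json(bare) == {"goal_clarity_score": 0.8}
```

Out-of-range scores are then clamped into [0, 1] by `clamp_score` before the `ScoreBreakdown` is built.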
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ouroboros-ai
-Version: 0.2.2
+Version: 0.2.3
 Summary: Self-Improving AI Workflow System
 Author-email: Q00 <jqyu.lee@gmail.com>
 License-File: LICENSE
@@ -2,7 +2,7 @@ ouroboros/__init__.py,sha256=lmQgHmNOWxGlmwayNvp1ckCuJycL8WzX5Y-7IzrFaVM,701
 ouroboros/__main__.py,sha256=f_qnL0zPJwh9kfQqynX5adpqzj8ilj94zW5Q2loqGxE,168
 ouroboros/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ouroboros/bigbang/__init__.py,sha256=9xGqOYwMKBifb7QVwonc_wndNLMZb7ZH7xgMHaz_70A,951
-ouroboros/bigbang/ambiguity.py,sha256=pmvahQP41OVL8o6N7DkNHM8zqsLYYhem_CfM2X5xuGE,18911
+ouroboros/bigbang/ambiguity.py,sha256=5KM8xjATknjLZguVa90Yii6o3pzXE4PU4BJIP6Ii938,17955
 ouroboros/bigbang/interview.py,sha256=zm1VrDNqE8ouGG62h8qnNkIpnUf3HHv4NjzMKDIaWcY,17147
 ouroboros/bigbang/seed_generator.py,sha256=7MY9a7Eua_zVGDWIVDlzOZJjeAwz0DRatXJg0PvMgiY,20082
 ouroboros/cli/__init__.py,sha256=CRpxsqJadZL7bCS-yrULWC51tqPKfPsxQLgt0JiwP4g,225
@@ -75,8 +75,8 @@ ouroboros/routing/tiers.py,sha256=QhBQUOo2-h5Z3dEtC0lcOzkRnqTi2W7Jl46750AVNig,73
 ouroboros/secondary/__init__.py,sha256=kYQ7C4bnBzwDlPrU8qZrOPr2ZuTBaftGktOXl5WZl5Q,1123
 ouroboros/secondary/scheduler.py,sha256=sPVVWJ1q0yewRAM-Rm1j_HMerSe4cavIvP9z4xlUuL4,13737
 ouroboros/secondary/todo_registry.py,sha256=4W3C9Uro29VrVLCPKUlpH_BYpzQSbRNW1oMnDYyEhEw,13880
-ouroboros_ai-0.2.2.dist-info/METADATA,sha256=v_WwLBVh1qLB3vaFS2PwNCiODepMmq-zitj82ty4jPs,19661
-ouroboros_ai-0.2.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-ouroboros_ai-0.2.2.dist-info/entry_points.txt,sha256=MoETHup6rVkR6AsyjoRzAgIuvVtYYm3Jw40itV3_VyI,53
-ouroboros_ai-0.2.2.dist-info/licenses/LICENSE,sha256=n2X-q26TqpXnoBo0t_WouhFxWw663_q5FmbYDZayoHo,1060
-ouroboros_ai-0.2.2.dist-info/RECORD,,
+ouroboros_ai-0.2.3.dist-info/METADATA,sha256=pAjfUYPmqTUzuLJoNQcoJx88R8yZwj_ALVniBc6jLGg,19661
+ouroboros_ai-0.2.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ouroboros_ai-0.2.3.dist-info/entry_points.txt,sha256=MoETHup6rVkR6AsyjoRzAgIuvVtYYm3Jw40itV3_VyI,53
+ouroboros_ai-0.2.3.dist-info/licenses/LICENSE,sha256=n2X-q26TqpXnoBo0t_WouhFxWw663_q5FmbYDZayoHo,1060
+ouroboros_ai-0.2.3.dist-info/RECORD,,