synth-ai 0.4.1__py3-none-any.whl → 0.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of synth-ai might be problematic.
Files changed (153)
  1. synth_ai/__init__.py +13 -13
  2. synth_ai/cli/__init__.py +6 -15
  3. synth_ai/cli/commands/eval/__init__.py +6 -15
  4. synth_ai/cli/commands/eval/config.py +338 -0
  5. synth_ai/cli/commands/eval/core.py +236 -1091
  6. synth_ai/cli/commands/eval/runner.py +704 -0
  7. synth_ai/cli/commands/eval/validation.py +44 -117
  8. synth_ai/cli/commands/filter/core.py +7 -7
  9. synth_ai/cli/commands/filter/validation.py +2 -2
  10. synth_ai/cli/commands/smoke/core.py +7 -17
  11. synth_ai/cli/commands/status/__init__.py +1 -64
  12. synth_ai/cli/commands/status/client.py +50 -151
  13. synth_ai/cli/commands/status/config.py +3 -83
  14. synth_ai/cli/commands/status/errors.py +4 -13
  15. synth_ai/cli/commands/status/subcommands/__init__.py +2 -8
  16. synth_ai/cli/commands/status/subcommands/config.py +13 -0
  17. synth_ai/cli/commands/status/subcommands/files.py +18 -63
  18. synth_ai/cli/commands/status/subcommands/jobs.py +28 -311
  19. synth_ai/cli/commands/status/subcommands/models.py +18 -62
  20. synth_ai/cli/commands/status/subcommands/runs.py +16 -63
  21. synth_ai/cli/commands/status/subcommands/session.py +67 -172
  22. synth_ai/cli/commands/status/subcommands/summary.py +24 -32
  23. synth_ai/cli/commands/status/subcommands/utils.py +41 -0
  24. synth_ai/cli/commands/status/utils.py +16 -107
  25. synth_ai/cli/commands/train/__init__.py +18 -20
  26. synth_ai/cli/commands/train/errors.py +3 -3
  27. synth_ai/cli/commands/train/prompt_learning_validation.py +15 -16
  28. synth_ai/cli/commands/train/validation.py +7 -7
  29. synth_ai/cli/commands/train/{judge_schemas.py → verifier_schemas.py} +33 -34
  30. synth_ai/cli/commands/train/verifier_validation.py +235 -0
  31. synth_ai/cli/demo_apps/demo_task_apps/math/config.toml +0 -1
  32. synth_ai/cli/demo_apps/demo_task_apps/math/modal_task_app.py +2 -6
  33. synth_ai/cli/demo_apps/math/config.toml +0 -1
  34. synth_ai/cli/demo_apps/math/modal_task_app.py +2 -6
  35. synth_ai/cli/demo_apps/mipro/task_app.py +25 -47
  36. synth_ai/cli/lib/apps/task_app.py +12 -13
  37. synth_ai/cli/lib/task_app_discovery.py +6 -6
  38. synth_ai/cli/lib/train_cfgs.py +10 -10
  39. synth_ai/cli/task_apps/__init__.py +11 -0
  40. synth_ai/cli/task_apps/commands.py +7 -15
  41. synth_ai/core/env.py +12 -1
  42. synth_ai/core/errors.py +1 -2
  43. synth_ai/core/integrations/cloudflare.py +209 -33
  44. synth_ai/core/tracing_v3/abstractions.py +46 -0
  45. synth_ai/data/__init__.py +3 -30
  46. synth_ai/data/enums.py +1 -20
  47. synth_ai/data/rewards.py +100 -3
  48. synth_ai/products/graph_evolve/__init__.py +1 -2
  49. synth_ai/products/graph_evolve/config.py +16 -16
  50. synth_ai/products/graph_evolve/converters/__init__.py +3 -3
  51. synth_ai/products/graph_evolve/converters/openai_sft.py +7 -7
  52. synth_ai/products/graph_evolve/examples/hotpotqa/config.toml +1 -1
  53. synth_ai/products/graph_gepa/__init__.py +23 -0
  54. synth_ai/products/graph_gepa/converters/__init__.py +19 -0
  55. synth_ai/products/graph_gepa/converters/openai_sft.py +29 -0
  56. synth_ai/sdk/__init__.py +45 -35
  57. synth_ai/sdk/api/eval/__init__.py +33 -0
  58. synth_ai/sdk/api/eval/job.py +732 -0
  59. synth_ai/sdk/api/research_agent/__init__.py +276 -66
  60. synth_ai/sdk/api/train/builders.py +181 -0
  61. synth_ai/sdk/api/train/cli.py +41 -33
  62. synth_ai/sdk/api/train/configs/__init__.py +6 -4
  63. synth_ai/sdk/api/train/configs/prompt_learning.py +127 -33
  64. synth_ai/sdk/api/train/configs/rl.py +264 -16
  65. synth_ai/sdk/api/train/configs/sft.py +165 -1
  66. synth_ai/sdk/api/train/graph_validators.py +12 -12
  67. synth_ai/sdk/api/train/graphgen.py +169 -51
  68. synth_ai/sdk/api/train/graphgen_models.py +95 -45
  69. synth_ai/sdk/api/train/local_api.py +10 -0
  70. synth_ai/sdk/api/train/pollers.py +36 -0
  71. synth_ai/sdk/api/train/prompt_learning.py +390 -60
  72. synth_ai/sdk/api/train/rl.py +41 -5
  73. synth_ai/sdk/api/train/sft.py +2 -0
  74. synth_ai/sdk/api/train/task_app.py +20 -0
  75. synth_ai/sdk/api/train/validators.py +17 -17
  76. synth_ai/sdk/graphs/completions.py +239 -33
  77. synth_ai/sdk/{judging/schemas.py → graphs/verifier_schemas.py} +23 -23
  78. synth_ai/sdk/learning/__init__.py +35 -5
  79. synth_ai/sdk/learning/context_learning_client.py +531 -0
  80. synth_ai/sdk/learning/context_learning_types.py +294 -0
  81. synth_ai/sdk/learning/prompt_learning_client.py +1 -1
  82. synth_ai/sdk/learning/prompt_learning_types.py +2 -1
  83. synth_ai/sdk/learning/rl/__init__.py +0 -4
  84. synth_ai/sdk/learning/rl/contracts.py +0 -4
  85. synth_ai/sdk/localapi/__init__.py +40 -0
  86. synth_ai/sdk/localapi/apps/__init__.py +28 -0
  87. synth_ai/sdk/localapi/client.py +10 -0
  88. synth_ai/sdk/localapi/contracts.py +10 -0
  89. synth_ai/sdk/localapi/helpers.py +519 -0
  90. synth_ai/sdk/localapi/rollouts.py +93 -0
  91. synth_ai/sdk/localapi/server.py +29 -0
  92. synth_ai/sdk/localapi/template.py +49 -0
  93. synth_ai/sdk/streaming/handlers.py +6 -6
  94. synth_ai/sdk/streaming/streamer.py +10 -6
  95. synth_ai/sdk/task/__init__.py +18 -5
  96. synth_ai/sdk/task/apps/__init__.py +37 -1
  97. synth_ai/sdk/task/client.py +9 -1
  98. synth_ai/sdk/task/config.py +6 -11
  99. synth_ai/sdk/task/contracts.py +137 -95
  100. synth_ai/sdk/task/in_process.py +32 -22
  101. synth_ai/sdk/task/in_process_runner.py +9 -4
  102. synth_ai/sdk/task/rubrics/__init__.py +2 -3
  103. synth_ai/sdk/task/rubrics/loaders.py +4 -4
  104. synth_ai/sdk/task/rubrics/strict.py +3 -4
  105. synth_ai/sdk/task/server.py +76 -16
  106. synth_ai/sdk/task/trace_correlation_helpers.py +190 -139
  107. synth_ai/sdk/task/validators.py +34 -49
  108. synth_ai/sdk/training/__init__.py +7 -16
  109. synth_ai/sdk/tunnels/__init__.py +118 -0
  110. synth_ai/sdk/tunnels/cleanup.py +83 -0
  111. synth_ai/sdk/tunnels/ports.py +120 -0
  112. synth_ai/sdk/tunnels/tunneled_api.py +363 -0
  113. {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/METADATA +71 -4
  114. {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/RECORD +118 -128
  115. synth_ai/cli/commands/baseline/__init__.py +0 -12
  116. synth_ai/cli/commands/baseline/core.py +0 -636
  117. synth_ai/cli/commands/baseline/list.py +0 -94
  118. synth_ai/cli/commands/eval/errors.py +0 -81
  119. synth_ai/cli/commands/status/formatters.py +0 -164
  120. synth_ai/cli/commands/status/subcommands/pricing.py +0 -23
  121. synth_ai/cli/commands/status/subcommands/usage.py +0 -203
  122. synth_ai/cli/commands/train/judge_validation.py +0 -305
  123. synth_ai/cli/usage.py +0 -159
  124. synth_ai/data/specs.py +0 -36
  125. synth_ai/sdk/api/research_agent/cli.py +0 -428
  126. synth_ai/sdk/api/research_agent/config.py +0 -357
  127. synth_ai/sdk/api/research_agent/job.py +0 -717
  128. synth_ai/sdk/baseline/__init__.py +0 -25
  129. synth_ai/sdk/baseline/config.py +0 -209
  130. synth_ai/sdk/baseline/discovery.py +0 -216
  131. synth_ai/sdk/baseline/execution.py +0 -154
  132. synth_ai/sdk/judging/__init__.py +0 -15
  133. synth_ai/sdk/judging/base.py +0 -24
  134. synth_ai/sdk/judging/client.py +0 -191
  135. synth_ai/sdk/judging/types.py +0 -42
  136. synth_ai/sdk/research_agent/__init__.py +0 -34
  137. synth_ai/sdk/research_agent/container_builder.py +0 -328
  138. synth_ai/sdk/research_agent/container_spec.py +0 -198
  139. synth_ai/sdk/research_agent/defaults.py +0 -34
  140. synth_ai/sdk/research_agent/results_collector.py +0 -69
  141. synth_ai/sdk/specs/__init__.py +0 -46
  142. synth_ai/sdk/specs/dataclasses.py +0 -149
  143. synth_ai/sdk/specs/loader.py +0 -144
  144. synth_ai/sdk/specs/serializer.py +0 -199
  145. synth_ai/sdk/specs/validation.py +0 -250
  146. synth_ai/sdk/tracing/__init__.py +0 -39
  147. synth_ai/sdk/usage/__init__.py +0 -37
  148. synth_ai/sdk/usage/client.py +0 -171
  149. synth_ai/sdk/usage/models.py +0 -261
  150. {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/WHEEL +0 -0
  151. {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/entry_points.txt +0 -0
  152. {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/licenses/LICENSE +0 -0
  153. {synth_ai-0.4.1.dist-info → synth_ai-0.4.4.dist-info}/top_level.txt +0 -0
synth_ai/sdk/specs/serializer.py (removed)
@@ -1,199 +0,0 @@
- """Serializers for converting specs to prompt-friendly formats."""
-
- from __future__ import annotations
-
- from typing import Optional
-
- from synth_ai.sdk.specs.dataclasses import Spec
-
-
- def spec_to_prompt_context(
-     spec: Spec,
-     include_examples: bool = True,
-     include_tests: bool = False,
-     include_glossary: bool = True,
-     max_rules: Optional[int] = None,
-     priority_threshold: Optional[int] = None,
- ) -> str:
-     """Convert a Spec to a prompt-friendly markdown format.
-
-     Args:
-         spec: The specification to serialize
-         include_examples: Whether to include rule examples
-         include_tests: Whether to include test cases
-         include_glossary: Whether to include glossary terms
-         max_rules: Maximum number of rules to include (None = all)
-         priority_threshold: Only include rules with priority >= threshold
-
-     Returns:
-         Markdown-formatted string suitable for inclusion in prompts
-     """
-     lines = []
-
-     # Header
-     lines.append(f"# {spec.metadata.title}")
-     if spec.metadata.description:
-         lines.append(f"\n{spec.metadata.description}")
-     lines.append(f"\n**Version:** {spec.metadata.version}")
-     if spec.metadata.scope:
-         lines.append(f"**Scope:** {spec.metadata.scope}")
-     lines.append("")
-
-     # Principles
-     if spec.principles:
-         lines.append("## Guiding Principles\n")
-         for principle in spec.principles:
-             lines.append(f"**{principle.id}**: {principle.text}")
-             if principle.rationale:
-                 lines.append(f" - *Rationale:* {principle.rationale}")
-         lines.append("")
-
-     # Rules
-     if spec.rules:
-         # Filter rules by priority if specified
-         rules_to_include = spec.rules
-         if priority_threshold is not None:
-             rules_to_include = [
-                 r for r in rules_to_include
-                 if r.priority is not None and r.priority >= priority_threshold
-             ]
-
-         # Sort by priority (highest first)
-         rules_to_include = sorted(
-             rules_to_include,
-             key=lambda r: r.priority if r.priority is not None else 0,
-             reverse=True,
-         )
-
-         # Limit number of rules
-         if max_rules is not None:
-             rules_to_include = rules_to_include[:max_rules]
-
-         if rules_to_include:
-             lines.append("## Rules and Policies\n")
-
-             for rule in rules_to_include:
-                 # Rule header
-                 priority_str = f" [Priority: {rule.priority}]" if rule.priority else ""
-                 lines.append(f"### {rule.id}: {rule.title}{priority_str}\n")
-
-                 if rule.rationale:
-                     lines.append(f"*Rationale:* {rule.rationale}\n")
-
-                 # Constraints
-                 if rule.constraints.must or rule.constraints.must_not:
-                     lines.append("**Constraints:**")
-
-                     if rule.constraints.must:
-                         lines.append("- **MUST:**")
-                         for constraint in rule.constraints.must:
-                             lines.append(f" - {constraint}")
-
-                     if rule.constraints.must_not:
-                         lines.append("- **MUST NOT:**")
-                         for constraint in rule.constraints.must_not:
-                             lines.append(f" - {constraint}")
-
-                     if rule.constraints.should:
-                         lines.append("- **SHOULD:**")
-                         for constraint in rule.constraints.should:
-                             lines.append(f" - {constraint}")
-
-                     if rule.constraints.should_not:
-                         lines.append("- **SHOULD NOT:**")
-                         for constraint in rule.constraints.should_not:
-                             lines.append(f" - {constraint}")
-
-                     lines.append("")
-
-                 # Examples
-                 if include_examples and rule.examples:
-                     lines.append("**Examples:**\n")
-
-                     good_examples = [e for e in rule.examples if e.kind == "good"]
-                     bad_examples = [e for e in rule.examples if e.kind == "bad"]
-
-                     if good_examples:
-                         lines.append("✅ **Good:**")
-                         for ex in good_examples:
-                             lines.append(f"- Prompt: \"{ex.prompt}\"")
-                             lines.append(f" Response: \"{ex.response}\"")
-                             if ex.description:
-                                 lines.append(f" *{ex.description}*")
-                         lines.append("")
-
-                     if bad_examples:
-                         lines.append("❌ **Bad:**")
-                         for ex in bad_examples:
-                             lines.append(f"- Prompt: \"{ex.prompt}\"")
-                             lines.append(f" Response: \"{ex.response}\"")
-                             if ex.description:
-                                 lines.append(f" *{ex.description}*")
-                         lines.append("")
-
-                 # Tests
-                 if include_tests and rule.tests:
-                     lines.append("**Test Cases:**\n")
-                     for test in rule.tests:
-                         lines.append(f"- {test.id}: {test.challenge}")
-                         if test.asserts:
-                             lines.append(f" Asserts: {', '.join(test.asserts)}")
-                         if test.expected_behavior:
-                             lines.append(f" Expected: {test.expected_behavior}")
-                     lines.append("")
-
-     # Glossary
-     if include_glossary and spec.glossary:
-         lines.append("## Glossary\n")
-         for item in spec.glossary:
-             aliases_str = f" (aliases: {', '.join(item.aliases)})" if item.aliases else ""
-             lines.append(f"**{item.term}**{aliases_str}: {item.definition}")
-         lines.append("")
-
-     return "\n".join(lines)
-
-
- def spec_to_compact_context(spec: Spec, max_tokens: int = 5000) -> str:
-     """Convert a Spec to a compact prompt context within token limit.
-
-     Prioritizes high-priority rules and essential information.
-
-     Args:
-         spec: The specification to serialize
-         max_tokens: Approximate maximum tokens (uses char estimation: 4 chars ≈ 1 token)
-
-     Returns:
-         Compact markdown-formatted string
-     """
-     # Start with high-priority rules only
-     context = spec_to_prompt_context(
-         spec,
-         include_examples=True,
-         include_tests=False,
-         include_glossary=True,
-         priority_threshold=7,
-     )
-
-     # If still too long, try with fewer examples
-     max_chars = max_tokens * 4
-     if len(context) > max_chars:
-         context = spec_to_prompt_context(
-             spec,
-             include_examples=False,
-             include_tests=False,
-             include_glossary=True,
-             priority_threshold=7,
-         )
-
-     # If still too long, reduce glossary
-     if len(context) > max_chars:
-         context = spec_to_prompt_context(
-             spec,
-             include_examples=False,
-             include_tests=False,
-             include_glossary=False,
-             priority_threshold=8,
-         )
-
-     return context
-
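Note: `spec_to_prompt_context` and `spec_to_compact_context` were the public entry points of the removed `synth_ai.sdk.specs.serializer` module. A minimal sketch of how they were called on 0.4.1, assuming a `Spec` loaded via the also-removed `synth_ai.sdk.specs.loader` module (file name is a placeholder); none of this applies to 0.4.4, where the package is gone:

```python
# Sketch for synth-ai 0.4.1 only: these modules are deleted in 0.4.4.
from synth_ai.sdk.specs.loader import load_spec_from_file
from synth_ai.sdk.specs.serializer import (
    spec_to_compact_context,
    spec_to_prompt_context,
)

spec = load_spec_from_file("spec.json")  # placeholder path

# Full markdown rendering, capped at the ten highest-priority rules.
context = spec_to_prompt_context(spec, include_tests=True, max_rules=10)

# Compact rendering: retries with fewer sections until it fits ~max_tokens * 4 chars.
compact = spec_to_compact_context(spec, max_tokens=2000)
print(compact)
```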
synth_ai/sdk/specs/validation.py (removed)
@@ -1,250 +0,0 @@
- """Validation utilities for system specifications."""
-
- from __future__ import annotations
-
- from typing import Any, Dict, List
-
- from synth_ai.sdk.specs.dataclasses import Spec
-
-
- class SpecValidationError(Exception):
-     """Raised when spec validation fails."""
-     pass
-
-
- class SpecValidator:
-     """Validator for system specifications."""
-
-     def __init__(self):
-         self.errors: List[str] = []
-         self.warnings: List[str] = []
-
-     def validate(self, spec: Spec, strict: bool = False) -> bool:
-         """Validate a spec and collect errors/warnings.
-
-         Args:
-             spec: The spec to validate
-             strict: If True, warnings are treated as errors
-
-         Returns:
-             True if validation passes (no errors, or warnings if not strict)
-
-         Raises:
-             SpecValidationError: If validation fails with errors
-         """
-         self.errors = []
-         self.warnings = []
-
-         self._validate_metadata(spec)
-         self._validate_principles(spec)
-         self._validate_rules(spec)
-         self._validate_glossary(spec)
-         self._validate_consistency(spec)
-
-         if strict and self.warnings:
-             self.errors.extend(self.warnings)
-             self.warnings = []
-
-         if self.errors:
-             error_msg = "\n".join([f" - {err}" for err in self.errors])
-             raise SpecValidationError(f"Spec validation failed:\n{error_msg}")
-
-         return True
-
-     def _validate_metadata(self, spec: Spec) -> None:
-         """Validate metadata fields."""
-         md = spec.metadata
-
-         if not md.id:
-             self.errors.append("Metadata: 'id' is required")
-         elif not md.id.startswith("spec."):
-             self.warnings.append("Metadata: 'id' should start with 'spec.' prefix")
-
-         if not md.title:
-             self.errors.append("Metadata: 'title' is required")
-
-         if not md.version:
-             self.errors.append("Metadata: 'version' is required")
-         elif not self._is_valid_semver(md.version):
-             self.warnings.append(f"Metadata: version '{md.version}' is not valid semver (X.Y.Z)")
-
-         if not md.scope:
-             self.warnings.append("Metadata: 'scope' should be specified")
-
-     def _validate_principles(self, spec: Spec) -> None:
-         """Validate principles."""
-         seen_ids = set()
-
-         for i, principle in enumerate(spec.principles):
-             if not principle.id:
-                 self.errors.append(f"Principle {i}: 'id' is required")
-             elif principle.id in seen_ids:
-                 self.errors.append(f"Principle {i}: duplicate id '{principle.id}'")
-             else:
-                 seen_ids.add(principle.id)
-
-             if not principle.text:
-                 self.errors.append(f"Principle {principle.id}: 'text' is required")
-
-             if not principle.id.startswith("P-"):
-                 self.warnings.append(f"Principle {principle.id}: id should start with 'P-' prefix")
-
-     def _validate_rules(self, spec: Spec) -> None:
-         """Validate rules."""
-         seen_ids = set()
-
-         for i, rule in enumerate(spec.rules):
-             if not rule.id:
-                 self.errors.append(f"Rule {i}: 'id' is required")
-             elif rule.id in seen_ids:
-                 self.errors.append(f"Rule {i}: duplicate id '{rule.id}'")
-             else:
-                 seen_ids.add(rule.id)
-
-             if not rule.title:
-                 self.errors.append(f"Rule {rule.id}: 'title' is required")
-
-             if not rule.id.startswith("R-"):
-                 self.warnings.append(f"Rule {rule.id}: id should start with 'R-' prefix")
-
-             # Validate constraints
-             if not rule.constraints.must and not rule.constraints.must_not:
-                 self.warnings.append(
-                     f"Rule {rule.id}: no constraints defined (must/must_not are empty)"
-                 )
-
-             # Validate examples
-             for j, example in enumerate(rule.examples):
-                 if example.kind not in ("good", "bad"):
-                     self.errors.append(
-                         f"Rule {rule.id}, Example {j}: kind must be 'good' or 'bad', got '{example.kind}'"
-                     )
-
-                 if not example.prompt or not example.response:
-                     self.errors.append(
-                         f"Rule {rule.id}, Example {j}: both 'prompt' and 'response' are required"
-                     )
-
-             # Validate priority
-             if rule.priority is not None and (not isinstance(rule.priority, int) or rule.priority < 1 or rule.priority > 10):
-                 self.errors.append(
-                     f"Rule {rule.id}: priority must be an integer between 1 and 10, got {rule.priority}"
-                 )
-
-     def _validate_glossary(self, spec: Spec) -> None:
-         """Validate glossary."""
-         seen_terms = set()
-
-         for item in spec.glossary:
-             term_lower = item.term.lower()
-
-             if term_lower in seen_terms:
-                 self.errors.append(f"Glossary: duplicate term '{item.term}'")
-             else:
-                 seen_terms.add(term_lower)
-
-             if not item.definition:
-                 self.errors.append(f"Glossary: term '{item.term}' missing definition")
-
-             # Check for duplicate aliases
-             for alias in item.aliases:
-                 alias_lower = alias.lower()
-                 if alias_lower in seen_terms:
-                     self.warnings.append(
-                         f"Glossary: alias '{alias}' for term '{item.term}' conflicts with existing term"
-                     )
-                 seen_terms.add(alias_lower)
-
-     def _validate_consistency(self, spec: Spec) -> None:
-         """Validate cross-references and consistency."""
-         # Check for orphaned imports
-         if spec.metadata.imports:
-             self.warnings.append(
-                 f"Metadata: imports specified but not validated ({len(spec.metadata.imports)} imports)"
-             )
-
-         # Warn if no rules or principles
-         if not spec.rules and not spec.principles:
-             self.warnings.append("Spec has no rules or principles defined")
-
-         # Check for rules without examples
-         rules_without_examples = [r.id for r in spec.rules if not r.examples]
-         if rules_without_examples:
-             self.warnings.append(
-                 f"Rules without examples: {', '.join(rules_without_examples)}"
-             )
-
-     @staticmethod
-     def _is_valid_semver(version: str) -> bool:
-         """Check if version follows semver format (X.Y.Z)."""
-         parts = version.split(".")
-         if len(parts) != 3:
-             return False
-         try:
-             for part in parts:
-                 int(part)
-             return True
-         except ValueError:
-             return False
-
-
- def validate_spec_dict(data: Dict[str, Any], strict: bool = False) -> List[str]:
-     """Validate spec dictionary before loading.
-
-     Args:
-         data: Dictionary representation of spec
-         strict: If True, treat warnings as errors
-
-     Returns:
-         List of validation errors (empty if valid)
-     """
-     errors = []
-
-     # Check required top-level keys
-     if "metadata" not in data:
-         errors.append("Missing required key: 'metadata'")
-     elif not isinstance(data["metadata"], dict):
-         errors.append("'metadata' must be a dictionary")
-     else:
-         # Check required metadata fields
-         for field in ["id", "title", "version"]:
-             if field not in data["metadata"]:
-                 errors.append(f"Missing required metadata field: '{field}'")
-
-     # Check optional top-level keys are correct type
-     type_checks = {
-         "principles": list,
-         "rules": list,
-         "interfaces": dict,
-         "glossary": list,
-         "changelog": list,
-     }
-
-     for key, expected_type in type_checks.items():
-         if key in data and not isinstance(data[key], expected_type):
-             errors.append(f"'{key}' must be a {expected_type.__name__}")
-
-     return errors
-
-
- def validate_spec_file(path: str, strict: bool = False) -> bool:
-     """Validate a spec file.
-
-     Args:
-         path: Path to spec JSON file
-         strict: If True, treat warnings as errors
-
-     Returns:
-         True if validation passes
-
-     Raises:
-         SpecValidationError: If validation fails
-         FileNotFoundError: If file doesn't exist
-         json.JSONDecodeError: If file is not valid JSON
-     """
-     from synth_ai.sdk.specs.loader import load_spec_from_file
-
-     spec = load_spec_from_file(path)
-     validator = SpecValidator()
-     return validator.validate(spec, strict=strict)
-
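The removed validator exposed both an object API and dict/file helpers. A short sketch of how they fit together on 0.4.1, based only on the signatures above (the spec file path is a placeholder):

```python
# Sketch for synth-ai 0.4.1 only: the specs package is deleted in 0.4.4.
import json

from synth_ai.sdk.specs.loader import load_spec_from_file
from synth_ai.sdk.specs.validation import (
    SpecValidationError,
    SpecValidator,
    validate_spec_dict,
)

# Cheap structural check on the raw dict before building a Spec.
with open("spec.json") as fh:
    raw = json.load(fh)
for problem in validate_spec_dict(raw):
    print("structural issue:", problem)

# Full validation; strict=True promotes warnings to errors and raises.
spec = load_spec_from_file("spec.json")
try:
    SpecValidator().validate(spec, strict=True)
except SpecValidationError as exc:
    print(exc)
```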
synth_ai/sdk/tracing/__init__.py (removed)
@@ -1,39 +0,0 @@
- """Tracing SDK - session trace types and utilities.
-
- This module provides a cleaner import path for tracing types.
- The underlying implementation remains in tracing_v3/.
-
- Example:
-     from synth_ai.sdk.tracing import SessionTrace
-
-     trace = SessionTrace(session_id="...", time_steps=[...])
- """
-
- from __future__ import annotations
-
- # Re-export from data layer (which re-exports from tracing_v3)
- from synth_ai.data.traces import (
-     BaseEvent,
-     EnvironmentEvent,
-     LMCAISEvent,
-     RuntimeEvent,
-     SessionEventMarkovBlanketMessage,
-     SessionMessageContent,
-     SessionTimeStep,
-     SessionTrace,
-     TimeRecord,
- )
-
- __all__ = [
-     "SessionTrace",
-     "SessionTimeStep",
-     "BaseEvent",
-     "RuntimeEvent",
-     "EnvironmentEvent",
-     "LMCAISEvent",
-     "SessionEventMarkovBlanketMessage",
-     "SessionMessageContent",
-     "TimeRecord",
- ]
-
-
synth_ai/sdk/usage/__init__.py (removed)
@@ -1,37 +0,0 @@
- """Usage tracking module for Synth AI SDK.
-
- This module provides the UsageClient for fetching org usage/limits
- from the Synth backend, plus convenience methods for checking limits.
- """
-
- from __future__ import annotations
-
- from .client import UsageClient
- from .models import (
-     APIUsage,
-     InferenceUsage,
-     JudgesUsage,
-     OrgUsage,
-     PromptOptUsage,
-     RLUsage,
-     ResearchUsage,
-     SFTUsage,
-     TotalUsage,
-     UsageMetric,
-     UsagePeriod,
- )
-
- __all__ = [
-     "UsageClient",
-     "OrgUsage",
-     "UsageMetric",
-     "UsagePeriod",
-     "APIUsage",
-     "InferenceUsage",
-     "JudgesUsage",
-     "PromptOptUsage",
-     "RLUsage",
-     "SFTUsage",
-     "ResearchUsage",
-     "TotalUsage",
- ]
synth_ai/sdk/usage/client.py (removed)
@@ -1,171 +0,0 @@
- """Usage client for fetching org usage and limits.
-
- This module provides the UsageClient class that communicates with the
- Synth backend to fetch usage data and check limits.
- """
-
- from __future__ import annotations
-
- import os
- from typing import Any
-
- from synth_ai.core.errors import UsageLimitError
- from synth_ai.core.http import AsyncHttpClient, http_request
-
- from .models import OrgUsage, UsageMetric
-
-
- def _get_base_url() -> str:
-     """Get the backend base URL from environment."""
-     return os.getenv("BACKEND_BASE_URL", "https://api.usesynth.ai")
-
-
- def _get_api_key() -> str:
-     """Get the API key from environment."""
-     return os.getenv("SYNTH_API_KEY", "")
-
-
- class UsageClient:
-     """Client for fetching org usage and limits.
-
-     Usage:
-         # Sync usage
-         client = UsageClient()
-         usage = client.get()
-         print(usage.tier)
-         print(usage.apis.inference.tokens_per_day.remaining)
-
-         # Async usage
-         async with UsageClient() as client:
-             usage = await client.get_async()
-
-         # Check a specific limit
-         client.check("prompt_opt", "jobs_per_day")  # raises UsageLimitError if exhausted
-     """
-
-     def __init__(
-         self,
-         base_url: str | None = None,
-         api_key: str | None = None,
-     ) -> None:
-         """Initialize the usage client.
-
-         Args:
-             base_url: Backend URL (defaults to BACKEND_BASE_URL env var)
-             api_key: API key (defaults to SYNTH_API_KEY env var)
-         """
-         self._base_url = base_url or _get_base_url()
-         self._api_key = api_key or _get_api_key()
-         self._async_client: AsyncHttpClient | None = None
-
-     async def __aenter__(self) -> UsageClient:
-         """Enter async context."""
-         self._async_client = AsyncHttpClient(self._base_url, self._api_key)
-         await self._async_client.__aenter__()
-         return self
-
-     async def __aexit__(
-         self,
-         exc_type: type[BaseException] | None,
-         exc: BaseException | None,
-         tb: Any,
-     ) -> None:
-         """Exit async context."""
-         if self._async_client is not None:
-             await self._async_client.__aexit__(exc_type, exc, tb)
-             self._async_client = None
-
-     def get(self) -> OrgUsage:
-         """Fetch current usage (synchronous).
-
-         Returns:
-             OrgUsage object with all usage metrics and limits
-
-         Raises:
-             HTTPError: If the API request fails
-         """
-         url = f"{self._base_url}/api/v1/usage"
-         headers = {"authorization": f"Bearer {self._api_key}"}
-         status, data = http_request("GET", url, headers=headers)
-
-         if status != 200:
-             from synth_ai.core.errors import HTTPError
-             raise HTTPError(
-                 status=status,
-                 url=url,
-                 message="Failed to fetch usage",
-                 detail=data if isinstance(data, dict) else None,
-             )
-
-         if isinstance(data, dict):
-             return OrgUsage.from_dict(data)
-         raise ValueError(f"Unexpected response type: {type(data)}")
-
-     async def get_async(self) -> OrgUsage:
-         """Fetch current usage (asynchronous).
-
-         Returns:
-             OrgUsage object with all usage metrics and limits
-
-         Raises:
-             HTTPError: If the API request fails
-         """
-         if self._async_client is None:
-             raise RuntimeError("Must use as async context manager")
-
-         data = await self._async_client.get("/api/v1/usage")
-         return OrgUsage.from_dict(data)
-
-     def check(self, api: str, metric: str) -> UsageMetric:
-         """Check if a specific limit has capacity remaining.
-
-         Args:
-             api: API name (inference, judges, prompt_opt, rl, sft, research)
-             metric: Metric name (e.g., requests_per_min, jobs_per_day)
-
-         Returns:
-             The UsageMetric if capacity is available
-
-         Raises:
-             UsageLimitError: If the limit is exhausted
-             ValueError: If the api/metric combination is invalid
-         """
-         usage = self.get()
-         return self._check_metric(usage, api, metric)
-
-     async def check_async(self, api: str, metric: str) -> UsageMetric:
-         """Check if a specific limit has capacity remaining (async).
-
-         Args:
-             api: API name (inference, judges, prompt_opt, rl, sft, research)
-             metric: Metric name (e.g., requests_per_min, jobs_per_day)
-
-         Returns:
-             The UsageMetric if capacity is available
-
-         Raises:
-             UsageLimitError: If the limit is exhausted
-             ValueError: If the api/metric combination is invalid
-         """
-         usage = await self.get_async()
-         return self._check_metric(usage, api, metric)
-
-     def _check_metric(self, usage: OrgUsage, api: str, metric: str) -> UsageMetric:
-         """Internal method to check a metric and raise if exhausted."""
-         m = usage.get_metric(api, metric)
-         if m is None:
-             raise ValueError(f"Unknown api/metric: {api}/{metric}")
-
-         if m.is_exhausted:
-             raise UsageLimitError(
-                 limit_type=metric,
-                 api=api,
-                 current=m.used,
-                 limit=m.limit,
-                 tier=usage.tier,
-             )
-
-         return m
-
-
- __all__ = ["UsageClient"]
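The client's own docstring shows the intended call patterns; below is a short sketch of guarding a job launch with the removed limit check. It applies only before 0.4.4, and `metric.used` / `metric.limit` are assumed from the fields the client passes to `UsageLimitError` above:

```python
# Sketch for synth-ai 0.4.1 only: the usage module is deleted in 0.4.4.
from synth_ai.core.errors import UsageLimitError
from synth_ai.sdk.usage import UsageClient

client = UsageClient()  # reads BACKEND_BASE_URL / SYNTH_API_KEY from the environment
try:
    metric = client.check("prompt_opt", "jobs_per_day")
    print(f"jobs remaining today: {metric.limit - metric.used}")
except UsageLimitError as exc:
    print(f"limit exhausted: {exc}")
```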