synth-ai 0.2.16__py3-none-any.whl → 0.2.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of synth-ai might be problematic. Click here for more details.

Files changed (299) hide show
  1. examples/analyze_semantic_words.sh +2 -2
  2. examples/baseline/banking77_baseline.py +204 -0
  3. examples/baseline/crafter_baseline.py +407 -0
  4. examples/baseline/pokemon_red_baseline.py +326 -0
  5. examples/baseline/simple_baseline.py +56 -0
  6. examples/baseline/warming_up_to_rl_baseline.py +239 -0
  7. examples/blog_posts/gepa/README.md +355 -0
  8. examples/blog_posts/gepa/configs/banking77_gepa_local.toml +95 -0
  9. examples/blog_posts/gepa/configs/banking77_gepa_test.toml +82 -0
  10. examples/blog_posts/gepa/configs/banking77_mipro_local.toml +52 -0
  11. examples/blog_posts/gepa/configs/hotpotqa_gepa_local.toml +59 -0
  12. examples/blog_posts/gepa/configs/hotpotqa_gepa_qwen.toml +36 -0
  13. examples/blog_posts/gepa/configs/hotpotqa_mipro_local.toml +53 -0
  14. examples/blog_posts/gepa/configs/hover_gepa_local.toml +59 -0
  15. examples/blog_posts/gepa/configs/hover_gepa_qwen.toml +36 -0
  16. examples/blog_posts/gepa/configs/hover_mipro_local.toml +53 -0
  17. examples/blog_posts/gepa/configs/ifbench_gepa_local.toml +59 -0
  18. examples/blog_posts/gepa/configs/ifbench_gepa_qwen.toml +36 -0
  19. examples/blog_posts/gepa/configs/ifbench_mipro_local.toml +53 -0
  20. examples/blog_posts/gepa/configs/pupa_gepa_local.toml +60 -0
  21. examples/blog_posts/gepa/configs/pupa_mipro_local.toml +54 -0
  22. examples/blog_posts/gepa/deploy_banking77_task_app.sh +41 -0
  23. examples/blog_posts/gepa/gepa_baseline.py +204 -0
  24. examples/blog_posts/gepa/query_prompts_example.py +97 -0
  25. examples/blog_posts/gepa/run_gepa_banking77.sh +87 -0
  26. examples/blog_posts/gepa/task_apps.py +105 -0
  27. examples/blog_posts/gepa/test_gepa_local.sh +67 -0
  28. examples/blog_posts/gepa/verify_banking77_setup.sh +123 -0
  29. examples/blog_posts/pokemon_vl/README.md +98 -0
  30. examples/blog_posts/pokemon_vl/configs/eval_gpt5nano.toml +26 -0
  31. examples/blog_posts/pokemon_vl/configs/eval_qwen3_vl.toml +27 -0
  32. examples/blog_posts/pokemon_vl/configs/eval_rl_final.toml +24 -0
  33. examples/blog_posts/pokemon_vl/configs/filter_high_reward.toml +10 -0
  34. examples/blog_posts/pokemon_vl/configs/train_rl_from_sft.toml +43 -0
  35. examples/blog_posts/pokemon_vl/configs/train_sft_qwen4b_vl.toml +40 -0
  36. examples/blog_posts/pokemon_vl/extract_images.py +239 -0
  37. examples/blog_posts/pokemon_vl/pokemon_vl_baseline.py +326 -0
  38. examples/blog_posts/pokemon_vl/run_eval_extract_images.py +209 -0
  39. examples/blog_posts/pokemon_vl/run_qwen_eval_extract_images.py +212 -0
  40. examples/blog_posts/pokemon_vl/text_box_analysis.md +106 -0
  41. examples/blog_posts/warming_up_to_rl/ARCHITECTURE.md +195 -0
  42. examples/blog_posts/warming_up_to_rl/FINAL_TEST_RESULTS.md +127 -0
  43. examples/blog_posts/warming_up_to_rl/INFERENCE_SUCCESS.md +132 -0
  44. examples/blog_posts/warming_up_to_rl/README.md +158 -0
  45. examples/blog_posts/warming_up_to_rl/SMOKE_TESTING.md +164 -0
  46. examples/blog_posts/warming_up_to_rl/SMOKE_TEST_COMPLETE.md +253 -0
  47. examples/blog_posts/warming_up_to_rl/configs/eval_baseline_qwen32b_10x20.toml +25 -0
  48. examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b.toml +25 -0
  49. examples/blog_posts/warming_up_to_rl/configs/eval_ft_qwen4b_10x20.toml +26 -0
  50. examples/blog_posts/warming_up_to_rl/configs/eval_groq_qwen32b.toml +25 -0
  51. examples/blog_posts/warming_up_to_rl/configs/eval_openai_gpt_oss_120b.toml +29 -0
  52. examples/blog_posts/warming_up_to_rl/configs/filter_high_reward_dataset.toml +10 -0
  53. examples/blog_posts/warming_up_to_rl/configs/smoke_test.toml +75 -0
  54. examples/blog_posts/warming_up_to_rl/configs/train_rl_from_sft.toml +91 -0
  55. examples/blog_posts/warming_up_to_rl/configs/train_sft_qwen4b.toml +40 -0
  56. examples/blog_posts/warming_up_to_rl/warming_up_to_rl_baseline.py +187 -0
  57. examples/dev/qwen3_32b_qlora_4xh100.toml +5 -0
  58. examples/multi_step/configs/VERILOG_REWARDS.md +4 -0
  59. examples/multi_step/configs/VERILOG_RL_CHECKLIST.md +4 -0
  60. examples/multi_step/configs/crafter_rl_outcome.toml +2 -1
  61. examples/multi_step/configs/crafter_rl_stepwise_hosted_judge.toml +65 -107
  62. examples/multi_step/configs/crafter_rl_stepwise_shaped.toml +2 -1
  63. examples/multi_step/configs/crafter_rl_stepwise_simple.toml +2 -1
  64. examples/multi_step/configs/crafter_rl_stepwise_simple_NEW_FORMAT.toml +105 -0
  65. examples/multi_step/configs/verilog_rl_lora.toml +80 -123
  66. examples/qwen_coder/configs/coder_lora_30b.toml +1 -3
  67. examples/qwen_coder/configs/coder_lora_4b.toml +4 -1
  68. examples/qwen_coder/configs/coder_lora_small.toml +1 -3
  69. examples/qwen_vl/README.md +10 -12
  70. examples/qwen_vl/SETUP_COMPLETE.md +7 -8
  71. examples/qwen_vl/VISION_TESTS_COMPLETE.md +2 -3
  72. examples/qwen_vl/collect_data_via_cli.md +76 -84
  73. examples/qwen_vl/collect_vision_traces.py +4 -4
  74. examples/qwen_vl/configs/crafter_rl_vision_qwen3vl4b.toml +40 -57
  75. examples/qwen_vl/configs/crafter_vlm_sft_example.toml +1 -2
  76. examples/qwen_vl/configs/eval_gpt4o_mini_vision.toml +20 -37
  77. examples/qwen_vl/configs/eval_gpt5nano_vision.toml +21 -40
  78. examples/qwen_vl/configs/eval_qwen3vl_vision.toml +26 -0
  79. examples/qwen_vl/configs/{filter_qwen2vl_sft.toml → filter_qwen3vl_sft.toml} +4 -5
  80. examples/qwen_vl/configs/filter_vision_sft.toml +2 -3
  81. examples/qwen_vl/crafter_qwen_vl_agent.py +5 -5
  82. examples/qwen_vl/run_vision_comparison.sh +6 -7
  83. examples/rl/README.md +5 -5
  84. examples/rl/configs/rl_from_base_qwen.toml +26 -1
  85. examples/rl/configs/rl_from_base_qwen17.toml +6 -2
  86. examples/rl/task_app/README.md +1 -2
  87. examples/rl/task_app/math_single_step.py +2 -2
  88. examples/run_crafter_demo.sh +2 -2
  89. examples/sft/README.md +1 -1
  90. examples/sft/configs/crafter_fft_qwen0p6b.toml +4 -1
  91. examples/sft/configs/crafter_lora_qwen0p6b.toml +4 -1
  92. examples/swe/task_app/README.md +32 -2
  93. examples/swe/task_app/grpo_swe_mini.py +4 -0
  94. examples/swe/task_app/hosted/envs/crafter/react_agent.py +1 -1
  95. examples/swe/task_app/hosted/envs/mini_swe/environment.py +37 -10
  96. examples/swe/task_app/hosted/inference/openai_client.py +4 -38
  97. examples/swe/task_app/hosted/policy_routes.py +17 -0
  98. examples/swe/task_app/hosted/rollout.py +4 -2
  99. examples/swe/task_app/morph_backend.py +178 -0
  100. examples/task_apps/banking77/__init__.py +6 -0
  101. examples/task_apps/banking77/banking77_task_app.py +841 -0
  102. examples/task_apps/banking77/deploy_wrapper.py +46 -0
  103. examples/task_apps/crafter/CREATE_SFT_DATASET.md +4 -0
  104. examples/task_apps/crafter/FILTER_COMMAND_STATUS.md +4 -0
  105. examples/task_apps/crafter/FILTER_COMMAND_SUCCESS.md +4 -0
  106. examples/task_apps/crafter/task_app/README.md +1 -1
  107. examples/task_apps/crafter/task_app/grpo_crafter.py +90 -5
  108. examples/task_apps/crafter/task_app/grpo_crafter_task_app.py +1 -1
  109. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/policy.py +4 -26
  110. examples/task_apps/crafter/task_app/synth_envs_hosted/envs/crafter/react_agent.py +1 -2
  111. examples/task_apps/crafter/task_app/synth_envs_hosted/hosted_app.py +49 -0
  112. examples/task_apps/crafter/task_app/synth_envs_hosted/inference/openai_client.py +372 -107
  113. examples/task_apps/crafter/task_app/synth_envs_hosted/policy_routes.py +81 -12
  114. examples/task_apps/crafter/task_app/synth_envs_hosted/rollout.py +82 -11
  115. examples/task_apps/crafter/task_app/synth_envs_hosted/utils.py +194 -1
  116. examples/task_apps/enron/task_app/grpo_enron_task_app.py +1 -1
  117. examples/task_apps/gepa_benchmarks/__init__.py +7 -0
  118. examples/task_apps/gepa_benchmarks/common.py +260 -0
  119. examples/task_apps/gepa_benchmarks/hotpotqa_task_app.py +507 -0
  120. examples/task_apps/gepa_benchmarks/hover_task_app.py +436 -0
  121. examples/task_apps/gepa_benchmarks/ifbench_task_app.py +563 -0
  122. examples/task_apps/gepa_benchmarks/pupa_task_app.py +460 -0
  123. examples/task_apps/math/README.md +1 -2
  124. examples/task_apps/pokemon_red/README.md +3 -4
  125. examples/task_apps/pokemon_red/README_IMAGE_ONLY_EVAL.md +4 -0
  126. examples/task_apps/pokemon_red/eval_image_only_gpt4o.toml +6 -5
  127. examples/task_apps/pokemon_red/eval_pokemon_red_policy.py +1 -2
  128. examples/task_apps/pokemon_red/task_app.py +288 -39
  129. examples/task_apps/sokoban/README.md +2 -3
  130. examples/task_apps/verilog/eval_groq_qwen32b.toml +12 -14
  131. examples/task_apps/verilog/task_app/grpo_verilog_task_app.py +1 -1
  132. examples/vlm/configs/crafter_vlm_gpt4o.toml +4 -1
  133. examples/warming_up_to_rl/configs/crafter_fft.toml +4 -1
  134. examples/warming_up_to_rl/configs/crafter_fft_4b.toml +0 -2
  135. examples/warming_up_to_rl/configs/rl_from_base_qwen4b.toml +3 -2
  136. examples/warming_up_to_rl/run_local_rollout_traced.py +1 -1
  137. examples/warming_up_to_rl/task_app/README.md +1 -1
  138. examples/warming_up_to_rl/task_app/grpo_crafter.py +185 -5
  139. examples/warming_up_to_rl/task_app/grpo_crafter_task_app.py +1 -1
  140. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/policy.py +3 -27
  141. examples/warming_up_to_rl/task_app/synth_envs_hosted/envs/crafter/react_agent.py +1 -1
  142. examples/warming_up_to_rl/task_app/synth_envs_hosted/hosted_app.py +49 -0
  143. examples/warming_up_to_rl/task_app/synth_envs_hosted/inference/openai_client.py +156 -45
  144. examples/warming_up_to_rl/task_app/synth_envs_hosted/policy_routes.py +37 -4
  145. examples/warming_up_to_rl/task_app/synth_envs_hosted/rollout.py +33 -3
  146. examples/warming_up_to_rl/task_app/synth_envs_hosted/utils.py +67 -0
  147. examples/workflows/math_rl/configs/rl_from_base_qwen.toml +27 -0
  148. examples/workflows/math_rl/configs/rl_from_base_qwen17.toml +6 -0
  149. synth_ai/api/train/builders.py +99 -4
  150. synth_ai/api/train/cli.py +516 -26
  151. synth_ai/api/train/config_finder.py +13 -2
  152. synth_ai/api/train/configs/__init__.py +23 -2
  153. synth_ai/api/train/configs/prompt_learning.py +442 -0
  154. synth_ai/api/train/configs/rl.py +61 -7
  155. synth_ai/api/train/configs/sft.py +6 -2
  156. synth_ai/api/train/configs/shared.py +59 -2
  157. synth_ai/api/train/task_app.py +1 -1
  158. synth_ai/api/train/validators.py +277 -0
  159. synth_ai/auth/credentials.py +119 -0
  160. synth_ai/baseline/__init__.py +25 -0
  161. synth_ai/baseline/config.py +209 -0
  162. synth_ai/baseline/discovery.py +214 -0
  163. synth_ai/baseline/execution.py +146 -0
  164. synth_ai/cli/__init__.py +94 -18
  165. synth_ai/cli/__main__.py +0 -0
  166. synth_ai/cli/claude.py +70 -0
  167. synth_ai/cli/codex.py +84 -0
  168. synth_ai/cli/commands/__init__.py +18 -0
  169. synth_ai/cli/commands/baseline/__init__.py +12 -0
  170. synth_ai/cli/commands/baseline/core.py +637 -0
  171. synth_ai/cli/commands/baseline/list.py +93 -0
  172. synth_ai/cli/commands/demo/__init__.py +6 -0
  173. synth_ai/cli/commands/demo/core.py +163 -0
  174. synth_ai/cli/commands/eval/__init__.py +19 -0
  175. synth_ai/cli/commands/eval/core.py +1112 -0
  176. synth_ai/cli/commands/eval/errors.py +81 -0
  177. synth_ai/cli/commands/eval/validation.py +133 -0
  178. synth_ai/cli/commands/filter/__init__.py +12 -0
  179. synth_ai/cli/commands/filter/core.py +424 -0
  180. synth_ai/cli/commands/filter/errors.py +55 -0
  181. synth_ai/cli/commands/filter/validation.py +77 -0
  182. synth_ai/cli/commands/help/__init__.py +177 -0
  183. synth_ai/cli/commands/help/core.py +72 -0
  184. synth_ai/cli/commands/smoke/__init__.py +7 -0
  185. synth_ai/cli/commands/smoke/core.py +1436 -0
  186. synth_ai/cli/commands/status/__init__.py +64 -0
  187. synth_ai/cli/commands/status/client.py +192 -0
  188. synth_ai/cli/commands/status/config.py +92 -0
  189. synth_ai/cli/commands/status/errors.py +20 -0
  190. synth_ai/cli/commands/status/formatters.py +164 -0
  191. synth_ai/cli/commands/status/subcommands/__init__.py +9 -0
  192. synth_ai/cli/commands/status/subcommands/files.py +79 -0
  193. synth_ai/cli/commands/status/subcommands/jobs.py +334 -0
  194. synth_ai/cli/commands/status/subcommands/models.py +79 -0
  195. synth_ai/cli/commands/status/subcommands/pricing.py +22 -0
  196. synth_ai/cli/commands/status/subcommands/runs.py +81 -0
  197. synth_ai/cli/commands/status/subcommands/summary.py +47 -0
  198. synth_ai/cli/commands/status/subcommands/usage.py +203 -0
  199. synth_ai/cli/commands/status/utils.py +114 -0
  200. synth_ai/cli/commands/train/__init__.py +53 -0
  201. synth_ai/cli/commands/train/core.py +21 -0
  202. synth_ai/cli/commands/train/errors.py +117 -0
  203. synth_ai/cli/commands/train/judge_schemas.py +200 -0
  204. synth_ai/cli/commands/train/judge_validation.py +305 -0
  205. synth_ai/cli/commands/train/validation.py +386 -0
  206. synth_ai/cli/demo.py +30 -158
  207. synth_ai/cli/deploy/__init__.py +43 -0
  208. synth_ai/cli/deploy.py +162 -0
  209. synth_ai/cli/eval/__init__.py +36 -0
  210. synth_ai/cli/eval/core.py +5 -0
  211. synth_ai/cli/eval/errors.py +31 -0
  212. synth_ai/cli/eval/validation.py +5 -0
  213. synth_ai/cli/filter/__init__.py +28 -0
  214. synth_ai/cli/filter/core.py +5 -0
  215. synth_ai/cli/filter/errors.py +23 -0
  216. synth_ai/cli/filter/validation.py +5 -0
  217. synth_ai/cli/legacy_root_backup.py +14 -8
  218. synth_ai/cli/modal_serve/__init__.py +12 -0
  219. synth_ai/cli/modal_serve/core.py +14 -0
  220. synth_ai/cli/modal_serve/errors.py +8 -0
  221. synth_ai/cli/modal_serve/validation.py +11 -0
  222. synth_ai/cli/opencode.py +107 -0
  223. synth_ai/cli/root.py +9 -5
  224. synth_ai/cli/serve/__init__.py +12 -0
  225. synth_ai/cli/serve/core.py +14 -0
  226. synth_ai/cli/serve/errors.py +8 -0
  227. synth_ai/cli/serve/validation.py +11 -0
  228. synth_ai/cli/setup.py +20 -265
  229. synth_ai/cli/status.py +7 -126
  230. synth_ai/cli/task_app_deploy.py +1 -10
  231. synth_ai/cli/task_app_modal_serve.py +4 -9
  232. synth_ai/cli/task_app_serve.py +4 -11
  233. synth_ai/cli/task_apps.py +51 -1480
  234. synth_ai/cli/train/__init__.py +12 -0
  235. synth_ai/cli/train/core.py +21 -0
  236. synth_ai/cli/train/errors.py +8 -0
  237. synth_ai/cli/train/validation.py +24 -0
  238. synth_ai/cli/train.py +1 -14
  239. synth_ai/demos/crafter/grpo_crafter_task_app.py +1 -1
  240. synth_ai/demos/demo_task_apps/crafter/grpo_crafter_task_app.py +1 -1
  241. synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +7 -4
  242. synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +9 -5
  243. synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +4 -3
  244. synth_ai/environments/examples/red/engine.py +33 -12
  245. synth_ai/environments/examples/red/engine_helpers/reward_components.py +151 -179
  246. synth_ai/environments/examples/red/environment.py +26 -0
  247. synth_ai/environments/examples/red/trace_hooks_v3.py +168 -0
  248. synth_ai/http.py +12 -0
  249. synth_ai/judge_schemas.py +10 -10
  250. synth_ai/learning/__init__.py +10 -0
  251. synth_ai/learning/prompt_learning_client.py +276 -0
  252. synth_ai/learning/prompt_learning_types.py +184 -0
  253. synth_ai/learning/rl/client.py +3 -1
  254. synth_ai/pricing/__init__.py +2 -0
  255. synth_ai/pricing/model_pricing.py +57 -0
  256. synth_ai/streaming/__init__.py +29 -0
  257. synth_ai/streaming/config.py +94 -0
  258. synth_ai/streaming/handlers.py +518 -0
  259. synth_ai/streaming/streamer.py +320 -0
  260. synth_ai/streaming/types.py +95 -0
  261. synth_ai/task/apps/__init__.py +1 -0
  262. synth_ai/task/config.py +2 -0
  263. synth_ai/task/tracing_utils.py +25 -25
  264. synth_ai/task/validators.py +45 -9
  265. synth_ai/task_app_cfgs.py +21 -0
  266. synth_ai/tracing_v3/config.py +162 -19
  267. synth_ai/tracing_v3/constants.py +1 -1
  268. synth_ai/tracing_v3/db_config.py +24 -38
  269. synth_ai/tracing_v3/migration_helper.py +1 -2
  270. synth_ai/tracing_v3/storage/config.py +47 -13
  271. synth_ai/tracing_v3/storage/factory.py +3 -3
  272. synth_ai/tracing_v3/turso/daemon.py +113 -11
  273. synth_ai/tracing_v3/turso/native_manager.py +92 -16
  274. synth_ai/types.py +8 -0
  275. synth_ai/urls.py +11 -0
  276. synth_ai/utils/__init__.py +30 -1
  277. synth_ai/utils/agents.py +74 -0
  278. synth_ai/utils/bin.py +39 -0
  279. synth_ai/utils/cli.py +149 -5
  280. synth_ai/utils/env.py +40 -33
  281. synth_ai/utils/http.py +4 -1
  282. synth_ai/utils/json.py +72 -0
  283. synth_ai/utils/modal.py +285 -3
  284. synth_ai/utils/paths.py +48 -0
  285. synth_ai/utils/uvicorn.py +113 -0
  286. {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/METADATA +109 -6
  287. {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/RECORD +291 -142
  288. examples/qwen_vl/configs/eval_qwen2vl_vision.toml +0 -44
  289. synth_ai/cli/tui.py +0 -62
  290. synth_ai/tui/__init__.py +0 -5
  291. synth_ai/tui/__main__.py +0 -13
  292. synth_ai/tui/cli/__init__.py +0 -1
  293. synth_ai/tui/cli/query_experiments.py +0 -164
  294. synth_ai/tui/cli/query_experiments_v3.py +0 -164
  295. synth_ai/tui/dashboard.py +0 -911
  296. {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/WHEEL +0 -0
  297. {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/entry_points.txt +0 -0
  298. {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/licenses/LICENSE +0 -0
  299. {synth_ai-0.2.16.dist-info → synth_ai-0.2.19.dist-info}/top_level.txt +0 -0
synth_ai/judge_schemas.py CHANGED
@@ -9,7 +9,7 @@ This is the canonical contract that the backend MUST conform to.
9
9
 
10
10
  from __future__ import annotations
11
11
 
12
- from typing import Any, Literal
12
+ from typing import Any, Literal, Optional
13
13
 
14
14
  from pydantic import BaseModel, Field
15
15
 
@@ -31,7 +31,7 @@ class ReviewPayload(BaseModel):
31
31
  description="Map of criterion keys to their scores"
32
32
  )
33
33
  total: float = Field(default=0.0, description="Aggregated total score")
34
- summary: str | None = Field(None, description="Optional text summary")
34
+ summary: Optional[str] = Field(None, description="Optional text summary")
35
35
 
36
36
 
37
37
  class JudgeScoreResponse(BaseModel):
@@ -46,7 +46,7 @@ class JudgeScoreResponse(BaseModel):
46
46
  default_factory=list,
47
47
  description="List of per-event rubric reviews (one per step)"
48
48
  )
49
- outcome_review: ReviewPayload | None = Field(
49
+ outcome_review: Optional[ReviewPayload] = Field(
50
50
  None,
51
51
  description="Optional outcome-level rubric review"
52
52
  )
@@ -63,7 +63,7 @@ class JudgeScoreResponse(BaseModel):
63
63
  description="Request metadata (provider, options, etc.)"
64
64
  )
65
65
 
66
- def aggregate_event_reward(self) -> float | None:
66
+ def aggregate_event_reward(self) -> Optional[float]:
67
67
  """
68
68
  Aggregate all event totals into a single reward.
69
69
 
@@ -74,7 +74,7 @@ class JudgeScoreResponse(BaseModel):
74
74
  return None
75
75
  return sum(self.event_totals)
76
76
 
77
- def aggregate_outcome_reward(self) -> float | None:
77
+ def aggregate_outcome_reward(self) -> Optional[float]:
78
78
  """
79
79
  Extract outcome reward from outcome_review.
80
80
 
@@ -92,15 +92,15 @@ class JudgeTaskApp(BaseModel):
92
92
  """Task application metadata."""
93
93
 
94
94
  id: str = Field(..., description="Task app identifier")
95
- base_url: str | None = Field(None, description="Optional base URL for task app")
95
+ base_url: Optional[str] = Field(None, description="Optional base URL for task app")
96
96
 
97
97
 
98
98
  class JudgeOptions(BaseModel):
99
99
  """Judge provider and configuration options."""
100
100
 
101
- provider: str | None = Field(None, description="Judge provider (e.g., 'openai', 'groq')")
102
- model: str | None = Field(None, description="Model identifier")
103
- rubric_id: str | None = Field(None, description="Rubric identifier")
101
+ provider: Optional[str] = Field(None, description="Judge provider (e.g., 'openai', 'groq')")
102
+ model: Optional[str] = Field(None, description="Model identifier")
103
+ rubric_id: Optional[str] = Field(None, description="Rubric identifier")
104
104
  event: bool = Field(True, description="Enable event-level judging")
105
105
  outcome: bool = Field(True, description="Enable outcome-level judging")
106
106
 
@@ -123,5 +123,5 @@ class JudgeScoreRequest(BaseModel):
123
123
  task_app: JudgeTaskApp = Field(..., description="Task application metadata")
124
124
  trace: JudgeTracePayload = Field(..., description="Trajectory trace to evaluate")
125
125
  options: JudgeOptions = Field(default_factory=lambda: JudgeOptions(), description="Judge options")
126
- rubric: dict[str, Any] | None = Field(None, description="Optional explicit rubric criteria")
126
+ rubric: Optional[dict[str, Any]] = Field(None, description="Optional explicit rubric criteria")
127
127
 
@@ -3,6 +3,12 @@ from synth_ai.task import task_app_health, validate_task_app_url
3
3
  from .client import LearningClient
4
4
  from .health import backend_health, balance_autumn_normalized, pricing_preflight
5
5
  from .jobs import JobHandle, JobsApiResolver
6
+ from .prompt_learning_client import (
7
+ PromptLearningClient,
8
+ get_prompt_text,
9
+ get_prompts,
10
+ get_scoring_summary,
11
+ )
6
12
  from .rl import (
7
13
  MAX_ENVIRONMENT_API_KEY_BYTES,
8
14
  RlClient,
@@ -32,6 +38,10 @@ __all__ = [
32
38
  "FtClient",
33
39
  "SFTJobConfig",
34
40
  "prepare_sft_job_payload",
41
+ "PromptLearningClient",
42
+ "get_prompts",
43
+ "get_prompt_text",
44
+ "get_scoring_summary",
35
45
  "RolloutEnvSpec",
36
46
  "RolloutPolicySpec",
37
47
  "RolloutRecordConfig",
@@ -0,0 +1,276 @@
1
+ """Client utilities for querying prompt learning job results."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, Dict, List, Optional
6
+
7
+ from .._utils.http import AsyncHttpClient
8
+ from .prompt_learning_types import PromptResults
9
+
10
+
11
+ def _validate_job_id(job_id: str) -> None:
12
+ """Validate that job_id has the expected prompt learning format.
13
+
14
+ Args:
15
+ job_id: Job ID to validate
16
+
17
+ Raises:
18
+ ValueError: If job_id doesn't start with 'pl_'
19
+ """
20
+ if not job_id.startswith("pl_"):
21
+ raise ValueError(
22
+ f"Invalid prompt learning job ID format: {job_id!r}. "
23
+ f"Expected format: 'pl_<identifier>' (e.g., 'pl_9c58b711c2644083')"
24
+ )
25
+
26
+
27
class PromptLearningClient:
    """Client for interacting with prompt learning jobs and retrieving results.

    Wraps the backend's ``/api/prompt-learning/online/jobs`` endpoints and
    reconstructs prompt/scoring results from the job's event stream.
    """

    def __init__(self, base_url: str, api_key: str, *, timeout: float = 30.0) -> None:
        """Initialize the prompt learning client.

        Args:
            base_url: Base URL of the backend API (e.g., "http://localhost:8000")
            api_key: API key for authentication
            timeout: Request timeout in seconds
        """
        self._base_url = base_url.rstrip("/")
        self._api_key = api_key
        self._timeout = timeout

    async def get_job(self, job_id: str) -> Dict[str, Any]:
        """Get job metadata and status.

        Args:
            job_id: Job ID (e.g., "pl_9c58b711c2644083")

        Returns:
            Job metadata including status, best_score, created_at, etc.

        Raises:
            ValueError: If job_id format is invalid
        """
        _validate_job_id(job_id)
        async with AsyncHttpClient(self._base_url, self._api_key, timeout=self._timeout) as http:
            return await http.get(f"/api/prompt-learning/online/jobs/{job_id}")

    async def get_events(
        self, job_id: str, *, since_seq: int = 0, limit: int = 5000
    ) -> List[Dict[str, Any]]:
        """Get events for a prompt learning job.

        Args:
            job_id: Job ID
            since_seq: Return events after this sequence number
            limit: Maximum number of events to return

        Returns:
            List of event dictionaries with type, message, data, etc.

        Raises:
            ValueError: If job_id format is invalid or response structure is unexpected
        """
        _validate_job_id(job_id)
        params = {"since_seq": since_seq, "limit": limit}
        async with AsyncHttpClient(self._base_url, self._api_key, timeout=self._timeout) as http:
            js = await http.get(
                f"/api/prompt-learning/online/jobs/{job_id}/events",
                params=params
            )
            if isinstance(js, dict) and isinstance(js.get("events"), list):
                return js["events"]
            # Unexpected response structure - raise instead of silently returning empty list
            raise ValueError(
                f"Unexpected response structure from events endpoint. "
                f"Expected dict with 'events' list, got: {type(js).__name__}"
            )

    async def get_prompts(self, job_id: str) -> PromptResults:
        """Get the best prompts and scoring metadata from a completed job.

        Args:
            job_id: Job ID

        Returns:
            PromptResults dataclass containing:
            - best_prompt: The top-performing prompt with sections and metadata
            - best_score: The best accuracy score achieved
            - top_prompts: List of top-K prompts with train/val scores
            - optimized_candidates: All frontier/Pareto-optimal candidates
            - attempted_candidates: All candidates tried during optimization

        Raises:
            ValueError: If job_id format is invalid
        """
        _validate_job_id(job_id)
        events = await self.get_events(job_id, limit=10000)

        result = PromptResults()

        # Extract results from events
        for event in events:
            # Fix: the backend may serialize an absent field as JSON null, in
            # which case .get(key, default) still returns None (the key exists).
            # Coalesce so a null "type"/"data" cannot crash the lookups below.
            event_type = event.get("type") or ""
            event_data = event.get("data") or {}

            # Best prompt event (last occurrence wins on assignment)
            if event_type == "prompt.learning.best.prompt":
                result.best_prompt = event_data.get("best_prompt")
                result.best_score = event_data.get("best_score")

            # Top-K prompt content events
            elif event_type == "prompt.learning.top.prompt.content":
                result.top_prompts.append({
                    "rank": event_data.get("rank"),
                    "train_accuracy": event_data.get("train_accuracy"),
                    "val_accuracy": event_data.get("val_accuracy"),
                    "template": event_data.get("template"),
                    "full_text": event_data.get("full_text"),
                })

            # Final results event (contains all candidates)
            elif event_type == "prompt.learning.final.results":
                result.optimized_candidates = event_data.get("optimized_candidates", [])
                result.attempted_candidates = event_data.get("attempted_candidates", [])

            # Validation results
            elif event_type == "prompt.learning.validation.scored":
                result.validation_results.append(event_data)

            # Completion event (fallback for best_score)
            elif event_type == "prompt.learning.gepa.complete":
                if result.best_score is None:
                    result.best_score = event_data.get("best_score")

        return result

    async def get_prompt_text(self, job_id: str, rank: int = 1) -> Optional[str]:
        """Get the full text of a specific prompt by rank.

        Args:
            job_id: Job ID
            rank: Prompt rank (1 = best, 2 = second best, etc.)

        Returns:
            Full prompt text or None if not found

        Raises:
            ValueError: If job_id format is invalid or rank < 1
        """
        _validate_job_id(job_id)
        if rank < 1:
            raise ValueError(f"Rank must be >= 1, got: {rank}")
        prompts_data = await self.get_prompts(job_id)

        for prompt_info in prompts_data.top_prompts:
            if prompt_info.get("rank") == rank:
                return prompt_info.get("full_text")

        return None

    async def get_scoring_summary(self, job_id: str) -> Dict[str, Any]:
        """Get a summary of scoring metrics for all candidates.

        Args:
            job_id: Job ID

        Returns:
            Dictionary with scoring statistics:
            - best_train_accuracy: Best training accuracy
            - best_val_accuracy: Best validation accuracy (if available)
            - num_candidates_tried: Total candidates evaluated
            - num_frontier_candidates: Number in Pareto frontier
            - score_distribution: Histogram of accuracy scores
            - mean_train_accuracy: Mean training accuracy

        Raises:
            ValueError: If job_id format is invalid
        """
        _validate_job_id(job_id)
        prompts_data = await self.get_prompts(job_id)

        attempted = prompts_data.attempted_candidates
        optimized = prompts_data.optimized_candidates
        validation = prompts_data.validation_results

        # Extract train accuracies (only from candidates that have accuracy field)
        train_accuracies = [
            c["accuracy"] for c in attempted if "accuracy" in c
        ]

        # Extract val accuracies (only from validations that have accuracy field)
        val_accuracies = [
            v["accuracy"] for v in validation if "accuracy" in v
        ]

        # Score distribution: five equal-width bins over [0, 1]; the final
        # bin is closed on the right so acc == 1.0 is counted.
        bins = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
        distribution = {f"{bins[i]:.1f}-{bins[i+1]:.1f}": 0 for i in range(len(bins) - 1)}
        for acc in train_accuracies:
            for i in range(len(bins) - 1):
                if bins[i] <= acc < bins[i+1] or (i == len(bins) - 2 and acc == bins[i+1]):
                    distribution[f"{bins[i]:.1f}-{bins[i+1]:.1f}"] += 1
                    break

        return {
            "best_train_accuracy": max(train_accuracies) if train_accuracies else None,
            "best_val_accuracy": max(val_accuracies) if val_accuracies else None,
            "num_candidates_tried": len(attempted),
            "num_frontier_candidates": len(optimized),
            "score_distribution": distribution,
            "mean_train_accuracy": sum(train_accuracies) / len(train_accuracies) if train_accuracies else None,
        }
223
+
224
+
225
+ # Synchronous wrapper for convenience
226
def get_prompts(job_id: str, base_url: str, api_key: str) -> PromptResults:
    """Blocking convenience wrapper around PromptLearningClient.get_prompts.

    Args:
        job_id: Job ID (e.g., "pl_9c58b711c2644083")
        base_url: Backend API base URL
        api_key: API key for authentication

    Returns:
        PromptResults dataclass with prompt results
    """
    import asyncio

    return asyncio.run(PromptLearningClient(base_url, api_key).get_prompts(job_id))
241
+
242
+
243
def get_prompt_text(job_id: str, base_url: str, api_key: str, rank: int = 1) -> Optional[str]:
    """Blocking convenience wrapper around PromptLearningClient.get_prompt_text.

    Args:
        job_id: Job ID
        base_url: Backend API base URL
        api_key: API key for authentication
        rank: Prompt rank (1 = best, 2 = second best, etc.)

    Returns:
        Full prompt text or None if not found
    """
    import asyncio

    return asyncio.run(PromptLearningClient(base_url, api_key).get_prompt_text(job_id, rank))
259
+
260
+
261
def get_scoring_summary(job_id: str, base_url: str, api_key: str) -> Dict[str, Any]:
    """Blocking convenience wrapper around PromptLearningClient.get_scoring_summary.

    Args:
        job_id: Job ID
        base_url: Backend API base URL
        api_key: API key for authentication

    Returns:
        Dictionary with scoring statistics
    """
    import asyncio

    return asyncio.run(PromptLearningClient(base_url, api_key).get_scoring_summary(job_id))
276
+
@@ -0,0 +1,184 @@
1
+ """Type definitions for prompt learning data structures."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from typing import Any, Dict, List, Optional
7
+
8
+
9
@dataclass
class TextReplacement:
    """A text replacement in a prompt transformation."""

    # Replacement text to insert into the prompt.
    new_text: str
    # Message role whose content is rewritten; defaults to the system message.
    apply_to_role: str = "system"
    # Exact text to replace. NOTE(review): None presumably means "insert rather
    # than substitute" — confirm against the transformation applier.
    old_text: Optional[str] = None
    # Optional placement of the replacement; index vs. character-offset semantics
    # are not visible here — confirm with the consumer of this type.
    position: Optional[int] = None
17
+
18
+
19
@dataclass
class CandidateScore:
    """Scoring information for a candidate prompt."""

    # Aggregate accuracy score for the candidate.
    accuracy: float
    # Length of the candidate prompt (units not visible here — likely characters
    # or tokens; confirm against the producer).
    prompt_length: int = 0
    # Rate of tool calls observed for this candidate (0.0 when unknown).
    tool_call_rate: float = 0.0
    # Per-instance scores that back the aggregate accuracy.
    instance_scores: List[float] = field(default_factory=list)
27
+
28
+
29
@dataclass
class PromptSection:
    """A section of a prompt (e.g., system, user, assistant)."""

    # Message role this section belongs to (e.g., "system", "user").
    role: str
    # Text content of the section.
    content: str
35
+
36
+
37
@dataclass
class Candidate:
    """A candidate prompt from the optimization process."""

    accuracy: float
    prompt_length: int = 0
    tool_call_rate: float = 0.0
    instance_scores: List[float] = field(default_factory=list)
    object: Optional[Dict[str, Any]] = None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> Candidate:
        """Build a Candidate from a raw dictionary, filling defaults for absent keys."""
        scalar_defaults = {"accuracy": 0.0, "prompt_length": 0, "tool_call_rate": 0.0}
        kwargs: Dict[str, Any] = {key: data.get(key, default) for key, default in scalar_defaults.items()}
        kwargs["instance_scores"] = data.get("instance_scores", [])
        kwargs["object"] = data.get("object")
        return cls(**kwargs)
57
+
58
+
59
@dataclass
class OptimizedCandidate:
    """An optimized candidate from the Pareto frontier."""

    score: CandidateScore
    payload_kind: str  # "transformation" or "template"
    object: Optional[Dict[str, Any]] = None
    instance_scores: Optional[List[float]] = None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> OptimizedCandidate:
        """Build an OptimizedCandidate from a raw dictionary payload."""
        raw_score = data.get("score", {})
        if not isinstance(raw_score, dict):
            # Malformed or absent score payload: fall back to a zero-accuracy score.
            parsed_score = CandidateScore(accuracy=0.0)
        else:
            parsed_score = CandidateScore(
                accuracy=raw_score.get("accuracy", 0.0),
                prompt_length=raw_score.get("prompt_length", 0),
                tool_call_rate=raw_score.get("tool_call_rate", 0.0),
                instance_scores=raw_score.get("instance_scores", []),
            )
        return cls(
            score=parsed_score,
            payload_kind=data.get("payload_kind", "unknown"),
            object=data.get("object"),
            instance_scores=data.get("instance_scores"),
        )
88
+
89
+
90
@dataclass
class PromptLearningEvent:
    """A generic prompt learning event."""

    type: str
    message: str
    data: Dict[str, Any]
    seq: int
    created_at: Optional[str] = None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> PromptLearningEvent:
        """Deserialize an event dictionary, defaulting every missing field."""
        return cls(
            data.get("type", ""),
            data.get("message", ""),
            data.get("data", {}),
            data.get("seq", 0),
            data.get("created_at"),
        )
110
+
111
+
112
@dataclass
class BestPromptEventData:
    """Data for prompt.learning.best.prompt event."""

    best_score: float
    best_prompt: Dict[str, Any]

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> BestPromptEventData:
        """Deserialize the event payload, tolerating missing keys."""
        score = data.get("best_score", 0.0)
        prompt = data.get("best_prompt", {})
        return cls(best_score=score, best_prompt=prompt)
126
+
127
+
128
@dataclass
class FinalResultsEventData:
    """Data for prompt.learning.final.results event."""

    attempted_candidates: List[Dict[str, Any]]
    optimized_candidates: List[Dict[str, Any]]

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> FinalResultsEventData:
        """Deserialize the final-results payload; missing lists become empty."""
        attempted = data.get("attempted_candidates", [])
        optimized = data.get("optimized_candidates", [])
        return cls(attempted_candidates=attempted, optimized_candidates=optimized)
142
+
143
+
144
@dataclass
class ValidationScoredEventData:
    """Data for prompt.learning.validation.scored event."""

    accuracy: float
    instance_scores: List[float] = field(default_factory=list)
    is_baseline: bool = False

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> ValidationScoredEventData:
        """Deserialize a validation-scored payload, defaulting any missing field."""
        return cls(
            data.get("accuracy", 0.0),
            data.get("instance_scores", []),
            data.get("is_baseline", False),
        )
160
+
161
+
162
@dataclass
class PromptResults:
    """Results from a completed prompt learning job."""

    best_prompt: Optional[Dict[str, Any]] = None
    best_score: Optional[float] = None
    top_prompts: List[Dict[str, Any]] = field(default_factory=list)
    optimized_candidates: List[Dict[str, Any]] = field(default_factory=list)
    attempted_candidates: List[Dict[str, Any]] = field(default_factory=list)
    validation_results: List[Dict[str, Any]] = field(default_factory=list)

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> PromptResults:
        """Deserialize job results; list-valued fields default to empty lists."""
        list_fields = (
            "top_prompts",
            "optimized_candidates",
            "attempted_candidates",
            "validation_results",
        )
        kwargs: Dict[str, Any] = {name: data.get(name, []) for name in list_fields}
        kwargs["best_prompt"] = data.get("best_prompt")
        kwargs["best_score"] = data.get("best_score")
        return cls(**kwargs)
184
+
@@ -107,7 +107,9 @@ class RlClient:
107
107
  async with AsyncHttpClient(self._base_url, self._api_key, timeout=30.0) as http:
108
108
  try:
109
109
  js = await http.get(
110
- f"{_api_base(self._base_url)}/learning/jobs/{job_id}/events", params=params
110
+ f"{_api_base(self._base_url)}/learning/jobs/{job_id}/events",
111
+ params=params,
112
+ headers={"accept": "application/json"},
111
113
  )
112
114
  except HTTPError as he:
113
115
  with suppress(Exception):
@@ -0,0 +1,2 @@
1
+ """Pricing module for SDK."""
2
+
@@ -0,0 +1,57 @@
1
+ """Static pricing table for supported models.
2
+
3
+ This module provides per-token pricing used by the SDK status commands.
4
+ Rates are expressed in USD per token and split into input/output prices.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from dataclasses import dataclass
9
+ from typing import Dict
10
+
11
+
12
@dataclass(frozen=True)
class TokenRates:
    """Per-token USD rates for one model, input and output priced separately."""

    # USD charged per single input (prompt) token.
    input_usd: float
    # USD charged per single output (completion) token.
    output_usd: float
16
+
17
+
18
# Default per-token prices (USD), sourced Nov 3, 2025 — update as contracts change
# Mapping: provider name -> (model identifier -> TokenRates). All rates are USD
# per single token (the quoted per-1M price divided by 1e6).
MODEL_PRICES: Dict[str, Dict[str, TokenRates]] = {
    # OpenAI official pricing
    "openai": {
        # GPT-5 family
        "gpt-5": TokenRates(input_usd=0.00000125, output_usd=0.00001000),  # $1.25 / $10 per 1M
        "gpt-5-mini": TokenRates(input_usd=0.00000025, output_usd=0.00000200),  # $0.25 / $2.00 per 1M
        "gpt-5-nano": TokenRates(input_usd=0.00000005, output_usd=0.00000040),  # $0.05 / $0.40 per 1M
        "gpt-4o-mini": TokenRates(input_usd=0.00000015, output_usd=0.00000060),  # $0.15 / $0.60 per 1M
        "gpt-4o": TokenRates(input_usd=0.00000250, output_usd=0.00001000),  # $2.50 / $10.00 per 1M
    },
    # Groq OSS via OpenAI-compatible path (latest Groq docs)
    "groq": {
        "openai/gpt-oss-20b": TokenRates(input_usd=0.000000075, output_usd=0.000000300),  # $0.075 / $0.30 per 1M
        "openai/gpt-oss-120b": TokenRates(input_usd=0.000000150, output_usd=0.000000600),  # $0.15 / $0.60 per 1M
        # Additional Groq on-demand models
        "moonshotai/kimi-k2-0905": TokenRates(input_usd=0.000001000, output_usd=0.000003000),  # $1.00 / $3.00 per 1M
        "meta/llama-guard-4-12b": TokenRates(input_usd=0.000000200, output_usd=0.000000200),  # $0.20 / $0.20 per 1M
        "qwen/qwen3-32b": TokenRates(input_usd=0.000000290, output_usd=0.000000590),  # $0.29 / $0.59 per 1M
        "meta/llama-3.3-70b-versatile": TokenRates(input_usd=0.000000590, output_usd=0.000000790),  # $0.59 / $0.79 per 1M
        "meta/llama-3.1-8b-instant": TokenRates(input_usd=0.000000050, output_usd=0.000000080),  # $0.05 / $0.08 per 1M
    },
    # Google Gemini pricing — per-token USD (per 1M ÷ 1e6), Nov 3, 2025
    "google": {
        # Gemini 2.5 Pro (two tiers by prompt size)
        "gemini-2.5-pro": TokenRates(input_usd=0.00000125, output_usd=0.00001000),  # <=200k tokens
        "gemini-2.5-pro-gt200k": TokenRates(input_usd=0.00000250, output_usd=0.00001500),  # >200k tokens
        # Gemini 2.5 Flash (hybrid reasoning)
        "gemini-2.5-flash": TokenRates(input_usd=0.00000030, output_usd=0.00000250),
        # Gemini 2.5 Flash-Lite (cheapest)
        "gemini-2.5-flash-lite": TokenRates(input_usd=0.00000010, output_usd=0.00000040),
    },
}
57
+
@@ -0,0 +1,29 @@
1
+ from .config import StreamConfig
2
+ from .handlers import (
3
+ BufferedHandler,
4
+ CallbackHandler,
5
+ CLIHandler,
6
+ IntegrationTestHandler,
7
+ JSONHandler,
8
+ LossCurveHandler,
9
+ RichHandler,
10
+ StreamHandler,
11
+ )
12
+ from .streamer import JobStreamer, StreamEndpoints
13
+ from .types import StreamMessage, StreamType
14
+
15
# Public API of the streaming package. Kept in sorted order so additions are
# easy to spot in review (the previous list interleaved handler and streamer
# names inconsistently, e.g. "JobStreamer" after "LossCurveHandler").
__all__ = [
    "BufferedHandler",
    "CLIHandler",
    "CallbackHandler",
    "IntegrationTestHandler",
    "JSONHandler",
    "JobStreamer",
    "LossCurveHandler",
    "RichHandler",
    "StreamConfig",
    "StreamEndpoints",
    "StreamHandler",
    "StreamMessage",
    "StreamType",
]