synth-ai 0.2.4.dev5__py3-none-any.whl → 0.2.4.dev7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (229)
  1. synth_ai/__init__.py +18 -9
  2. synth_ai/cli/__init__.py +10 -5
  3. synth_ai/cli/balance.py +22 -17
  4. synth_ai/cli/calc.py +2 -3
  5. synth_ai/cli/demo.py +3 -5
  6. synth_ai/cli/legacy_root_backup.py +58 -32
  7. synth_ai/cli/man.py +22 -19
  8. synth_ai/cli/recent.py +9 -8
  9. synth_ai/cli/root.py +58 -13
  10. synth_ai/cli/status.py +13 -6
  11. synth_ai/cli/traces.py +45 -21
  12. synth_ai/cli/watch.py +40 -37
  13. synth_ai/config/base_url.py +1 -3
  14. synth_ai/core/experiment.py +1 -2
  15. synth_ai/environments/__init__.py +2 -6
  16. synth_ai/environments/environment/artifacts/base.py +3 -1
  17. synth_ai/environments/environment/db/sqlite.py +1 -1
  18. synth_ai/environments/environment/registry.py +19 -20
  19. synth_ai/environments/environment/resources/sqlite.py +2 -3
  20. synth_ai/environments/environment/rewards/core.py +3 -2
  21. synth_ai/environments/environment/tools/__init__.py +6 -4
  22. synth_ai/environments/examples/crafter_classic/__init__.py +1 -1
  23. synth_ai/environments/examples/crafter_classic/engine.py +21 -17
  24. synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +1 -0
  25. synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +2 -1
  26. synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +2 -1
  27. synth_ai/environments/examples/crafter_classic/engine_serialization_patch_v3.py +3 -2
  28. synth_ai/environments/examples/crafter_classic/environment.py +16 -15
  29. synth_ai/environments/examples/crafter_classic/taskset.py +2 -2
  30. synth_ai/environments/examples/crafter_classic/trace_hooks_v3.py +2 -3
  31. synth_ai/environments/examples/crafter_classic/world_config_patch_simple.py +2 -1
  32. synth_ai/environments/examples/crafter_custom/crafter/__init__.py +2 -2
  33. synth_ai/environments/examples/crafter_custom/crafter/config.py +2 -2
  34. synth_ai/environments/examples/crafter_custom/crafter/env.py +1 -5
  35. synth_ai/environments/examples/crafter_custom/crafter/objects.py +1 -2
  36. synth_ai/environments/examples/crafter_custom/crafter/worldgen.py +1 -2
  37. synth_ai/environments/examples/crafter_custom/dataset_builder.py +5 -5
  38. synth_ai/environments/examples/crafter_custom/environment.py +13 -13
  39. synth_ai/environments/examples/crafter_custom/run_dataset.py +5 -5
  40. synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +2 -2
  41. synth_ai/environments/examples/enron/art_helpers/local_email_db.py +5 -4
  42. synth_ai/environments/examples/enron/art_helpers/types_enron.py +2 -1
  43. synth_ai/environments/examples/enron/engine.py +18 -14
  44. synth_ai/environments/examples/enron/environment.py +12 -11
  45. synth_ai/environments/examples/enron/taskset.py +7 -7
  46. synth_ai/environments/examples/minigrid/__init__.py +6 -6
  47. synth_ai/environments/examples/minigrid/engine.py +6 -6
  48. synth_ai/environments/examples/minigrid/environment.py +6 -6
  49. synth_ai/environments/examples/minigrid/puzzle_loader.py +3 -2
  50. synth_ai/environments/examples/minigrid/taskset.py +13 -13
  51. synth_ai/environments/examples/nethack/achievements.py +1 -1
  52. synth_ai/environments/examples/nethack/engine.py +8 -7
  53. synth_ai/environments/examples/nethack/environment.py +10 -9
  54. synth_ai/environments/examples/nethack/helpers/__init__.py +8 -9
  55. synth_ai/environments/examples/nethack/helpers/action_mapping.py +1 -1
  56. synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +2 -1
  57. synth_ai/environments/examples/nethack/helpers/observation_utils.py +1 -1
  58. synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +3 -4
  59. synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +6 -5
  60. synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +5 -5
  61. synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +7 -6
  62. synth_ai/environments/examples/nethack/taskset.py +5 -5
  63. synth_ai/environments/examples/red/engine.py +9 -8
  64. synth_ai/environments/examples/red/engine_helpers/reward_components.py +2 -1
  65. synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +7 -7
  66. synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +2 -1
  67. synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +2 -1
  68. synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +2 -1
  69. synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +2 -1
  70. synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +2 -1
  71. synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +2 -1
  72. synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +2 -1
  73. synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +2 -1
  74. synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +2 -1
  75. synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +2 -1
  76. synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +2 -1
  77. synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +3 -2
  78. synth_ai/environments/examples/red/engine_helpers/state_extraction.py +2 -1
  79. synth_ai/environments/examples/red/environment.py +18 -15
  80. synth_ai/environments/examples/red/taskset.py +5 -3
  81. synth_ai/environments/examples/sokoban/engine.py +16 -13
  82. synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +3 -2
  83. synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +2 -1
  84. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +1 -1
  85. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +7 -5
  86. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +1 -1
  87. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +2 -1
  88. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +5 -4
  89. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +3 -2
  90. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +2 -1
  91. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +5 -4
  92. synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +1 -1
  93. synth_ai/environments/examples/sokoban/environment.py +15 -14
  94. synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +5 -3
  95. synth_ai/environments/examples/sokoban/puzzle_loader.py +3 -2
  96. synth_ai/environments/examples/sokoban/taskset.py +13 -10
  97. synth_ai/environments/examples/tictactoe/engine.py +6 -6
  98. synth_ai/environments/examples/tictactoe/environment.py +8 -7
  99. synth_ai/environments/examples/tictactoe/taskset.py +6 -5
  100. synth_ai/environments/examples/verilog/engine.py +4 -3
  101. synth_ai/environments/examples/verilog/environment.py +11 -10
  102. synth_ai/environments/examples/verilog/taskset.py +14 -12
  103. synth_ai/environments/examples/wordle/__init__.py +29 -0
  104. synth_ai/environments/examples/wordle/engine.py +398 -0
  105. synth_ai/environments/examples/wordle/environment.py +159 -0
  106. synth_ai/environments/examples/wordle/helpers/generate_instances_wordfreq.py +75 -0
  107. synth_ai/environments/examples/wordle/taskset.py +230 -0
  108. synth_ai/environments/reproducibility/core.py +1 -1
  109. synth_ai/environments/reproducibility/tree.py +21 -21
  110. synth_ai/environments/service/app.py +11 -2
  111. synth_ai/environments/service/core_routes.py +137 -105
  112. synth_ai/environments/service/external_registry.py +1 -2
  113. synth_ai/environments/service/registry.py +1 -1
  114. synth_ai/environments/stateful/core.py +1 -2
  115. synth_ai/environments/stateful/engine.py +1 -1
  116. synth_ai/environments/tasks/api.py +4 -4
  117. synth_ai/environments/tasks/core.py +14 -12
  118. synth_ai/environments/tasks/filters.py +6 -4
  119. synth_ai/environments/tasks/utils.py +13 -11
  120. synth_ai/evals/base.py +2 -3
  121. synth_ai/experimental/synth_oss.py +4 -4
  122. synth_ai/learning/gateway.py +1 -3
  123. synth_ai/learning/prompts/banking77_injection_eval.py +168 -0
  124. synth_ai/learning/prompts/hello_world_in_context_injection_ex.py +213 -0
  125. synth_ai/learning/prompts/mipro.py +282 -1
  126. synth_ai/learning/prompts/random_search.py +246 -0
  127. synth_ai/learning/prompts/run_mipro_banking77.py +172 -0
  128. synth_ai/learning/prompts/run_random_search_banking77.py +324 -0
  129. synth_ai/lm/__init__.py +5 -5
  130. synth_ai/lm/caching/ephemeral.py +9 -9
  131. synth_ai/lm/caching/handler.py +20 -20
  132. synth_ai/lm/caching/persistent.py +10 -10
  133. synth_ai/lm/config.py +3 -3
  134. synth_ai/lm/constants.py +7 -7
  135. synth_ai/lm/core/all.py +17 -3
  136. synth_ai/lm/core/exceptions.py +0 -2
  137. synth_ai/lm/core/main.py +26 -41
  138. synth_ai/lm/core/main_v3.py +20 -10
  139. synth_ai/lm/core/vendor_clients.py +18 -17
  140. synth_ai/lm/injection.py +80 -0
  141. synth_ai/lm/overrides.py +206 -0
  142. synth_ai/lm/provider_support/__init__.py +1 -1
  143. synth_ai/lm/provider_support/anthropic.py +51 -24
  144. synth_ai/lm/provider_support/openai.py +51 -22
  145. synth_ai/lm/structured_outputs/handler.py +34 -32
  146. synth_ai/lm/structured_outputs/inject.py +24 -27
  147. synth_ai/lm/structured_outputs/rehabilitate.py +19 -15
  148. synth_ai/lm/tools/base.py +17 -16
  149. synth_ai/lm/unified_interface.py +17 -18
  150. synth_ai/lm/vendors/base.py +20 -18
  151. synth_ai/lm/vendors/core/anthropic_api.py +50 -25
  152. synth_ai/lm/vendors/core/gemini_api.py +31 -36
  153. synth_ai/lm/vendors/core/mistral_api.py +19 -19
  154. synth_ai/lm/vendors/core/openai_api.py +11 -10
  155. synth_ai/lm/vendors/openai_standard.py +144 -88
  156. synth_ai/lm/vendors/openai_standard_responses.py +74 -61
  157. synth_ai/lm/vendors/retries.py +9 -1
  158. synth_ai/lm/vendors/supported/custom_endpoint.py +26 -26
  159. synth_ai/lm/vendors/supported/deepseek.py +10 -10
  160. synth_ai/lm/vendors/supported/grok.py +8 -8
  161. synth_ai/lm/vendors/supported/ollama.py +2 -1
  162. synth_ai/lm/vendors/supported/openrouter.py +11 -9
  163. synth_ai/lm/vendors/synth_client.py +69 -63
  164. synth_ai/lm/warmup.py +8 -7
  165. synth_ai/tracing/__init__.py +22 -10
  166. synth_ai/tracing_v1/__init__.py +22 -20
  167. synth_ai/tracing_v3/__init__.py +7 -7
  168. synth_ai/tracing_v3/abstractions.py +56 -52
  169. synth_ai/tracing_v3/config.py +4 -2
  170. synth_ai/tracing_v3/db_config.py +6 -8
  171. synth_ai/tracing_v3/decorators.py +29 -30
  172. synth_ai/tracing_v3/examples/basic_usage.py +12 -12
  173. synth_ai/tracing_v3/hooks.py +21 -21
  174. synth_ai/tracing_v3/llm_call_record_helpers.py +85 -98
  175. synth_ai/tracing_v3/lm_call_record_abstractions.py +2 -4
  176. synth_ai/tracing_v3/migration_helper.py +3 -5
  177. synth_ai/tracing_v3/replica_sync.py +30 -32
  178. synth_ai/tracing_v3/session_tracer.py +35 -29
  179. synth_ai/tracing_v3/storage/__init__.py +1 -1
  180. synth_ai/tracing_v3/storage/base.py +8 -7
  181. synth_ai/tracing_v3/storage/config.py +4 -4
  182. synth_ai/tracing_v3/storage/factory.py +4 -4
  183. synth_ai/tracing_v3/storage/utils.py +9 -9
  184. synth_ai/tracing_v3/turso/__init__.py +3 -3
  185. synth_ai/tracing_v3/turso/daemon.py +9 -9
  186. synth_ai/tracing_v3/turso/manager.py +60 -48
  187. synth_ai/tracing_v3/turso/models.py +24 -19
  188. synth_ai/tracing_v3/utils.py +5 -5
  189. synth_ai/tui/__main__.py +1 -1
  190. synth_ai/tui/cli/query_experiments.py +2 -3
  191. synth_ai/tui/cli/query_experiments_v3.py +2 -3
  192. synth_ai/tui/dashboard.py +97 -86
  193. synth_ai/v0/tracing/abstractions.py +28 -28
  194. synth_ai/v0/tracing/base_client.py +9 -9
  195. synth_ai/v0/tracing/client_manager.py +7 -7
  196. synth_ai/v0/tracing/config.py +7 -7
  197. synth_ai/v0/tracing/context.py +6 -6
  198. synth_ai/v0/tracing/decorators.py +6 -5
  199. synth_ai/v0/tracing/events/manage.py +1 -1
  200. synth_ai/v0/tracing/events/store.py +5 -4
  201. synth_ai/v0/tracing/immediate_client.py +4 -5
  202. synth_ai/v0/tracing/local.py +3 -3
  203. synth_ai/v0/tracing/log_client_base.py +4 -5
  204. synth_ai/v0/tracing/retry_queue.py +5 -6
  205. synth_ai/v0/tracing/trackers.py +25 -25
  206. synth_ai/v0/tracing/upload.py +6 -0
  207. synth_ai/v0/tracing_v1/__init__.py +1 -1
  208. synth_ai/v0/tracing_v1/abstractions.py +28 -28
  209. synth_ai/v0/tracing_v1/base_client.py +9 -9
  210. synth_ai/v0/tracing_v1/client_manager.py +7 -7
  211. synth_ai/v0/tracing_v1/config.py +7 -7
  212. synth_ai/v0/tracing_v1/context.py +6 -6
  213. synth_ai/v0/tracing_v1/decorators.py +7 -6
  214. synth_ai/v0/tracing_v1/events/manage.py +1 -1
  215. synth_ai/v0/tracing_v1/events/store.py +5 -4
  216. synth_ai/v0/tracing_v1/immediate_client.py +4 -5
  217. synth_ai/v0/tracing_v1/local.py +3 -3
  218. synth_ai/v0/tracing_v1/log_client_base.py +4 -5
  219. synth_ai/v0/tracing_v1/retry_queue.py +5 -6
  220. synth_ai/v0/tracing_v1/trackers.py +25 -25
  221. synth_ai/v0/tracing_v1/upload.py +25 -24
  222. synth_ai/zyk/__init__.py +1 -0
  223. {synth_ai-0.2.4.dev5.dist-info → synth_ai-0.2.4.dev7.dist-info}/METADATA +2 -11
  224. synth_ai-0.2.4.dev7.dist-info/RECORD +299 -0
  225. synth_ai-0.2.4.dev5.dist-info/RECORD +0 -287
  226. {synth_ai-0.2.4.dev5.dist-info → synth_ai-0.2.4.dev7.dist-info}/WHEEL +0 -0
  227. {synth_ai-0.2.4.dev5.dist-info → synth_ai-0.2.4.dev7.dist-info}/entry_points.txt +0 -0
  228. {synth_ai-0.2.4.dev5.dist-info → synth_ai-0.2.4.dev7.dist-info}/licenses/LICENSE +0 -0
  229. {synth_ai-0.2.4.dev5.dist-info → synth_ai-0.2.4.dev7.dist-info}/top_level.txt +0 -0
synth_ai/lm/provider_support/anthropic.py

@@ -6,14 +6,13 @@ Analogous to the modified OpenAI version.
 import logging
 import types
 from dataclasses import dataclass
-from typing import Optional
 
 try:
     import anthropic
-except ImportError:
+except ImportError as err:
     raise ModuleNotFoundError(
         "Please install anthropic to use this feature: 'pip install anthropic'"
-    )
+    ) from err
 
 try:
     from anthropic import AsyncClient, Client
@@ -28,6 +27,14 @@ from langfuse.utils import _get_timestamp
 from langfuse.utils.langfuse_singleton import LangfuseSingleton
 from wrapt import wrap_function_wrapper
 
+from synth_ai.lm.overrides import (
+    apply_injection as apply_injection_overrides,
+)
+from synth_ai.lm.overrides import (
+    apply_param_overrides,
+    apply_tool_overrides,
+    use_overrides_for_messages,
+)
 from synth_ai.lm.provider_support.suppress_logging import *
 from synth_ai.tracing_v1.trackers import (
     synth_tracker_async,
@@ -349,7 +356,17 @@ def _wrap(anthropic_resource: AnthropicDefinition, initialize, wrapped, args, kw
     generation = new_langfuse.generation(**generation_data)
 
     try:
-        anthropic_response = wrapped(*args, **arg_extractor.get_anthropic_args())
+        call_kwargs = arg_extractor.get_anthropic_args()
+        # Apply context-scoped injection to chat messages if present
+        if isinstance(call_kwargs, dict) and "messages" in call_kwargs:
+            try:
+                with use_overrides_for_messages(call_kwargs["messages"]):  # type: ignore[arg-type]
+                    call_kwargs["messages"] = apply_injection_overrides(call_kwargs["messages"])  # type: ignore[arg-type]
+                    call_kwargs = apply_tool_overrides(call_kwargs)
+                    call_kwargs = apply_param_overrides(call_kwargs)
+            except Exception:
+                pass
+        anthropic_response = wrapped(*args, **call_kwargs)
 
         # If it's a streaming call, returns a generator
         if isinstance(anthropic_response, types.GeneratorType):
@@ -363,10 +380,10 @@ def _wrap(anthropic_resource: AnthropicDefinition, initialize, wrapped, args, kw
         else:
             model, completion, usage = _extract_anthropic_completion(anthropic_response)
             # Synth tracking
-            if "messages" in arg_extractor.get_anthropic_args():
+            if "messages" in call_kwargs:
                 # print("\nWRAP: Messages API path")
-                system_content = arg_extractor.get_anthropic_args().get("system")
-                original_messages = arg_extractor.get_anthropic_args()["messages"]
+                system_content = call_kwargs.get("system")
+                original_messages = call_kwargs["messages"]
                 # print(f"WRAP: Original messages: {original_messages}")
                 # print(f"WRAP: System content: {system_content}")
 
@@ -397,9 +414,9 @@ def _wrap(anthropic_resource: AnthropicDefinition, initialize, wrapped, args, kw
                 )
                 # print("Finished tracking LM output")
 
-            elif "prompt" in arg_extractor.get_anthropic_args():
+            elif "prompt" in call_kwargs:
                 # print("\nWRAP: Completions API path")
-                user_prompt = arg_extractor.get_anthropic_args().get("prompt", "")
+                user_prompt = call_kwargs.get("prompt", "")
                 messages = [{"role": "user", "content": user_prompt}]
                 # print(f"WRAP: Messages created: {messages}")
@@ -476,17 +493,27 @@ async def _wrap_async(anthropic_resource: AnthropicDefinition, initialize, wrapp
 
     try:
         logger.debug("About to call wrapped function")
-        response = await wrapped(*args, **kwargs)
+        call_kwargs = kwargs
+        # Apply context-scoped injection to chat messages if present
+        if isinstance(call_kwargs, dict) and "messages" in call_kwargs:
+            try:
+                with use_overrides_for_messages(call_kwargs["messages"]):  # type: ignore[arg-type]
+                    call_kwargs["messages"] = apply_injection_overrides(call_kwargs["messages"])  # type: ignore[arg-type]
+                    call_kwargs = apply_tool_overrides(call_kwargs)
+                    call_kwargs = apply_param_overrides(call_kwargs)
+            except Exception:
+                pass
+        response = await wrapped(*args, **call_kwargs)
         logger.debug(f"Got response: {response}")
 
         model, completion, usage = _extract_anthropic_completion(response)
        logger.debug(f"Extracted completion - Model: {model}, Usage: {usage}")
 
        # Synth tracking
-        if "messages" in arg_extractor.get_anthropic_args():
+        if "messages" in call_kwargs:
            # logger.debug("WRAP_ASYNC: Messages API path detected")
-            system_content = arg_extractor.get_anthropic_args().get("system")
-            original_messages = arg_extractor.get_anthropic_args()["messages"]
+            system_content = call_kwargs.get("system")
+            original_messages = call_kwargs["messages"]
            # logger.debug("WRAP_ASYNC: Original messages: %s", original_messages)
            # logger.debug("WRAP_ASYNC: System content: %s", system_content)
 
@@ -511,9 +538,9 @@ async def _wrap_async(anthropic_resource: AnthropicDefinition, initialize, wrapp
                model_name=model,
                finetune=False,
            )
-        elif "prompt" in arg_extractor.get_anthropic_args():
+        elif "prompt" in call_kwargs:
            # Handle Completions API format
-            user_prompt = arg_extractor.get_anthropic_args().get("prompt", "")
+            user_prompt = call_kwargs.get("prompt", "")
            messages = [{"role": "user", "content": user_prompt}]
            assistant_msg = [{"role": "assistant", "content": completion}]
 
@@ -772,7 +799,7 @@ class LangfuseAnthropicResponseGeneratorAsync:
 
 
 class AnthropicLangfuse:
-    _langfuse: Optional[Langfuse] = None
+    _langfuse: Langfuse | None = None
 
     def initialize(self):
         self._langfuse = LangfuseSingleton().get(
@@ -919,14 +946,14 @@ class AnthropicLangfuse:
 
         anthropic.AsyncClient.__init__ = new_async_init
 
-        setattr(anthropic, "langfuse_public_key", None)
-        setattr(anthropic, "langfuse_secret_key", None)
-        setattr(anthropic, "langfuse_host", None)
-        setattr(anthropic, "langfuse_debug", None)
-        setattr(anthropic, "langfuse_enabled", True)
-        setattr(anthropic, "langfuse_sample_rate", None)
-        setattr(anthropic, "langfuse_auth_check", self.langfuse_auth_check)
-        setattr(anthropic, "flush_langfuse", self.flush)
+        anthropic.langfuse_public_key = None
+        anthropic.langfuse_secret_key = None
+        anthropic.langfuse_host = None
+        anthropic.langfuse_debug = None
+        anthropic.langfuse_enabled = True
+        anthropic.langfuse_sample_rate = None
+        anthropic.langfuse_auth_check = self.langfuse_auth_check
+        anthropic.flush_langfuse = self.flush
 
 
 modifier = AnthropicLangfuse()
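
Note: both provider wrappers now route chat calls through the new synth_ai.lm.overrides helpers added in this release (synth_ai/lm/overrides.py and synth_ai/lm/injection.py in the file list). Below is a minimal sketch of that pattern outside the Langfuse wrapper, assuming only the names imported above; the call_kwargs payload, the placeholder model id, and the final client call are illustrative, not code from this diff:

# Sketch only: mirrors the _wrap/_wrap_async changes above.
from synth_ai.lm.overrides import (
    apply_injection,
    apply_param_overrides,
    apply_tool_overrides,
    use_overrides_for_messages,
)

call_kwargs = {  # illustrative chat payload
    "model": "claude-3-5-sonnet-latest",  # placeholder model id
    "messages": [{"role": "user", "content": "Hello"}],
}

if "messages" in call_kwargs:
    try:
        # Scope overrides to this message list, then rewrite messages,
        # tools, and params before handing kwargs to the provider client.
        with use_overrides_for_messages(call_kwargs["messages"]):
            call_kwargs["messages"] = apply_injection(call_kwargs["messages"])
            call_kwargs = apply_tool_overrides(call_kwargs)
            call_kwargs = apply_param_overrides(call_kwargs)
    except Exception:
        pass  # the wrappers above deliberately fail open

# response = client.messages.create(**call_kwargs)  # hypothetical client call
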
synth_ai/lm/provider_support/openai.py

@@ -4,7 +4,6 @@ import types
 from collections import defaultdict
 from dataclasses import dataclass
 from inspect import isclass
-from typing import List, Optional
 
 import openai.resources
 from langfuse import Langfuse
@@ -16,14 +15,24 @@ from packaging.version import Version
 from pydantic import BaseModel
 from wrapt import wrap_function_wrapper
 
+from synth_ai.lm.overrides import (
+    apply_injection as apply_injection_overrides,
+)
+from synth_ai.lm.overrides import (
+    apply_param_overrides,
+    apply_tool_overrides,
+    use_overrides_for_messages,
+)
 from synth_ai.lm.provider_support.suppress_logging import *
 from synth_ai.tracing_v1.abstractions import MessageInputs
 from synth_ai.tracing_v1.trackers import synth_tracker_async, synth_tracker_sync
 
 try:
     import openai
-except ImportError:
-    raise ModuleNotFoundError("Please install OpenAI to use this feature: 'pip install openai'")
+except ImportError as err:
+    raise ModuleNotFoundError(
+        "Please install OpenAI to use this feature: 'pip install openai'"
+    ) from err
 
 # CREDIT TO LANGFUSE FOR OPEN-SOURCING THE CODE THAT THIS IS BASED ON
 # USING WITH MIT LICENSE PERMISSION
@@ -52,7 +61,7 @@ class OpenAiDefinition:
     method: str
     type: str
     sync: bool
-    min_version: Optional[str] = None
+    min_version: str | None = None
 
 
 OPENAI_METHODS_V0 = [
@@ -205,7 +214,7 @@ def _extract_chat_response(kwargs: dict):
     Extracts the LLM output from the response.
     """
     response = {
-        "role": kwargs.get("role", None),
+        "role": kwargs.get("role"),
     }
 
     if kwargs.get("function_call") is not None:
@@ -214,7 +223,7 @@ def _extract_chat_response(kwargs: dict):
     if kwargs.get("tool_calls") is not None:
         response.update({"tool_calls": kwargs["tool_calls"]})
 
-    response["content"] = kwargs.get("content", None)
+    response["content"] = kwargs.get("content")
     return response
 
 
@@ -411,7 +420,7 @@ def _extract_streamed_openai_response(resource, chunks):
            usage = chunk_usage
 
        # Process choices
-        choices = chunk.get("choices", [])
+        choices = chunk.get("choices", [])  # noqa: F841
        # logger.debug(f"Extracted - model: {model}, choices: {choices}")
 
    # logger.debug(f"Final completion: {completion}")
@@ -475,7 +484,17 @@ def _wrap(open_ai_resource: OpenAiDefinition, initialize, wrapped, args, kwargs)
     )
     generation = new_langfuse.generation(**generation)
     try:
-        openai_response = wrapped(**arg_extractor.get_openai_args())
+        openai_args = arg_extractor.get_openai_args()
+        # Apply context-scoped injection to chat messages if present
+        if isinstance(openai_args, dict) and "messages" in openai_args:
+            try:
+                with use_overrides_for_messages(openai_args["messages"]):  # type: ignore[arg-type]
+                    openai_args["messages"] = apply_injection_overrides(openai_args["messages"])  # type: ignore[arg-type]
+                    openai_args = apply_tool_overrides(openai_args)
+                    openai_args = apply_param_overrides(openai_args)
+            except Exception:
+                pass
+        openai_response = wrapped(**openai_args)
 
         if _is_streaming_response(openai_response):
             return LangfuseResponseGeneratorSync(
@@ -527,7 +546,7 @@ def _wrap(open_ai_resource: OpenAiDefinition, initialize, wrapped, args, kwargs)
            )
 
        elif open_ai_resource.type == "chat":
-            messages = arg_extractor.get_openai_args().get("messages", [])
+            messages = openai_args.get("messages", [])
            message_input = MessageInputs(messages=messages)
 
            # Track user input
@@ -605,7 +624,17 @@ async def _wrap_async(open_ai_resource: OpenAiDefinition, initialize, wrapped, a
     generation = new_langfuse.generation(**generation)
 
     try:
-        openai_response = await wrapped(**arg_extractor.get_openai_args())
+        openai_args = arg_extractor.get_openai_args()
+        # Apply context-scoped injection to chat messages if present
+        if isinstance(openai_args, dict) and "messages" in openai_args:
+            try:
+                with use_overrides_for_messages(openai_args["messages"]):  # type: ignore[arg-type]
+                    openai_args["messages"] = apply_injection_overrides(openai_args["messages"])  # type: ignore[arg-type]
+                    openai_args = apply_tool_overrides(openai_args)
+                    openai_args = apply_param_overrides(openai_args)
+            except Exception:
+                pass
+        openai_response = await wrapped(**openai_args)
 
         if _is_streaming_response(openai_response):
             return LangfuseResponseGeneratorAsync(
@@ -654,7 +683,7 @@ async def _wrap_async(open_ai_resource: OpenAiDefinition, initialize, wrapped, a
            )
 
        elif open_ai_resource.type == "chat":
-            messages = arg_extractor.get_openai_args().get("messages", [])
+            messages = openai_args.get("messages", [])
            message_input = MessageInputs(messages=messages)
 
            # Track user input
@@ -735,7 +764,7 @@ async def _wrap_async(open_ai_resource: OpenAiDefinition, initialize, wrapped, a
 
 
 class OpenAILangfuse:
-    _langfuse: Optional[Langfuse] = None
+    _langfuse: Langfuse | None = None
 
     def initialize(self):
         self._langfuse = LangfuseSingleton().get(
@@ -793,15 +822,15 @@ class OpenAILangfuse:
                else _wrap_async(resource, self.initialize),
            )
 
-        setattr(openai, "langfuse_public_key", None)
-        setattr(openai, "langfuse_secret_key", None)
-        setattr(openai, "langfuse_host", None)
-        setattr(openai, "langfuse_debug", None)
-        setattr(openai, "langfuse_enabled", True)
-        setattr(openai, "langfuse_sample_rate", None)
-        setattr(openai, "langfuse_mask", None)
-        setattr(openai, "langfuse_auth_check", self.langfuse_auth_check)
-        setattr(openai, "flush_langfuse", self.flush)
+        openai.langfuse_public_key = None
+        openai.langfuse_secret_key = None
+        openai.langfuse_host = None
+        openai.langfuse_debug = None
+        openai.langfuse_enabled = True
+        openai.langfuse_sample_rate = None
+        openai.langfuse_mask = None
+        openai.langfuse_auth_check = self.langfuse_auth_check
+        openai.flush_langfuse = self.flush
 
 
 modifier = OpenAILangfuse()
@@ -816,7 +845,7 @@ def auth_check():
     return modifier._langfuse.auth_check()
 
 
-def _filter_image_data(messages: List[dict]):
+def _filter_image_data(messages: list[dict]):
    """https://platform.openai.com/docs/guides/vision?lang=python
 
    The messages array remains the same, but the 'image_url' is removed from the 'content' array.
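
Beyond the injection hooks, most of the remaining churn in these wrapper modules (and across the vendor, tracing, and environment files listed above) is typing modernization: typing.Optional/List/Dict/Union annotations become builtin generics and PEP 604 unions. A before/after sketch of the pattern on a hypothetical helper, not a function from this package:

# Before (old style, as removed throughout this release):
# from typing import Dict, List, Optional
# def last_user_text(messages: List[Dict[str, str]], default: Optional[str] = None) -> Optional[str]: ...

# After (builtin generics + PEP 604 unions, Python 3.10+):
def last_user_text(messages: list[dict[str, str]], default: str | None = None) -> str | None:
    """Hypothetical helper, shown only to illustrate the annotation change."""
    for message in reversed(messages):
        if message.get("role") == "user":
            return message.get("content", default)
    return default
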
synth_ai/lm/structured_outputs/handler.py

@@ -8,10 +8,12 @@ in the requested structured format (Pydantic models).
 import logging
 import time
 from abc import ABC, abstractmethod
-from typing import Any, Callable, Dict, List, Literal, Optional, Union
+from collections.abc import Callable
+from typing import Any, Literal
 
 from pydantic import BaseModel
 
+from synth_ai.lm.constants import SPECIAL_BASE_TEMPS
 from synth_ai.lm.core.exceptions import StructuredOutputCoercionFailureException
 from synth_ai.lm.structured_outputs.inject import (
     inject_structured_output_instructions,
@@ -22,7 +24,6 @@ from synth_ai.lm.structured_outputs.rehabilitate import (
     pull_out_structured_output,
 )
 from synth_ai.lm.vendors.base import BaseLMResponse, VendorBase
-from synth_ai.lm.constants import SPECIAL_BASE_TEMPS
 
 logger = logging.getLogger(__name__)
 
@@ -30,26 +31,27 @@ logger = logging.getLogger(__name__)
 class StructuredHandlerBase(ABC):
     """
     Abstract base class for structured output handlers.
-
+
     Handles the logic for ensuring language models return properly formatted
     structured outputs, with retry logic and error handling.
-
+
     Attributes:
         core_client: Primary vendor client for API calls
         retry_client: Client used for retry attempts (may use different model)
         handler_params: Configuration parameters including retry count
         structured_output_mode: Either "stringified_json" or "forced_json"
     """
+
     core_client: VendorBase
     retry_client: VendorBase
-    handler_params: Dict[str, Any]
+    handler_params: dict[str, Any]
     structured_output_mode: Literal["stringified_json", "forced_json"]
 
     def __init__(
         self,
         core_client: VendorBase,
         retry_client: VendorBase,
-        handler_params: Optional[Dict[str, Any]] = None,
+        handler_params: dict[str, Any] | None = None,
         structured_output_mode: Literal["stringified_json", "forced_json"] = "stringified_json",
     ):
         self.core_client = core_client
@@ -59,7 +61,7 @@ class StructuredHandlerBase(ABC):
 
     async def call_async(
         self,
-        messages: List[Dict[str, Any]],
+        messages: list[dict[str, Any]],
         model: str,
         response_model: BaseModel,
         temperature: float = 0.0,
@@ -74,7 +76,7 @@ class StructuredHandlerBase(ABC):
             model=model,
             response_model=response_model,
             api_call_method=self.core_client._hit_api_async_structured_output
-            if (not not response_model and self.structured_output_mode == "forced_json")
+            if (response_model and self.structured_output_mode == "forced_json")
             else self.core_client._hit_api_async,
             temperature=temperature,
             use_ephemeral_cache_only=use_ephemeral_cache_only,
@@ -83,7 +85,7 @@ class StructuredHandlerBase(ABC):
 
     def call_sync(
         self,
-        messages: List[Dict[str, Any]],
+        messages: list[dict[str, Any]],
         response_model: BaseModel,
         model: str,
         temperature: float = 0.0,
@@ -97,7 +99,7 @@ class StructuredHandlerBase(ABC):
             model=model,
             response_model=response_model,
             api_call_method=self.core_client._hit_api_sync_structured_output
-            if (not not response_model and self.structured_output_mode == "forced_json")
+            if (response_model and self.structured_output_mode == "forced_json")
             else self.core_client._hit_api_sync,
             temperature=temperature,
             use_ephemeral_cache_only=use_ephemeral_cache_only,
@@ -107,7 +109,7 @@ class StructuredHandlerBase(ABC):
     @abstractmethod
     async def _process_call_async(
         self,
-        messages: List[Dict[str, Any]],
+        messages: list[dict[str, Any]],
         model: str,
         response_model: BaseModel,
         api_call_method,
@@ -119,7 +121,7 @@ class StructuredHandlerBase(ABC):
     @abstractmethod
     def _process_call_sync(
         self,
-        messages: List[Dict[str, Any]],
+        messages: list[dict[str, Any]],
         model: str,
         response_model: BaseModel,
         api_call_method,
@@ -132,24 +134,24 @@ class StructuredHandlerBase(ABC):
 class StringifiedJSONHandler(StructuredHandlerBase):
     core_client: VendorBase
     retry_client: VendorBase
-    handler_params: Dict[str, Any]
+    handler_params: dict[str, Any]
 
     def __init__(
         self,
         core_client: VendorBase,
         retry_client: VendorBase,
-        handler_params: Dict[str, Any] = {"retries": 3},
+        handler_params: dict[str, Any] | None = None,
     ):
         super().__init__(
             core_client,
             retry_client,
-            handler_params,
+            handler_params or {"retries": 3},
             structured_output_mode="stringified_json",
         )
 
     async def _process_call_async(
         self,
-        messages: List[Dict[str, Any]],
+        messages: list[dict[str, Any]],
         model: str,
         response_model: BaseModel,
         temperature: float,
@@ -170,7 +172,7 @@ class StringifiedJSONHandler(StructuredHandlerBase):
                response_model=response_model,
                previously_failed_error_messages=previously_failed_error_messages,
            )
-            t0 = time.time()
+            # t0 = time.time()  # unused
            raw_text_response_or_cached_hit = await api_call_method(
                messages=messages_with_json_formatting_instructions,
                model=model,
@@ -184,7 +186,7 @@ class StringifiedJSONHandler(StructuredHandlerBase):
            assert type(raw_text_response_or_cached_hit) in [str, BaseLMResponse], (
                f"Expected str or BaseLMResponse, got {type(raw_text_response_or_cached_hit)}"
            )
-            if type(raw_text_response_or_cached_hit) == BaseLMResponse:
+            if isinstance(raw_text_response_or_cached_hit, BaseLMResponse):
                # print("Got cached hit, returning directly")
                raw_text_response = raw_text_response_or_cached_hit.raw_response
            else:
@@ -242,7 +244,7 @@ class StringifiedJSONHandler(StructuredHandlerBase):
 
     def _process_call_sync(
         self,
-        messages: List[Dict[str, Any]],
+        messages: list[dict[str, Any]],
         model: str,
         response_model: BaseModel,
         temperature: float,
@@ -277,7 +279,7 @@ class StringifiedJSONHandler(StructuredHandlerBase):
            assert type(raw_text_response_or_cached_hit) in [str, BaseLMResponse], (
                f"Expected str or BaseLMResponse, got {type(raw_text_response_or_cached_hit)}"
            )
-            if type(raw_text_response_or_cached_hit) == BaseLMResponse:
+            if isinstance(raw_text_response_or_cached_hit, BaseLMResponse):
                logger.info("Got cached hit, returning directly")
                raw_text_response = raw_text_response_or_cached_hit.raw_response
            else:
@@ -320,26 +322,26 @@ class StringifiedJSONHandler(StructuredHandlerBase):
 class ForcedJSONHandler(StructuredHandlerBase):
     core_client: VendorBase
     retry_client: VendorBase
-    handler_params: Dict[str, Any]
+    handler_params: dict[str, Any]
 
     def __init__(
         self,
         core_client: VendorBase,
         retry_client: VendorBase,
-        handler_params: Dict[str, Any] = {},
+        handler_params: dict[str, Any] | None = None,
         reasoning_effort: str = "high",
     ):
         super().__init__(
             core_client,
             retry_client,
-            handler_params,
+            handler_params or {"retries": 3},
             structured_output_mode="forced_json",
         )
         self.reasoning_effort = reasoning_effort
 
     async def _process_call_async(
         self,
-        messages: List[Dict[str, Any]],
+        messages: list[dict[str, Any]],
         model: str,
         response_model: BaseModel,
         api_call_method: Callable,
@@ -360,7 +362,7 @@ class ForcedJSONHandler(StructuredHandlerBase):
 
     def _process_call_sync(
         self,
-        messages: List[Dict[str, Any]],
+        messages: list[dict[str, Any]],
         model: str,
         response_model: BaseModel,
         api_call_method: Callable,
@@ -380,16 +382,16 @@ class ForcedJSONHandler(StructuredHandlerBase):
 
 
 class StructuredOutputHandler:
-    handler: Union[StringifiedJSONHandler, ForcedJSONHandler]
+    handler: StringifiedJSONHandler | ForcedJSONHandler
     mode: Literal["stringified_json", "forced_json"]
-    handler_params: Dict[str, Any]
+    handler_params: dict[str, Any]
 
     def __init__(
         self,
         core_client: VendorBase,
         retry_client: VendorBase,
         mode: Literal["stringified_json", "forced_json"],
-        handler_params: Dict[str, Any] = {},
+        handler_params: dict[str, Any] = {},
     ):
         self.mode = mode
         if self.mode == "stringified_json":
@@ -402,11 +404,11 @@ class StructuredOutputHandler:
 
     async def call_async(
         self,
-        messages: List[Dict[str, Any]],
+        messages: list[dict[str, Any]],
         model: str,
         response_model: BaseModel,
         use_ephemeral_cache_only: bool = False,
-        lm_config: Dict[str, Any] = {},
+        lm_config: dict[str, Any] = {},
         reasoning_effort: str = "high",
     ) -> BaseLMResponse:
         # print("Output handler call async")
@@ -421,11 +423,11 @@ class StructuredOutputHandler:
 
     def call_sync(
         self,
-        messages: List[Dict[str, Any]],
+        messages: list[dict[str, Any]],
         model: str,
         response_model: BaseModel,
         use_ephemeral_cache_only: bool = False,
-        lm_config: Dict[str, Any] = {},
+        lm_config: dict[str, Any] = {},
         reasoning_effort: str = "high",
     ) -> BaseLMResponse:
         return self.handler.call_sync(
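
The handler changes above also replace mutable default arguments (handler_params: Dict[str, Any] = {"retries": 3} and = {}) with None plus an or-fallback inside __init__. A self-contained toy illustration of why that matters, using made-up classes rather than the real handlers:

# Toy classes (not the synth_ai handlers) showing the bug the change avoids.
from typing import Any


class SharedDefaultHandler:
    def __init__(self, handler_params: dict[str, Any] = {"retries": 3}):  # one dict shared by every call
        self.handler_params = handler_params


class SafeDefaultHandler:
    def __init__(self, handler_params: dict[str, Any] | None = None):
        # Each instance gets a fresh dict, matching the new handler __init__ pattern.
        self.handler_params = handler_params or {"retries": 3}


a, b = SharedDefaultHandler(), SharedDefaultHandler()
a.handler_params["retries"] = 99
assert b.handler_params["retries"] == 99  # mutation leaks across instances

c, d = SafeDefaultHandler(), SafeDefaultHandler()
c.handler_params["retries"] = 99
assert d.handler_params["retries"] == 3  # defaults stay independent
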