synth-ai 0.1.9__py3-none-any.whl → 0.2.1.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- synth_ai/__init__.py +28 -2
- synth_ai/core/system.py +4 -0
- synth_ai/environments/__init__.py +35 -0
- synth_ai/environments/environment/__init__.py +1 -0
- synth_ai/environments/environment/artifacts/__init__.py +1 -0
- synth_ai/environments/environment/artifacts/base.py +50 -0
- synth_ai/environments/environment/core.py +22 -0
- synth_ai/environments/environment/db/__init__.py +1 -0
- synth_ai/environments/environment/db/sqlite.py +45 -0
- synth_ai/environments/environment/registry.py +24 -0
- synth_ai/environments/environment/resources/sqlite.py +46 -0
- synth_ai/environments/environment/results.py +1 -0
- synth_ai/environments/environment/rewards/__init__.py +1 -0
- synth_ai/environments/environment/rewards/core.py +28 -0
- synth_ai/environments/environment/shared_engine.py +26 -0
- synth_ai/environments/environment/tools/__init__.py +34 -0
- synth_ai/environments/examples/__init__.py +1 -0
- synth_ai/environments/examples/crafter_classic/__init__.py +8 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_comprehensive_evaluation.py +58 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_browser.py +152 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_evaluation_framework.py +1194 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_quick_evaluation.py +51 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_react_agent.py +872 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/crafter_trace_evaluation.py +1412 -0
- synth_ai/environments/examples/crafter_classic/agent_demos/test_crafter_react_agent.py +1110 -0
- synth_ai/environments/examples/crafter_classic/config_logging.py +111 -0
- synth_ai/environments/examples/crafter_classic/engine.py +502 -0
- synth_ai/environments/examples/crafter_classic/engine_deterministic_patch.py +63 -0
- synth_ai/environments/examples/crafter_classic/engine_helpers/action_map.py +5 -0
- synth_ai/environments/examples/crafter_classic/engine_helpers/serialization.py +74 -0
- synth_ai/environments/examples/crafter_classic/environment.py +255 -0
- synth_ai/environments/examples/crafter_classic/taskset.py +228 -0
- synth_ai/environments/examples/enron/agent_demos/test_synth_react.py +535 -0
- synth_ai/environments/examples/enron/art_helpers/email_search_tools.py +156 -0
- synth_ai/environments/examples/enron/art_helpers/local_email_db.py +280 -0
- synth_ai/environments/examples/enron/art_helpers/types_enron.py +24 -0
- synth_ai/environments/examples/enron/engine.py +291 -0
- synth_ai/environments/examples/enron/environment.py +165 -0
- synth_ai/environments/examples/enron/taskset.py +112 -0
- synth_ai/environments/examples/enron/units/keyword_stats.py +111 -0
- synth_ai/environments/examples/enron/units/test_email_index.py +8 -0
- synth_ai/environments/examples/minigrid/__init__.py +48 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_evaluation_framework.py +1188 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_quick_evaluation.py +47 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_react_agent.py +562 -0
- synth_ai/environments/examples/minigrid/agent_demos/minigrid_trace_evaluation.py +220 -0
- synth_ai/environments/examples/minigrid/agent_demos/test_minigrid_react_agent.py +393 -0
- synth_ai/environments/examples/minigrid/engine.py +589 -0
- synth_ai/environments/examples/minigrid/environment.py +274 -0
- synth_ai/environments/examples/minigrid/environment_mapping.py +242 -0
- synth_ai/environments/examples/minigrid/puzzle_loader.py +416 -0
- synth_ai/environments/examples/minigrid/taskset.py +583 -0
- synth_ai/environments/examples/minigrid/units/test_action_behavior.py +226 -0
- synth_ai/environments/examples/minigrid/units/test_debug_messages.py +83 -0
- synth_ai/environments/examples/minigrid/units/test_exploration.py +120 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_engine.py +214 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_environment.py +238 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_environment_mapping.py +301 -0
- synth_ai/environments/examples/minigrid/units/test_minigrid_taskset.py +210 -0
- synth_ai/environments/examples/nethack/__init__.py +7 -0
- synth_ai/environments/examples/nethack/achievements.py +337 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_evaluation_framework.py +981 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_quick_evaluation.py +74 -0
- synth_ai/environments/examples/nethack/agent_demos/nethack_react_agent.py +832 -0
- synth_ai/environments/examples/nethack/agent_demos/test_nethack_react_agent.py +1112 -0
- synth_ai/environments/examples/nethack/engine.py +738 -0
- synth_ai/environments/examples/nethack/environment.py +255 -0
- synth_ai/environments/examples/nethack/helpers/__init__.py +42 -0
- synth_ai/environments/examples/nethack/helpers/action_mapping.py +301 -0
- synth_ai/environments/examples/nethack/helpers/nle_wrapper.py +401 -0
- synth_ai/environments/examples/nethack/helpers/observation_utils.py +433 -0
- synth_ai/environments/examples/nethack/helpers/recording_wrapper.py +201 -0
- synth_ai/environments/examples/nethack/helpers/trajectory_recorder.py +268 -0
- synth_ai/environments/examples/nethack/helpers/visualization/replay_viewer.py +308 -0
- synth_ai/environments/examples/nethack/helpers/visualization/visualizer.py +430 -0
- synth_ai/environments/examples/nethack/taskset.py +323 -0
- synth_ai/environments/examples/nethack/units/test_nethack_engine.py +277 -0
- synth_ai/environments/examples/nethack/units/test_nethack_environment.py +281 -0
- synth_ai/environments/examples/nethack/units/test_nethack_taskset.py +213 -0
- synth_ai/environments/examples/nethack/units/test_recording.py +307 -0
- synth_ai/environments/examples/red/__init__.py +7 -0
- synth_ai/environments/examples/red/agent_demos/__init__.py +1 -0
- synth_ai/environments/examples/red/agent_demos/test_synth_react.py +1471 -0
- synth_ai/environments/examples/red/config_logging.py +110 -0
- synth_ai/environments/examples/red/engine.py +693 -0
- synth_ai/environments/examples/red/engine_helpers/__init__.py +1 -0
- synth_ai/environments/examples/red/engine_helpers/memory_map.py +28 -0
- synth_ai/environments/examples/red/engine_helpers/reward_components.py +275 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/__init__.py +142 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/adaptive_rewards.py +56 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/battle_rewards.py +283 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/composite_rewards.py +149 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/economy_rewards.py +137 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/efficiency_rewards.py +56 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/exploration_rewards.py +330 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/novelty_rewards.py +120 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/pallet_town_rewards.py +558 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/pokemon_rewards.py +312 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/social_rewards.py +147 -0
- synth_ai/environments/examples/red/engine_helpers/reward_library/story_rewards.py +246 -0
- synth_ai/environments/examples/red/engine_helpers/screen_analysis.py +367 -0
- synth_ai/environments/examples/red/engine_helpers/state_extraction.py +139 -0
- synth_ai/environments/examples/red/environment.py +235 -0
- synth_ai/environments/examples/red/taskset.py +77 -0
- synth_ai/environments/examples/red/test_fixes.py +125 -0
- synth_ai/environments/examples/red/test_fixes_mock.py +148 -0
- synth_ai/environments/examples/red/units/__init__.py +1 -0
- synth_ai/environments/examples/red/units/test_basic_functionality.py +97 -0
- synth_ai/environments/examples/red/units/test_button_press_requirements.py +217 -0
- synth_ai/environments/examples/red/units/test_engine.py +192 -0
- synth_ai/environments/examples/red/units/test_environment.py +455 -0
- synth_ai/environments/examples/red/units/test_exploration_strategy.py +227 -0
- synth_ai/environments/examples/red/units/test_integration.py +217 -0
- synth_ai/environments/examples/red/units/test_memory_extraction.py +111 -0
- synth_ai/environments/examples/red/units/test_menu_bug_reproduction.py +1100 -0
- synth_ai/environments/examples/red/units/test_movement_debug.py +255 -0
- synth_ai/environments/examples/red/units/test_pokemon_mcts_debug.py +163 -0
- synth_ai/environments/examples/red/units/test_pokemon_mcts_verbose.py +117 -0
- synth_ai/environments/examples/red/units/test_red_basic.py +145 -0
- synth_ai/environments/examples/red/units/test_red_comprehensive.py +323 -0
- synth_ai/environments/examples/red/units/test_retry_movement.py +195 -0
- synth_ai/environments/examples/red/units/test_reward_components.py +186 -0
- synth_ai/environments/examples/red/units/test_rom_integration.py +260 -0
- synth_ai/environments/examples/red/units/test_taskset.py +116 -0
- synth_ai/environments/examples/red/units/test_tree.py +448 -0
- synth_ai/environments/examples/sokoban/__init__.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/sokoban_full_eval.py +900 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_dspy_react.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_sokoban_react_agent.py +498 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_lats.py +1 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_react_locally.py +748 -0
- synth_ai/environments/examples/sokoban/agent_demos/test_synth_react_service.py +296 -0
- synth_ai/environments/examples/sokoban/engine.py +675 -0
- synth_ai/environments/examples/sokoban/engine_helpers/__init__.py +1 -0
- synth_ai/environments/examples/sokoban/engine_helpers/room_utils.py +656 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/__init__.py +17 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/__init__.py +3 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/boxoban_env.py +129 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/render_utils.py +370 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/room_utils.py +331 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env.py +305 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_fixed_targets.py +66 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_pull.py +114 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_two_player.py +122 -0
- synth_ai/environments/examples/sokoban/engine_helpers/vendored/envs/sokoban_env_variations.py +394 -0
- synth_ai/environments/examples/sokoban/environment.py +228 -0
- synth_ai/environments/examples/sokoban/generate_verified_puzzles.py +438 -0
- synth_ai/environments/examples/sokoban/puzzle_loader.py +311 -0
- synth_ai/environments/examples/sokoban/taskset.py +425 -0
- synth_ai/environments/examples/sokoban/units/astar_common.py +94 -0
- synth_ai/environments/examples/sokoban/units/test_building_task_set.py +49 -0
- synth_ai/environments/examples/sokoban/units/test_false_positive.py +120 -0
- synth_ai/environments/examples/sokoban/units/test_simple_run_through_environment.py +119 -0
- synth_ai/environments/examples/sokoban/units/test_sokoban_environment.py +98 -0
- synth_ai/environments/examples/sokoban/units/test_tree.py +364 -0
- synth_ai/environments/examples/tictactoe/__init__.py +1 -0
- synth_ai/environments/examples/tictactoe/agent_demos/test_synth_react.py +266 -0
- synth_ai/environments/examples/tictactoe/agent_demos/test_tictactoe_react_agent.py +470 -0
- synth_ai/environments/examples/tictactoe/engine.py +368 -0
- synth_ai/environments/examples/tictactoe/environment.py +239 -0
- synth_ai/environments/examples/tictactoe/taskset.py +214 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_engine.py +393 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_environment.py +493 -0
- synth_ai/environments/examples/tictactoe/units/test_tictactoe_taskset.py +191 -0
- synth_ai/environments/examples/verilog/__init__.py +10 -0
- synth_ai/environments/examples/verilog/agent_demos/test_synth_react.py +520 -0
- synth_ai/environments/examples/verilog/engine.py +328 -0
- synth_ai/environments/examples/verilog/environment.py +349 -0
- synth_ai/environments/examples/verilog/taskset.py +418 -0
- synth_ai/environments/examples/verilog/units/test_verilog_engine.py +466 -0
- synth_ai/environments/examples/verilog/units/test_verilog_environment.py +585 -0
- synth_ai/environments/examples/verilog/units/test_verilog_integration.py +383 -0
- synth_ai/environments/examples/verilog/units/test_verilog_taskset.py +457 -0
- synth_ai/environments/reproducibility/core.py +42 -0
- synth_ai/environments/reproducibility/tree.py +364 -0
- synth_ai/environments/service/app.py +78 -0
- synth_ai/environments/service/core_routes.py +775 -0
- synth_ai/environments/service/external_registry.py +57 -0
- synth_ai/environments/service/registry.py +9 -0
- synth_ai/environments/stateful/__init__.py +1 -0
- synth_ai/environments/stateful/core.py +28 -0
- synth_ai/environments/stateful/engine.py +21 -0
- synth_ai/environments/stateful/state.py +7 -0
- synth_ai/environments/tasks/api.py +19 -0
- synth_ai/environments/tasks/core.py +78 -0
- synth_ai/environments/tasks/filters.py +39 -0
- synth_ai/environments/tasks/utils.py +89 -0
- synth_ai/environments/v0_observability/history.py +3 -0
- synth_ai/environments/v0_observability/log.py +2 -0
- synth_ai/lm/caching/constants.py +1 -0
- synth_ai/{zyk/lms → lm}/caching/ephemeral.py +4 -8
- synth_ai/{zyk/lms → lm}/caching/handler.py +15 -15
- synth_ai/{zyk/lms → lm}/caching/initialize.py +2 -4
- synth_ai/{zyk/lms → lm}/caching/persistent.py +4 -10
- synth_ai/{zyk/lms → lm}/config.py +2 -1
- synth_ai/{zyk/lms → lm}/constants.py +2 -2
- synth_ai/{zyk/lms → lm}/core/all.py +10 -10
- synth_ai/{zyk/lms → lm}/core/main.py +57 -33
- synth_ai/{zyk/lms → lm}/core/vendor_clients.py +12 -10
- synth_ai/lm/cost/monitor.py +1 -0
- synth_ai/lm/cost/statefulness.py +1 -0
- synth_ai/lm/provider_support/__init__.py +8 -0
- synth_ai/lm/provider_support/anthropic.py +945 -0
- synth_ai/lm/provider_support/openai.py +1115 -0
- synth_ai/lm/provider_support/suppress_logging.py +31 -0
- synth_ai/{zyk/lms → lm}/structured_outputs/handler.py +58 -80
- synth_ai/{zyk/lms → lm}/structured_outputs/inject.py +6 -20
- synth_ai/{zyk/lms → lm}/structured_outputs/rehabilitate.py +6 -12
- synth_ai/{zyk/lms → lm}/vendors/core/anthropic_api.py +21 -30
- synth_ai/{zyk/lms → lm}/vendors/core/gemini_api.py +37 -32
- synth_ai/{zyk/lms → lm}/vendors/core/mistral_api.py +19 -28
- synth_ai/{zyk/lms → lm}/vendors/core/openai_api.py +26 -36
- synth_ai/{zyk/lms → lm}/vendors/openai_standard.py +29 -33
- synth_ai/{zyk/lms → lm}/vendors/retries.py +1 -1
- synth_ai/lm/vendors/supported/__init__.py +0 -0
- synth_ai/{zyk/lms → lm}/vendors/supported/custom_endpoint.py +131 -118
- synth_ai/{zyk/lms → lm}/vendors/supported/deepseek.py +4 -8
- synth_ai/{zyk/lms → lm}/vendors/supported/grok.py +6 -8
- synth_ai/{zyk/lms → lm}/vendors/supported/groq.py +1 -1
- synth_ai/{zyk/lms → lm}/vendors/supported/ollama.py +2 -2
- synth_ai/{zyk/lms → lm}/vendors/supported/openrouter.py +18 -16
- synth_ai/{zyk/lms → lm}/vendors/supported/together.py +1 -1
- synth_ai/tracing/__init__.py +0 -0
- synth_ai/tracing/abstractions.py +224 -0
- synth_ai/tracing/base_client.py +91 -0
- synth_ai/tracing/client_manager.py +131 -0
- synth_ai/tracing/config.py +140 -0
- synth_ai/tracing/context.py +146 -0
- synth_ai/tracing/decorators.py +679 -0
- synth_ai/tracing/events/__init__.py +0 -0
- synth_ai/tracing/events/manage.py +147 -0
- synth_ai/tracing/events/scope.py +86 -0
- synth_ai/tracing/events/store.py +227 -0
- synth_ai/tracing/immediate_client.py +152 -0
- synth_ai/tracing/local.py +18 -0
- synth_ai/tracing/log_client_base.py +74 -0
- synth_ai/tracing/retry_queue.py +187 -0
- synth_ai/tracing/trackers.py +515 -0
- synth_ai/tracing/upload.py +504 -0
- synth_ai/tracing/utils.py +9 -0
- synth_ai/zyk/__init__.py +28 -2
- synth_ai-0.2.1.dev0.dist-info/METADATA +349 -0
- synth_ai-0.2.1.dev0.dist-info/RECORD +261 -0
- synth_ai/zyk/lms/caching/constants.py +0 -1
- synth_ai/zyk/lms/cost/monitor.py +0 -1
- synth_ai/zyk/lms/cost/statefulness.py +0 -1
- synth_ai-0.1.9.dist-info/METADATA +0 -37
- synth_ai-0.1.9.dist-info/RECORD +0 -50
- /synth_ai/{zyk/lms/__init__.py → environments/reproducibility/helpers.py} +0 -0
- /synth_ai/{zyk/lms/caching → lm}/__init__.py +0 -0
- /synth_ai/{zyk/lms/core → lm/caching}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/caching/dbs.py +0 -0
- /synth_ai/{zyk/lms/cost → lm/core}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/core/exceptions.py +0 -0
- /synth_ai/{zyk/lms/structured_outputs → lm/cost}/__init__.py +0 -0
- /synth_ai/{zyk/lms/vendors → lm/structured_outputs}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/tools/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/tools/base.py +0 -0
- /synth_ai/{zyk/lms/vendors/core → lm/vendors}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/vendors/base.py +0 -0
- /synth_ai/{zyk/lms/vendors/local → lm/vendors/core}/__init__.py +0 -0
- /synth_ai/{zyk/lms/vendors/supported → lm/vendors/local}/__init__.py +0 -0
- /synth_ai/{zyk/lms → lm}/vendors/local/ollama.py +0 -0
- {synth_ai-0.1.9.dist-info → synth_ai-0.2.1.dev0.dist-info}/WHEEL +0 -0
- {synth_ai-0.1.9.dist-info → synth_ai-0.2.1.dev0.dist-info}/licenses/LICENSE +0 -0
- {synth_ai-0.1.9.dist-info → synth_ai-0.2.1.dev0.dist-info}/top_level.txt +0 -0
synth_ai/lm/provider_support/openai.py
@@ -0,0 +1,1115 @@
+import copy
+import logging
+import types
+from collections import defaultdict
+from dataclasses import dataclass
+from inspect import isclass
+from typing import List, Optional
+
+import openai.resources
+from langfuse import Langfuse
+from langfuse.client import StatefulGenerationClient
+from langfuse.decorators import langfuse_context
+from langfuse.utils import _get_timestamp
+from langfuse.utils.langfuse_singleton import LangfuseSingleton
+from packaging.version import Version
+from pydantic import BaseModel
+from wrapt import wrap_function_wrapper
+
+from synth_ai.lm.provider_support.suppress_logging import *
+from synth_ai.tracing.abstractions import MessageInputs
+from synth_ai.tracing.trackers import synth_tracker_async, synth_tracker_sync
+
+try:
+    import openai
+except ImportError:
+    raise ModuleNotFoundError("Please install OpenAI to use this feature: 'pip install openai'")
+
+# CREDIT TO LANGFUSE FOR OPEN-SOURCING THE CODE THAT THIS IS BASED ON
+# USING WITH MIT LICENSE PERMISSION
+# https://langfuse.com
+
+try:
+    from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI  # noqa: F401
+except ImportError:
+    AsyncAzureOpenAI = None
+    AsyncOpenAI = None
+    AzureOpenAI = None
+    OpenAI = None
+
+
+# log = logging.getLogger("langfuse")
+
+# Add logger configuration
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)  # Set to DEBUG to see all messages
+
+
+@dataclass
+class OpenAiDefinition:
+    module: str
+    object: str
+    method: str
+    type: str
+    sync: bool
+    min_version: Optional[str] = None
+
+
+OPENAI_METHODS_V0 = [
+    OpenAiDefinition(
+        module="openai",
+        object="ChatCompletion",
+        method="create",
+        type="chat",
+        sync=True,
+    ),
+    OpenAiDefinition(
+        module="openai",
+        object="Completion",
+        method="create",
+        type="completion",
+        sync=True,
+    ),
+]
+
+
+OPENAI_METHODS_V1 = [
+    OpenAiDefinition(
+        module="openai.resources.chat.completions",
+        object="Completions",
+        method="create",
+        type="chat",
+        sync=True,
+    ),
+    OpenAiDefinition(
+        module="openai.resources.completions",
+        object="Completions",
+        method="create",
+        type="completion",
+        sync=True,
+    ),
+    OpenAiDefinition(
+        module="openai.resources.chat.completions",
+        object="AsyncCompletions",
+        method="create",
+        type="chat",
+        sync=False,
+    ),
+    OpenAiDefinition(
+        module="openai.resources.completions",
+        object="AsyncCompletions",
+        method="create",
+        type="completion",
+        sync=False,
+    ),
+    OpenAiDefinition(
+        module="openai.resources.beta.chat.completions",
+        object="Completions",
+        method="parse",
+        type="chat",
+        sync=True,
+        min_version="1.50.0",
+    ),
+    OpenAiDefinition(
+        module="openai.resources.beta.chat.completions",
+        object="AsyncCompletions",
+        method="parse",
+        type="chat",
+        sync=False,
+        min_version="1.50.0",
+    ),
+]
+
+
+class OpenAiArgsExtractor:
+    def __init__(
+        self,
+        name=None,
+        metadata=None,
+        trace_id=None,
+        session_id=None,
+        user_id=None,
+        tags=None,
+        parent_observation_id=None,
+        langfuse_prompt=None,  # we cannot use prompt because it's an argument of the old OpenAI completions API
+        **kwargs,
+    ):
+        # logger.debug(f"OpenAiArgsExtractor initialized with kwargs: {kwargs}")
+        # raise NotImplementedError("This method is not implemented yet")
+        self.args = {}
+        self.args["name"] = name
+        self.args["metadata"] = (
+            metadata
+            if "response_format" not in kwargs
+            else {
+                **(metadata or {}),
+                "response_format": kwargs["response_format"].model_json_schema()
+                if isclass(kwargs["response_format"])
+                and issubclass(kwargs["response_format"], BaseModel)
+                else kwargs["response_format"],
+            }
+        )
+        self.args["trace_id"] = trace_id
+        self.args["session_id"] = session_id
+        self.args["user_id"] = user_id
+        self.args["tags"] = tags
+        self.args["parent_observation_id"] = parent_observation_id
+        self.args["langfuse_prompt"] = langfuse_prompt
+        self.kwargs = kwargs
+
+    def get_langfuse_args(self):
+        return {**self.args, **self.kwargs}
+
+    def get_openai_args(self):
+        return self.kwargs
+
+
+def _langfuse_wrapper(func):
+    def _with_langfuse(open_ai_definitions, initialize):
+        def wrapper(wrapped, instance, args, kwargs):
+            return func(open_ai_definitions, initialize, wrapped, args, kwargs)
+
+        return wrapper
+
+    return _with_langfuse
+
+
+def _extract_chat_prompt(kwargs: dict):
+    """
+    Extracts the user input from prompts. Returns an array of messages or a dict with messages and functions.
+    """
+    logger.debug("Entering _extract_chat_prompt with kwargs keys: %s", list(kwargs.keys()))
+
+    prompt = {}
+
+    if kwargs.get("functions") is not None:
+        prompt.update({"functions": kwargs["functions"]})
+        logger.debug("Found 'functions': %s", kwargs["functions"])
+
+    if kwargs.get("function_call") is not None:
+        prompt.update({"function_call": kwargs["function_call"]})
+        logger.debug("Found 'function_call': %s", kwargs["function_call"])
+
+    if kwargs.get("tools") is not None:
+        prompt.update({"tools": kwargs["tools"]})
+        logger.debug("Found 'tools': %s", kwargs["tools"])
+
+    # existing logic to handle the case when prompt is not empty
+    if prompt:
+        messages = _filter_image_data(kwargs.get("messages", []))
+        prompt.update({"messages": messages})
+        logger.debug(
+            "Detected advanced usage (functions/tools). Prompt now has messages: %s",
+            messages,
+        )
+        return prompt
+    else:
+        # fallback: just return filtered messages
+        messages = _filter_image_data(kwargs.get("messages", []))
+        logger.debug("Returning vanilla messages: %s", messages)
+        return messages
+
+
+def _extract_chat_response(kwargs: dict):
+    """
+    Extracts the LLM output from the response.
+    """
+    logger.debug("Entering _extract_chat_response with keys: %s", list(kwargs.keys()))
+    response = {
+        "role": kwargs.get("role", None),
+    }
+
+    if kwargs.get("function_call") is not None:
+        response.update({"function_call": kwargs["function_call"]})
+        logger.debug("Found 'function_call': %s", kwargs["function_call"])
+
+    if kwargs.get("tool_calls") is not None:
+        response.update({"tool_calls": kwargs["tool_calls"]})
+        logger.debug("Found 'tool_calls': %s", kwargs["tool_calls"])
+
+    response["content"] = kwargs.get("content", None)
+    logger.debug("Final extracted chat response: %s", response)
+    return response
+
+
+def _get_langfuse_data_from_kwargs(
+    resource: OpenAiDefinition, langfuse: Langfuse, start_time, kwargs
+):
+    # print("DEBUG: Entering _get_langfuse_data_from_kwargs")
+    # print("DEBUG: kwargs received:", kwargs)
+
+    name = kwargs.get("name", "OpenAI-generation")
+    # print("DEBUG: name =", name)
+    if name is None:
+        name = "OpenAI-generation"
+
+    if name is not None and not isinstance(name, str):
+        raise TypeError("name must be a string")
+
+    decorator_context_observation_id = langfuse_context.get_current_observation_id()
+    decorator_context_trace_id = langfuse_context.get_current_trace_id()
+    # print("DEBUG: decorator_context_observation_id =", decorator_context_observation_id)
+    # print("DEBUG: decorator_context_trace_id =", decorator_context_trace_id)
+
+    trace_id = kwargs.get("trace_id", None) or decorator_context_trace_id
+    # print("DEBUG: trace_id =", trace_id)
+    if trace_id is not None and not isinstance(trace_id, str):
+        raise TypeError("trace_id must be a string")
+
+    session_id = kwargs.get("session_id", None)
+    # print("DEBUG: session_id =", session_id)
+    if session_id is not None and not isinstance(session_id, str):
+        raise TypeError("session_id must be a string")
+
+    user_id = kwargs.get("user_id", None)
+    # print("DEBUG: user_id =", user_id)
+    if user_id is not None and not isinstance(user_id, str):
+        raise TypeError("user_id must be a string")
+
+    tags = kwargs.get("tags", None)
+    # print("DEBUG: tags =", tags)
+    if tags is not None and (
+        not isinstance(tags, list) or not all(isinstance(tag, str) for tag in tags)
+    ):
+        raise TypeError("tags must be a list of strings")
+
+    if decorator_context_trace_id:
+        langfuse_context.update_current_trace(session_id=session_id, user_id=user_id, tags=tags)
+
+    parent_observation_id = kwargs.get("parent_observation_id", None) or (
+        decorator_context_observation_id
+        if decorator_context_observation_id != decorator_context_trace_id
+        else None
+    )
+    # print("DEBUG: parent_observation_id =", parent_observation_id)
+    if parent_observation_id is not None and not isinstance(parent_observation_id, str):
+        raise TypeError("parent_observation_id must be a string")
+    if parent_observation_id is not None and trace_id is None:
+        raise ValueError("parent_observation_id requires trace_id to be set")
+
+    metadata = kwargs.get("metadata", {})
+    # print("DEBUG: metadata =", metadata)
+    if metadata is not None and not isinstance(metadata, dict):
+        raise TypeError("metadata must be a dictionary")
+
+    prompt = None
+    if resource.type == "completion":
+        prompt = kwargs.get("prompt", None)
+    elif resource.type == "chat":
+        prompt = _extract_chat_prompt(kwargs)
+    # Extract model: first check top-level, then check inside 'inputs'
+    model = kwargs.get("model", None)
+    inputs = kwargs.get("inputs", {}) or {}
+    if isinstance(inputs, dict):
+        # print("DEBUG: inputs =", inputs)
+        if "model_name" in inputs:
+            detailed_model = inputs["model_name"]
+            logger.debug("detailed_model = %s", detailed_model)
+            # If a detailed_model exists and is different from the top-level model, use it.
+            if detailed_model and (not model or model != detailed_model):
+                logger.debug("Upgrading model value from %s to %s", model, detailed_model)
+                model = detailed_model
+    # print("DEBUG: final model =", model)
+
+    # Extract model hyperparameters into the new field 'model_params'
+    model_params = {
+        "model_name": model or None,
+        "temperature": kwargs.get("temperature", 1),
+        "max_tokens": kwargs.get("max_tokens", float("inf")),
+        "top_p": kwargs.get("top_p", 1),
+        "frequency_penalty": kwargs.get("frequency_penalty", 0),
+        "presence_penalty": kwargs.get("presence_penalty", 0),
+    }
+    if kwargs.get("seed", None) is not None:
+        model_params["seed"] = kwargs.get("seed", None)
+
+    is_nested_trace = False
+    if trace_id:
+        is_nested_trace = True
+        langfuse.trace(id=trace_id, session_id=session_id, user_id=user_id, tags=tags)
+    else:
+        trace_instance = langfuse.trace(
+            session_id=session_id,
+            user_id=user_id,
+            tags=tags,
+            name=name,
+            input=prompt,
+            metadata=metadata,
+        )
+        trace_id = trace_instance.id
+        # print("DEBUG: Generated new trace_id =", trace_id)
+
+    langfuse_prompt = kwargs.get("langfuse_prompt", None)
+
+    extracted_data = {
+        "name": name,
+        "metadata": metadata,
+        "trace_id": trace_id,
+        "parent_observation_id": parent_observation_id,
+        "user_id": user_id,
+        "start_time": start_time,
+        "input": prompt,
+        # Reuse the model_params built above (includes model_name and optional seed)
+        "model_params": model_params,
+        "prompt": langfuse_prompt,
+    }
+
+    # print("DEBUG: Exiting _get_langfuse_data_from_kwargs with extracted_data:")
+    # print(extracted_data)
+    # print("DEBUG: is_nested_trace =", is_nested_trace)
+
+    return extracted_data, is_nested_trace
+
+
+def _create_langfuse_update(
+    completion,
+    generation: StatefulGenerationClient,
+    completion_start_time,
+    model=None,
+    usage=None,
+    model_params=None,
+):
+    update = {
+        "end_time": _get_timestamp(),
+        "output": completion,
+        "completion_start_time": completion_start_time,
+    }
+
+    # Merge the model name with any caller-supplied hyperparameters. A separate
+    # dict is used so the `model_params` argument is not shadowed and discarded.
+    merged_model_params = {
+        "model_name": model or None,
+    }
+    if model_params:
+        merged_model_params.update(model_params)
+
+    # Add model_params to update
+    update["model_params"] = merged_model_params
+
+    if usage is not None:
+        update["usage"] = usage
+
+    generation.update(**update)
+
+
+def _extract_streamed_openai_response(resource, chunks):
+    # logger.debug(f"Extracting streamed response for resource type: {resource.type}")
+    # logger.debug(f"Number of chunks: {len(chunks)}")
+    completion = defaultdict(str) if resource.type == "chat" else ""
+    model = None
+    usage = None
+
+    for chunk in chunks:
+        if _is_openai_v1():
+            chunk = chunk.__dict__
+        # logger.debug(f"Processing chunk: {chunk}")
+
+        # Extract model name from chunk
+        model = model or chunk.get("model", None) or None
+
+        # Extract usage information
+        chunk_usage = chunk.get("usage", None)
+        if chunk_usage is not None:
+            if _is_openai_v1():
+                chunk_usage = chunk_usage.__dict__
+            usage = chunk_usage
+
+        # Process choices: fold each streamed delta into the accumulated completion
+        # (the original collected `choices` but never accumulated them)
+        choices = chunk.get("choices", [])
+        for choice in choices:
+            if _is_openai_v1():
+                choice = choice.__dict__
+            if resource.type == "chat":
+                delta = choice.get("delta", None)
+                if _is_openai_v1() and delta is not None:
+                    delta = delta.__dict__
+                delta = delta or {}
+                if delta.get("role") is not None:
+                    completion["role"] = delta["role"]
+                if delta.get("content") is not None:
+                    completion["content"] += delta["content"]
+            elif resource.type == "completion":
+                completion += choice.get("text", "") or ""
+
+    # logger.debug(f"Final completion: {completion}")
+    return model, completion, usage
+
+
+def _get_langfuse_data_from_default_response(resource: OpenAiDefinition, response):
+    if response is None:
+        return None, "<NoneType response returned from OpenAI>", None
+
+    # Extract model name from response
+    model = response.get("model", None) or None
+
+    # Extract completion based on resource type
+    completion = None
+    if resource.type == "completion":
+        choices = response.get("choices", [])
+        if len(choices) > 0:
+            choice = choices[-1]
+            completion = choice.text if _is_openai_v1() else choice.get("text", None)
+    elif resource.type == "chat":
+        choices = response.get("choices", [])
+        if len(choices) > 0:
+            choice = choices[-1]
+            completion = (
+                _extract_chat_response(choice.message.__dict__)
+                if _is_openai_v1()
+                else choice.get("message", None)
+            )
+
+    # Extract usage information
+    usage = response.get("usage", None)
+    if _is_openai_v1() and usage is not None:
+        usage = usage.__dict__
+
+    return model, completion, usage
+
+
+def _is_openai_v1():
+    return Version(openai.__version__) >= Version("1.0.0")
+
+
+def _is_streaming_response(response):
+    return (
+        isinstance(response, types.GeneratorType)
+        or isinstance(response, types.AsyncGeneratorType)
+        or (_is_openai_v1() and isinstance(response, openai.Stream))
+        or (_is_openai_v1() and isinstance(response, openai.AsyncStream))
+    )
+
+
+@_langfuse_wrapper
+def _wrap(open_ai_resource: OpenAiDefinition, initialize, wrapped, args, kwargs):
+    new_langfuse: Langfuse = initialize()
+
+    start_time = _get_timestamp()
+    arg_extractor = OpenAiArgsExtractor(*args, **kwargs)
+
+    generation, is_nested_trace = _get_langfuse_data_from_kwargs(
+        open_ai_resource, new_langfuse, start_time, arg_extractor.get_langfuse_args()
+    )
+    generation = new_langfuse.generation(**generation)
+    try:
+        openai_response = wrapped(**arg_extractor.get_openai_args())
+
+        if _is_streaming_response(openai_response):
+            return LangfuseResponseGeneratorSync(
+                resource=open_ai_resource,
+                response=openai_response,
+                generation=generation,
+                langfuse=new_langfuse,
+                is_nested_trace=is_nested_trace,
+                kwargs=arg_extractor.get_openai_args(),
+            )
+
+        else:
+            model, completion, usage = _get_langfuse_data_from_default_response(
+                open_ai_resource,
+                (openai_response and openai_response.__dict__)
+                if _is_openai_v1()
+                else openai_response,
+            )
+            model_params = {
+                "model_name": model or None,
+                "temperature": kwargs.get("temperature", 1),
+                "max_tokens": kwargs.get("max_tokens", float("inf")),
+                "top_p": kwargs.get("top_p", 1),
+                "frequency_penalty": kwargs.get("frequency_penalty", 0),
+                "presence_penalty": kwargs.get("presence_penalty", 0),
+            }
+
+            # Collect messages
+            if open_ai_resource.type == "completion":
+                user_prompt = arg_extractor.get_openai_args().get("prompt", "")
+                messages = [{"role": "user", "content": user_prompt}]
+                message_input = MessageInputs(messages=messages)
+
+                # Track user input
+                synth_tracker_sync.track_lm(
+                    messages=message_input.messages,
+                    model_name=model,
+                    model_params=model_params,
+                    finetune=False,
+                )
+
+                # Track assistant output separately
+                assistant_message = [{"role": "assistant", "content": completion}]
+                synth_tracker_sync.track_lm_output(
+                    messages=assistant_message,
+                    model_name=model,
+                    model_params=model_params,
+                    finetune=False,
+                )
+
+            elif open_ai_resource.type == "chat":
+                messages = arg_extractor.get_openai_args().get("messages", [])
+                message_input = MessageInputs(messages=messages)
+
+                # Track user input
+                synth_tracker_sync.track_lm(
+                    messages=message_input.messages,
+                    model_name=model,
+                    model_params=model_params,
+                    finetune=False,
+                )
+
+                # Track assistant output separately
+                assistant_message = [{"role": "assistant", "content": completion["content"]}]
+                synth_tracker_sync.track_lm_output(
+                    messages=assistant_message, model_name=model, finetune=False
+                )
+
+            else:
+                message_input = MessageInputs(messages=[])
+
+                # Use track_lm
+                # synth_tracker_sync.track_lm(
+                #     messages=message_input.messages,
+                #     model_name=model,
+                #     model_params=model_params,finetune=False,
+                # )
+
+            if kwargs.get("seed", None) is not None:
+                model_params["seed"] = kwargs.get("seed", None)
+
+            generation.update(
+                model_params=model_params,
+                output=completion,
+                end_time=_get_timestamp(),
+                usage=usage,
+            )
+
+            # Avoiding the trace-update if trace-id is provided by user.
+            if not is_nested_trace:
+                new_langfuse.trace(id=generation.trace_id, output=completion)
+
+        return openai_response
+    except Exception as ex:
+        # log.warning(ex)
+        model = kwargs.get("model", None) or None
+        model_params = {
+            "model_name": model or None,
+            "temperature": kwargs.get("temperature", 1),
+            "max_tokens": kwargs.get("max_tokens", float("inf")),
+            "top_p": kwargs.get("top_p", 1),
+            "frequency_penalty": kwargs.get("frequency_penalty", 0),
+            "presence_penalty": kwargs.get("presence_penalty", 0),
+        }
+        if kwargs.get("seed", None) is not None:
+            model_params["seed"] = kwargs.get("seed", None)
+
+        generation.update(
+            end_time=_get_timestamp(),
+            status_message=str(ex),
+            level="ERROR",
+            model_params=model_params,
+            usage={"input_cost": 0, "output_cost": 0, "total_cost": 0},
+        )
+        raise ex
+
+
+@_langfuse_wrapper
+async def _wrap_async(open_ai_resource: OpenAiDefinition, initialize, wrapped, args, kwargs):
+    new_langfuse = initialize()
+    start_time = _get_timestamp()
+    arg_extractor = OpenAiArgsExtractor(*args, **kwargs)
+
+    generation, is_nested_trace = _get_langfuse_data_from_kwargs(
+        open_ai_resource, new_langfuse, start_time, arg_extractor.get_langfuse_args()
+    )
+    generation = new_langfuse.generation(**generation)
+
+    try:
+        openai_response = await wrapped(**arg_extractor.get_openai_args())
+
+        if _is_streaming_response(openai_response):
+            return LangfuseResponseGeneratorAsync(
+                resource=open_ai_resource,
+                response=openai_response,
+                generation=generation,
+                langfuse=new_langfuse,
+                is_nested_trace=is_nested_trace,
+                kwargs=arg_extractor.get_openai_args(),
+            )
+
+        else:
+            model, completion, usage = _get_langfuse_data_from_default_response(
+                open_ai_resource,
+                (openai_response and openai_response.__dict__)
+                if _is_openai_v1()
+                else openai_response,
+            )
+            model_params = {
+                "model_name": model or None,
+                "temperature": kwargs.get("temperature", 1),
+                "max_tokens": kwargs.get("max_tokens", float("inf")),
+                "top_p": kwargs.get("top_p", 1),
+                "frequency_penalty": kwargs.get("frequency_penalty", 0),
+                "presence_penalty": kwargs.get("presence_penalty", 0),
+            }
+
+            # Collect messages
+            if open_ai_resource.type == "completion":
+                user_prompt = arg_extractor.get_openai_args().get("prompt", "")
+                messages = [{"role": "user", "content": user_prompt}]
+                message_input = MessageInputs(messages=messages)
+
+                # Track user input
+                synth_tracker_async.track_lm(
+                    messages=message_input.messages,
+                    model_name=model,
+                    model_params=model_params,
+                    finetune=False,
+                )
+
+                # Track assistant output separately
+                assistant_message = [{"role": "assistant", "content": completion}]
+                synth_tracker_async.track_lm_output(
+                    messages=assistant_message, model_name=model, finetune=False
+                )
+
+            elif open_ai_resource.type == "chat":
+                messages = arg_extractor.get_openai_args().get("messages", [])
+                message_input = MessageInputs(messages=messages)
+
+                # Track user input
+                synth_tracker_async.track_lm(
+                    messages=message_input.messages,
+                    model_name=model,
+                    model_params=model_params,
+                    finetune=False,
+                )
+
+                # Track assistant output separately
+                assistant_message = [{"role": "assistant", "content": completion["content"]}]
+                synth_tracker_async.track_lm_output(
+                    messages=assistant_message, model_name=model, finetune=False
+                )
+
+            else:
+                message_input = MessageInputs(messages=[])
+
+                # Use track_lm
+                # synth_tracker_async.track_lm(
+                #     messages=message_input.messages,
+                #     model_name=model,
+                #     model_params=model_params,finetune=False,
+                # )
+
+            # Reuse the model_params built above instead of rebuilding it
+            if kwargs.get("seed", None) is not None:
+                model_params["seed"] = kwargs.get("seed", None)
+
+            generation.update(
+                model_params=model_params,
+                output=completion,
+                end_time=_get_timestamp(),
+                usage=usage,
+            )
+            # Avoiding the trace-update if trace-id is provided by user.
+            if not is_nested_trace:
+                new_langfuse.trace(id=generation.trace_id, output=completion)
+
+        return openai_response
+    except Exception as ex:
+        model = kwargs.get("model", None) or None
+        model_params = {
+            "model_name": model or None,
+            "temperature": kwargs.get("temperature", 1),
+            "max_tokens": kwargs.get("max_tokens", float("inf")),
+            "top_p": kwargs.get("top_p", 1),
+            "frequency_penalty": kwargs.get("frequency_penalty", 0),
+            "presence_penalty": kwargs.get("presence_penalty", 0),
+        }
+        if kwargs.get("seed", None) is not None:
+            model_params["seed"] = kwargs.get("seed", None)
+
+        generation.update(
+            end_time=_get_timestamp(),
+            status_message=str(ex),
+            level="ERROR",
+            model_params=model_params,
+            usage={"input_cost": 0, "output_cost": 0, "total_cost": 0},
+        )
+        raise ex
+
+
+class OpenAILangfuse:
+    _langfuse: Optional[Langfuse] = None
+
+    def initialize(self):
+        self._langfuse = LangfuseSingleton().get(
+            public_key=openai.langfuse_public_key,
+            secret_key=openai.langfuse_secret_key,
+            host=openai.langfuse_host,
+            debug=openai.langfuse_debug,
+            enabled=openai.langfuse_enabled,
+            sdk_integration="openai",
+            sample_rate=openai.langfuse_sample_rate,
+        )
+
+        return self._langfuse
+
+    def flush(self):
+        self._langfuse.flush()
+
+    def langfuse_auth_check(self):
+        """Check if the provided Langfuse credentials (public and secret key) are valid.
+
+        Raises:
+            Exception: If no projects were found for the provided credentials.
+
+        Note:
+            This method is blocking. It is discouraged to use it in production code.
+        """
+        if self._langfuse is None:
+            self.initialize()
+
+        return self._langfuse.auth_check()
+
+    def register_tracing(self):
+        resources = OPENAI_METHODS_V1 if _is_openai_v1() else OPENAI_METHODS_V0
+
+        for resource in resources:
+            if resource.min_version is not None and Version(openai.__version__) < Version(
+                resource.min_version
+            ):
+                continue
+
+            wrap_function_wrapper(
+                resource.module,
+                f"{resource.object}.{resource.method}",
+                _wrap(resource, self.initialize)
+                if resource.sync
+                else _wrap_async(resource, self.initialize),
+            )
+
+        setattr(openai, "langfuse_public_key", None)
+        setattr(openai, "langfuse_secret_key", None)
+        setattr(openai, "langfuse_host", None)
+        setattr(openai, "langfuse_debug", None)
+        setattr(openai, "langfuse_enabled", True)
+        setattr(openai, "langfuse_sample_rate", None)
+        setattr(openai, "langfuse_mask", None)
+        setattr(openai, "langfuse_auth_check", self.langfuse_auth_check)
+        setattr(openai, "flush_langfuse", self.flush)
+
+
+modifier = OpenAILangfuse()
+modifier.register_tracing()
+
+
+# DEPRECATED: Use `openai.langfuse_auth_check()` instead
+def auth_check():
+    if modifier._langfuse is None:
+        modifier.initialize()
+
+    return modifier._langfuse.auth_check()
+
+
+def _filter_image_data(messages: List[dict]):
+    """https://platform.openai.com/docs/guides/vision?lang=python
+
+    The messages array remains the same, but the 'image_url' is removed from the 'content' array.
+    It should only be removed if the value starts with 'data:image/jpeg;base64,'
+
+    """
+    output_messages = copy.deepcopy(messages)
+
+    for message in output_messages:
+        content = (
+            message.get("content", None)
+            if isinstance(message, dict)
+            else getattr(message, "content", None)
+        )
+
+        if content is not None:
+            for index, item in enumerate(content):
+                if isinstance(item, dict) and item.get("image_url", None) is not None:
+                    url = item["image_url"]["url"]
+                    if url.startswith("data:image/"):
+                        del content[index]["image_url"]
+
+    return output_messages
+
+
+class LangfuseResponseGeneratorSync:
+    def __init__(
+        self,
+        *,
+        resource,
+        response,
+        generation,
+        langfuse,
+        is_nested_trace,
+        kwargs,
+    ):
+        self.items = []
+        self.resource = resource
+        self.response = response
+        self.generation = generation
+        self.langfuse = langfuse
+        self.is_nested_trace = is_nested_trace
+        self.kwargs = kwargs
+        self.completion_start_time = None
+
+    def __iter__(self):
+        try:
+            for i in self.response:
+                self.items.append(i)
+
+                if self.completion_start_time is None:
+                    self.completion_start_time = _get_timestamp()
+
+                yield i
+        finally:
+            self._finalize()
+
+    def __next__(self):
+        try:
+            item = self.response.__next__()
+            self.items.append(item)
+
+            if self.completion_start_time is None:
+                self.completion_start_time = _get_timestamp()
+
+            return item
+        except StopIteration:
+            self._finalize()
+
+            raise
+
+    def __enter__(self):
+        return self.__iter__()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        pass
+
+    def _finalize(self):
+        logger.debug("Entering _finalize() in LangfuseResponseGeneratorSync...")
+        # First, extract values from the streamed response items
+        model, completion, usage = _extract_streamed_openai_response(self.resource, self.items)
+        logger.debug("Extracted model=%s, completion=%s, usage=%s", model, completion, usage)
+
+        # Look through the streamed items for a detailed model in the additional "inputs"
+        for item in self.items:
+            if isinstance(item, dict):
+                inputs = item.get("inputs")
+                if isinstance(inputs, dict):
+                    detailed = inputs.get("model_name")
+                    if detailed and detailed != model:
+                        logger.debug(
+                            "Upgrading model value from %s to %s based on streamed inputs",
+                            model,
+                            detailed,
+                        )
+                        model = detailed
+                        break
+        logger.debug("Final model after _finalize check: %s", model)
+
+        # Create model hyperparameters dictionary
+        model_params = {
+            "temperature": self.kwargs.get("temperature", 1),
+            "max_tokens": self.kwargs.get("max_tokens", float("inf")),
+            "top_p": self.kwargs.get("top_p", 1),
+            "frequency_penalty": self.kwargs.get("frequency_penalty", 0),
+            "presence_penalty": self.kwargs.get("presence_penalty", 0),
+        }
+        if self.kwargs.get("seed") is not None:
+            model_params["seed"] = self.kwargs.get("seed")
+
+        if self.resource.type == "completion":
+            user_prompt = self.kwargs.get("prompt", "")
+            messages = [
+                {"role": "user", "content": user_prompt},
+                {"role": "assistant", "content": completion},
+            ]
+            message_input = MessageInputs(messages=messages)
+        elif self.resource.type == "chat":
+            messages = self.kwargs.get("messages", [])
+            logger.debug("Existing 'messages' from kwargs before appending: %s", messages)
+            if isinstance(completion, dict) and "content" in completion:
+                messages.append({"role": "assistant", "content": completion["content"]})
+            message_input = MessageInputs(messages=messages)
+            logger.debug("Final 'messages': %s", message_input.messages)
+        else:
+            message_input = MessageInputs(messages=[])
+
+        logger.debug(
+            "Calling track_lm (sync) with messages: %s, model: %s",
+            message_input.messages,
+            model,
+        )
+        synth_tracker_sync.track_lm(
+            messages=message_input.messages,
+            model_name=model,
+            model_params=model_params,
+            finetune=False,
+        )
+
+        # Avoid the trace update if a trace-id was provided by the user.
+        if not self.is_nested_trace:
+            self.langfuse.trace(id=self.generation.trace_id, output=completion)
+
+        # Pass the updated model and hyperparameters downstream in the update event.
+        _create_langfuse_update(
+            completion,
+            self.generation,
+            self.completion_start_time,
+            model=model,
+            usage=usage,
+            model_params=model_params,
+        )
+
+
+class LangfuseResponseGeneratorAsync:
+    def __init__(
+        self,
+        *,
+        resource,
+        response,
+        generation,
+        langfuse,
+        is_nested_trace,
+        kwargs,
+    ):
+        # logger.debug(f"LangfuseResponseGeneratorAsync initialized with kwargs: {kwargs}")
+        # logger.debug(f"Resource type: {resource.type}")
+        self.items = []
+        self.resource = resource
+        self.response = response
+        self.generation = generation
+        self.langfuse = langfuse
+        self.is_nested_trace = is_nested_trace
+        self.kwargs = kwargs
+        self.completion_start_time = None
+
+    async def __aiter__(self):
+        try:
+            async for i in self.response:
+                self.items.append(i)
+
+                if self.completion_start_time is None:
+                    self.completion_start_time = _get_timestamp()
+
+                yield i
+        finally:
+            await self._finalize()
+
+    async def __anext__(self):
+        try:
+            item = await self.response.__anext__()
+            self.items.append(item)
+
+            if self.completion_start_time is None:
+                self.completion_start_time = _get_timestamp()
+
+            return item
+        except StopAsyncIteration:
+            await self._finalize()
+
+            raise
+
+    async def __aenter__(self):
+        return self.__aiter__()
+
+    async def __aexit__(self, exc_type, exc_value, traceback):
+        pass
+
+    async def _finalize(self):
+        logger.debug("Entering _finalize() in LangfuseResponseGeneratorAsync...")
+        model, completion, usage = _extract_streamed_openai_response(self.resource, self.items)
+        logger.debug("Extracted model=%s, completion=%s, usage=%s", model, completion, usage)
+
+        # Look through the streamed items for a detailed model in the additional "inputs"
+        for item in self.items:
+            if isinstance(item, dict):
+                inputs = item.get("inputs")
+                if isinstance(inputs, dict):
+                    detailed = inputs.get("model_name")
+                    if detailed and detailed != model:
+                        logger.debug(
+                            "Upgrading model value from %s to %s based on streamed inputs",
+                            model,
+                            detailed,
+                        )
+                        model = detailed
+                        break
+        logger.debug("Final model after _finalize check: %s", model)
+
+        # Create model hyperparameters dictionary
+        model_params = {
+            "temperature": self.kwargs.get("temperature", 1),
+            "max_tokens": self.kwargs.get("max_tokens", float("inf")),
+            "top_p": self.kwargs.get("top_p", 1),
+            "frequency_penalty": self.kwargs.get("frequency_penalty", 0),
+            "presence_penalty": self.kwargs.get("presence_penalty", 0),
+        }
+        if self.kwargs.get("seed") is not None:
+            model_params["seed"] = self.kwargs.get("seed")
+
+        if self.resource.type == "completion":
+            user_prompt = self.kwargs.get("prompt", "")
+            messages = [
+                {"role": "user", "content": user_prompt},
+                {"role": "assistant", "content": completion},
+            ]
+            message_input = MessageInputs(messages=messages)
+        elif self.resource.type == "chat":
+            messages = self.kwargs.get("messages", [])
+            logger.debug("Existing 'messages' from kwargs before appending: %s", messages)
+            # If completion is a dict, ensure we extract 'content' safely
+            if isinstance(completion, dict) and "content" in completion:
+                messages.append({"role": "assistant", "content": completion["content"]})
+            message_input = MessageInputs(messages=messages)
+            logger.debug("Final 'messages': %s", message_input.messages)
+        else:
+            message_input = MessageInputs(messages=[])
+
+        logger.debug(
+            "Calling track_lm (async) with messages: %s, model: %s",
+            message_input.messages,
+            model,
+        )
+        synth_tracker_async.track_lm(
+            messages=message_input.messages,
+            model_name=model,
+            model_params=model_params,
+            finetune=False,
+        )
+
+        # Avoiding the trace-update if trace-id is provided by user.
+        if not self.is_nested_trace:
+            self.langfuse.trace(id=self.generation.trace_id, output=completion)
+
+        _create_langfuse_update(
+            completion,
+            self.generation,
+            self.completion_start_time,
+            model=model,
+            usage=usage,
+            model_params=model_params,
+        )
+
+    async def close(self) -> None:
+        """Close the response and release the connection.
+
+        Automatically called if the response body is read to completion.
+        """
+        await self.response.close()