openlit 1.34.30__py3-none-any.whl → 1.34.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +235 -86
- openlit/__init__.py +16 -13
- openlit/_instrumentors.py +2 -1
- openlit/evals/all.py +50 -21
- openlit/evals/bias_detection.py +47 -20
- openlit/evals/hallucination.py +53 -22
- openlit/evals/toxicity.py +50 -21
- openlit/evals/utils.py +54 -30
- openlit/guard/all.py +61 -19
- openlit/guard/prompt_injection.py +34 -14
- openlit/guard/restrict_topic.py +46 -15
- openlit/guard/sensitive_topic.py +34 -14
- openlit/guard/utils.py +58 -22
- openlit/instrumentation/ag2/__init__.py +24 -8
- openlit/instrumentation/ag2/ag2.py +34 -13
- openlit/instrumentation/ag2/async_ag2.py +34 -13
- openlit/instrumentation/ag2/utils.py +133 -30
- openlit/instrumentation/ai21/__init__.py +43 -14
- openlit/instrumentation/ai21/ai21.py +47 -21
- openlit/instrumentation/ai21/async_ai21.py +47 -21
- openlit/instrumentation/ai21/utils.py +299 -78
- openlit/instrumentation/anthropic/__init__.py +21 -4
- openlit/instrumentation/anthropic/anthropic.py +28 -17
- openlit/instrumentation/anthropic/async_anthropic.py +28 -17
- openlit/instrumentation/anthropic/utils.py +145 -35
- openlit/instrumentation/assemblyai/__init__.py +11 -2
- openlit/instrumentation/assemblyai/assemblyai.py +15 -4
- openlit/instrumentation/assemblyai/utils.py +120 -25
- openlit/instrumentation/astra/__init__.py +43 -10
- openlit/instrumentation/astra/astra.py +28 -5
- openlit/instrumentation/astra/async_astra.py +28 -5
- openlit/instrumentation/astra/utils.py +151 -55
- openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
- openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
- openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
- openlit/instrumentation/azure_ai_inference/utils.py +307 -83
- openlit/instrumentation/bedrock/__init__.py +21 -4
- openlit/instrumentation/bedrock/bedrock.py +63 -25
- openlit/instrumentation/bedrock/utils.py +139 -30
- openlit/instrumentation/chroma/__init__.py +89 -16
- openlit/instrumentation/chroma/chroma.py +28 -6
- openlit/instrumentation/chroma/utils.py +167 -51
- openlit/instrumentation/cohere/__init__.py +63 -18
- openlit/instrumentation/cohere/async_cohere.py +63 -24
- openlit/instrumentation/cohere/cohere.py +63 -24
- openlit/instrumentation/cohere/utils.py +286 -73
- openlit/instrumentation/controlflow/__init__.py +35 -9
- openlit/instrumentation/controlflow/controlflow.py +66 -33
- openlit/instrumentation/crawl4ai/__init__.py +25 -10
- openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
- openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
- openlit/instrumentation/crewai/__init__.py +40 -15
- openlit/instrumentation/crewai/async_crewai.py +32 -7
- openlit/instrumentation/crewai/crewai.py +32 -7
- openlit/instrumentation/crewai/utils.py +159 -56
- openlit/instrumentation/dynamiq/__init__.py +46 -12
- openlit/instrumentation/dynamiq/dynamiq.py +74 -33
- openlit/instrumentation/elevenlabs/__init__.py +23 -4
- openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
- openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
- openlit/instrumentation/elevenlabs/utils.py +128 -25
- openlit/instrumentation/embedchain/__init__.py +11 -2
- openlit/instrumentation/embedchain/embedchain.py +68 -35
- openlit/instrumentation/firecrawl/__init__.py +24 -7
- openlit/instrumentation/firecrawl/firecrawl.py +46 -20
- openlit/instrumentation/google_ai_studio/__init__.py +45 -10
- openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
- openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
- openlit/instrumentation/google_ai_studio/utils.py +180 -67
- openlit/instrumentation/gpt4all/__init__.py +22 -7
- openlit/instrumentation/gpt4all/gpt4all.py +67 -29
- openlit/instrumentation/gpt4all/utils.py +285 -61
- openlit/instrumentation/gpu/__init__.py +128 -47
- openlit/instrumentation/groq/__init__.py +21 -4
- openlit/instrumentation/groq/async_groq.py +33 -21
- openlit/instrumentation/groq/groq.py +33 -21
- openlit/instrumentation/groq/utils.py +192 -55
- openlit/instrumentation/haystack/__init__.py +70 -24
- openlit/instrumentation/haystack/async_haystack.py +28 -6
- openlit/instrumentation/haystack/haystack.py +28 -6
- openlit/instrumentation/haystack/utils.py +196 -74
- openlit/instrumentation/julep/__init__.py +69 -19
- openlit/instrumentation/julep/async_julep.py +53 -27
- openlit/instrumentation/julep/julep.py +53 -28
- openlit/instrumentation/langchain/__init__.py +74 -63
- openlit/instrumentation/langchain/callback_handler.py +1100 -0
- openlit/instrumentation/langchain_community/__init__.py +13 -2
- openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
- openlit/instrumentation/langchain_community/langchain_community.py +23 -5
- openlit/instrumentation/langchain_community/utils.py +35 -9
- openlit/instrumentation/letta/__init__.py +68 -15
- openlit/instrumentation/letta/letta.py +99 -54
- openlit/instrumentation/litellm/__init__.py +43 -14
- openlit/instrumentation/litellm/async_litellm.py +51 -26
- openlit/instrumentation/litellm/litellm.py +51 -26
- openlit/instrumentation/litellm/utils.py +304 -102
- openlit/instrumentation/llamaindex/__init__.py +267 -90
- openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
- openlit/instrumentation/llamaindex/llamaindex.py +28 -6
- openlit/instrumentation/llamaindex/utils.py +204 -91
- openlit/instrumentation/mem0/__init__.py +11 -2
- openlit/instrumentation/mem0/mem0.py +50 -29
- openlit/instrumentation/milvus/__init__.py +10 -2
- openlit/instrumentation/milvus/milvus.py +31 -6
- openlit/instrumentation/milvus/utils.py +166 -67
- openlit/instrumentation/mistral/__init__.py +63 -18
- openlit/instrumentation/mistral/async_mistral.py +63 -24
- openlit/instrumentation/mistral/mistral.py +63 -24
- openlit/instrumentation/mistral/utils.py +277 -69
- openlit/instrumentation/multion/__init__.py +69 -19
- openlit/instrumentation/multion/async_multion.py +57 -26
- openlit/instrumentation/multion/multion.py +57 -26
- openlit/instrumentation/ollama/__init__.py +39 -18
- openlit/instrumentation/ollama/async_ollama.py +57 -26
- openlit/instrumentation/ollama/ollama.py +57 -26
- openlit/instrumentation/ollama/utils.py +226 -50
- openlit/instrumentation/openai/__init__.py +156 -32
- openlit/instrumentation/openai/async_openai.py +147 -67
- openlit/instrumentation/openai/openai.py +150 -67
- openlit/instrumentation/openai/utils.py +657 -185
- openlit/instrumentation/openai_agents/__init__.py +5 -1
- openlit/instrumentation/openai_agents/processor.py +110 -90
- openlit/instrumentation/phidata/__init__.py +13 -5
- openlit/instrumentation/phidata/phidata.py +67 -32
- openlit/instrumentation/pinecone/__init__.py +48 -9
- openlit/instrumentation/pinecone/async_pinecone.py +27 -5
- openlit/instrumentation/pinecone/pinecone.py +27 -5
- openlit/instrumentation/pinecone/utils.py +153 -47
- openlit/instrumentation/premai/__init__.py +22 -7
- openlit/instrumentation/premai/premai.py +51 -26
- openlit/instrumentation/premai/utils.py +246 -59
- openlit/instrumentation/pydantic_ai/__init__.py +49 -22
- openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
- openlit/instrumentation/pydantic_ai/utils.py +89 -24
- openlit/instrumentation/qdrant/__init__.py +19 -4
- openlit/instrumentation/qdrant/async_qdrant.py +33 -7
- openlit/instrumentation/qdrant/qdrant.py +33 -7
- openlit/instrumentation/qdrant/utils.py +228 -93
- openlit/instrumentation/reka/__init__.py +23 -10
- openlit/instrumentation/reka/async_reka.py +17 -11
- openlit/instrumentation/reka/reka.py +17 -11
- openlit/instrumentation/reka/utils.py +138 -36
- openlit/instrumentation/together/__init__.py +44 -12
- openlit/instrumentation/together/async_together.py +50 -27
- openlit/instrumentation/together/together.py +50 -27
- openlit/instrumentation/together/utils.py +301 -71
- openlit/instrumentation/transformers/__init__.py +2 -1
- openlit/instrumentation/transformers/transformers.py +13 -3
- openlit/instrumentation/transformers/utils.py +139 -36
- openlit/instrumentation/vertexai/__init__.py +81 -16
- openlit/instrumentation/vertexai/async_vertexai.py +33 -15
- openlit/instrumentation/vertexai/utils.py +123 -27
- openlit/instrumentation/vertexai/vertexai.py +33 -15
- openlit/instrumentation/vllm/__init__.py +12 -5
- openlit/instrumentation/vllm/utils.py +121 -31
- openlit/instrumentation/vllm/vllm.py +16 -10
- openlit/otel/events.py +35 -10
- openlit/otel/metrics.py +32 -24
- openlit/otel/tracing.py +24 -9
- openlit/semcov/__init__.py +72 -6
- {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/METADATA +2 -1
- openlit-1.34.31.dist-info/RECORD +166 -0
- openlit/instrumentation/langchain/async_langchain.py +0 -102
- openlit/instrumentation/langchain/langchain.py +0 -102
- openlit/instrumentation/langchain/utils.py +0 -252
- openlit-1.34.30.dist-info/RECORD +0 -168
- {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/LICENSE +0 -0
- {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/WHEEL +0 -0
openlit/instrumentation/dynamiq/__init__.py

@@ -6,12 +6,11 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper

-from openlit.instrumentation.dynamiq.dynamiq import (
-    dynamiq_wrap
-)
+from openlit.instrumentation.dynamiq.dynamiq import dynamiq_wrap

 _instruments = ("dynamiq >= 0.4.0",)

+
 class DynamiqInstrumentor(BaseInstrumentor):
     """
     An instrumentor for dynamiq's client library.
@@ -33,32 +32,67 @@ class DynamiqInstrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "dynamiq.nodes.agents.base",
             "Agent.run",
-            dynamiq_wrap(
-
+            dynamiq_wrap(
+                "dynamiq.agent_run",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         wrap_function_wrapper(
             "dynamiq",
             "Workflow.run",
-            dynamiq_wrap(
-
+            dynamiq_wrap(
+                "dynamiq.workflow_run",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         wrap_function_wrapper(
             "dynamiq.memory",
             "Memory.add",
-            dynamiq_wrap(
-
+            dynamiq_wrap(
+                "dynamiq.memory_add",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         wrap_function_wrapper(
             "dynamiq.memory",
             "Memory.search",
-            dynamiq_wrap(
-
+            dynamiq_wrap(
+                "dynamiq.memory_search",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

-
     def _uninstrument(self, **kwargs):
         # Proper uninstrumentation logic to revert patched methods
         pass
openlit/instrumentation/dynamiq/dynamiq.py

@@ -5,7 +5,11 @@ Module for monitoring Dynamiq calls.

 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import
+from opentelemetry.sdk.resources import (
+    SERVICE_NAME,
+    TELEMETRY_SDK_NAME,
+    DEPLOYMENT_ENVIRONMENT,
+)
 from openlit.__helpers import (
     handle_exception,
 )
@@ -14,8 +18,18 @@ from openlit.semcov import SemanticConvention
 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)

-
-
+
+def dynamiq_wrap(
+    gen_ai_endpoint,
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.

@@ -50,49 +64,76 @@ def dynamiq_wrap(gen_ai_endpoint, version, environment, application_name,
         """

         # pylint: disable=line-too-long
-        with tracer.start_as_current_span(
+        with tracer.start_as_current_span(
+            gen_ai_endpoint, kind=SpanKind.CLIENT
+        ) as span:
             response = wrapped(*args, **kwargs)

             try:
                 # Set base span attribues
                 span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                span.set_attribute(
-
-
-
-                span.set_attribute(
-
-
-
-                span.set_attribute(
-
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_SYSTEM,
+                    SemanticConvention.GEN_AI_SYSTEM_DYNAMIQ,
+                )
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_OPERATION,
+                    SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
+                )
+                span.set_attribute(SemanticConvention.GEN_AI_ENDPOINT, gen_ai_endpoint)
+                span.set_attribute(SERVICE_NAME, application_name)
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)

                 if gen_ai_endpoint == "dynamiq.agent_run":
-                    span.set_attribute(
-
-
-
-                    span.set_attribute(
-
-
-
+                    span.set_attribute(
+                        SemanticConvention.GEN_AI_AGENT_ID,
+                        getattr(instance, "id", "") or "",
+                    )
+                    span.set_attribute(
+                        SemanticConvention.GEN_AI_AGENT_ROLE,
+                        getattr(instance, "name", "") or "",
+                    )
+                    span.set_attribute(
+                        SemanticConvention.GEN_AI_REQUEST_MODEL,
+                        getattr(getattr(instance, "llm", None), "model", "") or "",
+                    )
+                    span.set_attribute(
+                        SemanticConvention.GEN_AI_AGENT_TYPE,
+                        str(getattr(instance, "type", "")) or "",
+                    )

                 elif gen_ai_endpoint == "dynamiq.workflow_run":
-                    span.set_attribute(
-
-
-
+                    span.set_attribute(
+                        SemanticConvention.GEN_AI_AGENT_ID,
+                        getattr(instance, "id", "") or "",
+                    )
+                    span.set_attribute(
+                        SemanticConvention.GEN_AI_REQUEST_MODEL,
+                        getattr(
+                            getattr(instance.flow, "nodes", [None])[0],
+                            "model",
+                            "default_model",
+                        ),
+                    )

                 elif gen_ai_endpoint == "dynamiq.memory_add":
-                    span.set_attribute(
-
-
+                    span.set_attribute(
+                        SemanticConvention.DB_OPERATION_NAME,
+                        SemanticConvention.DB_OPERATION_ADD,
+                    )
+                    span.set_attribute(
+                        SemanticConvention.DB_METADATA, str(kwargs.get("metadata", ""))
+                    )

                 elif gen_ai_endpoint == "dynamiq.memory_search":
-                    query_value = kwargs.get(
-                    span.set_attribute(
-
-
+                    query_value = kwargs.get("query", "") or (args[0] if args else "")
+                    span.set_attribute(
+                        SemanticConvention.DB_OPERATION_NAME,
+                        SemanticConvention.DB_OPERATION_GET,
+                    )
+                    span.set_attribute(
+                        SemanticConvention.DB_FILTER, str(kwargs.get("filters", ""))
+                    )
                     span.set_attribute(SemanticConvention.DB_STATEMENT, query_value)

                 span.set_status(Status(StatusCode.OK))
openlit/instrumentation/elevenlabs/__init__.py

@@ -10,6 +10,7 @@ from openlit.instrumentation.elevenlabs.async_elevenlabs import async_generate

 _instruments = ("elevenlabs >= 1.4.0",)

+
 class ElevenLabsInstrumentor(BaseInstrumentor):
     """
     An instrumentor for ElevenLabs client library.
@@ -32,16 +33,34 @@ class ElevenLabsInstrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "elevenlabs.text_to_speech.client",
             "TextToSpeechClient.convert",
-            generate(
-
+            generate(
+                "elevenlabs.text_to_speech",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         # async text_to_speech.convert
         wrap_function_wrapper(
             "elevenlabs.text_to_speech.client",
             "AsyncTextToSpeechClient.convert",
-            async_generate(
-
+            async_generate(
+                "elevenlabs.text_to_speech",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

     def _uninstrument(self, **kwargs):
openlit/instrumentation/elevenlabs/async_elevenlabs.py

@@ -8,8 +8,18 @@ from openlit.__helpers import handle_exception
 from openlit.instrumentation.elevenlabs.utils import process_audio_response
 from openlit.semcov import SemanticConvention

-
-
+
+def async_generate(
+    gen_ai_endpoint,
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI function call
     """
@@ -20,7 +30,9 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
         """

         server_address, server_port = "api.elevenlabs.io", 443
-        request_model = kwargs.get(
+        request_model = kwargs.get(
+            "model", kwargs.get("model_id", "eleven_multilingual_v2")
+        )

         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO} {request_model}"

@@ -44,7 +56,7 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
                 kwargs=kwargs,
                 capture_message_content=capture_message_content,
                 disable_metrics=disable_metrics,
-                version=version
+                version=version,
             )

         except Exception as e:
openlit/instrumentation/elevenlabs/elevenlabs.py

@@ -8,8 +8,18 @@ from openlit.__helpers import handle_exception
 from openlit.instrumentation.elevenlabs.utils import process_audio_response
 from openlit.semcov import SemanticConvention

-
-
+
+def generate(
+    gen_ai_endpoint,
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI function call
     """
@@ -20,7 +30,9 @@ def generate(gen_ai_endpoint, version, environment, application_name,
         """

         server_address, server_port = "api.elevenlabs.io", 443
-        request_model = kwargs.get(
+        request_model = kwargs.get(
+            "model", kwargs.get("model_id", "eleven_multilingual_v2")
+        )

         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO} {request_model}"

@@ -44,7 +56,7 @@ def generate(gen_ai_endpoint, version, environment, application_name,
                 kwargs=kwargs,
                 capture_message_content=capture_message_content,
                 disable_metrics=disable_metrics,
-                version=version
+                version=version,
             )

         except Exception as e:
openlit/instrumentation/elevenlabs/utils.py

@@ -1,9 +1,14 @@
 """
 ElevenLabs OpenTelemetry instrumentation utility functions
 """
+
 import time

-from opentelemetry.sdk.resources import
+from opentelemetry.sdk.resources import (
+    SERVICE_NAME,
+    TELEMETRY_SDK_NAME,
+    DEPLOYMENT_ENVIRONMENT,
+)
 from opentelemetry.trace import Status, StatusCode

 from openlit.__helpers import (
@@ -12,14 +17,29 @@ from openlit.__helpers import (
 )
 from openlit.semcov import SemanticConvention

+
 def format_content(text):
     """
     Process text input to extract content.
     """
     return str(text) if text else ""

-
-
+
+def common_span_attributes(
+    scope,
+    gen_ai_operation,
+    gen_ai_system,
+    server_address,
+    server_port,
+    request_model,
+    response_model,
+    environment,
+    application_name,
+    is_stream,
+    tbt,
+    ttft,
+    version,
+):
     """
     Set common span attributes for both chat and RAG operations.
     """
@@ -30,7 +50,9 @@ def common_span_attributes(scope, gen_ai_operation, gen_ai_system, server_addres
     scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
     scope._span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
     scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
-    scope._span.set_attribute(
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_RESPONSE_MODEL, scope._response_model
+    )
     scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
     scope._span.set_attribute(SERVICE_NAME, application_name)
     scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
@@ -38,8 +60,21 @@ def common_span_attributes(scope, gen_ai_operation, gen_ai_system, server_addres
     scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, scope._ttft)
     scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)

-
-
+
+def record_audio_metrics(
+    metrics,
+    gen_ai_operation,
+    gen_ai_system,
+    server_address,
+    server_port,
+    request_model,
+    response_model,
+    environment,
+    application_name,
+    start_time,
+    end_time,
+    cost,
+):
     """
     Record audio generation metrics for the operation.
     """
@@ -58,33 +93,64 @@ def record_audio_metrics(metrics, gen_ai_operation, gen_ai_system, server_addres
     metrics["genai_requests"].add(1, attributes)
     metrics["genai_cost"].record(cost, attributes)

-
-
+
+def common_audio_logic(
+    scope,
+    gen_ai_endpoint,
+    pricing_info,
+    environment,
+    application_name,
+    metrics,
+    capture_message_content,
+    disable_metrics,
+    version,
+):
     """
     Process audio generation request and generate Telemetry
     """

     text = format_content(scope._kwargs.get("text", ""))
-    request_model = scope._kwargs.get(
+    request_model = scope._kwargs.get(
+        "model", scope._kwargs.get("model_id", "eleven_multilingual_v2")
+    )
     is_stream = False  # ElevenLabs audio generation is not streaming

     cost = get_audio_model_cost(request_model, pricing_info, text)

     # Common Span Attributes
-    common_span_attributes(
-
-
-
+    common_span_attributes(
+        scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO,
+        SemanticConvention.GEN_AI_SYSTEM_ELEVENLABS,
+        scope._server_address,
+        scope._server_port,
+        request_model,
+        request_model,
+        environment,
+        application_name,
+        is_stream,
+        scope._tbt,
+        scope._ttft,
+        version,
+    )

     # Span Attributes for Cost and Tokens
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)

     # Span Attributes for Response parameters
-    scope._span.set_attribute(
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_OUTPUT_TYPE,
+        scope._kwargs.get("output_format", "mp3_44100_128"),
+    )

     # Audio-specific span attributes
-    scope._span.set_attribute(
-
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_REQUEST_AUDIO_VOICE, scope._kwargs.get("voice_id", "")
+    )
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_REQUEST_AUDIO_SETTINGS,
+        str(scope._kwargs.get("voice_settings", "")),
+    )

     # Span Attributes for Content
     if capture_message_content:
@@ -102,13 +168,39 @@ def common_audio_logic(scope, gen_ai_endpoint, pricing_info, environment, applic

     # Metrics
     if not disable_metrics:
-        record_audio_metrics(
-
-
+        record_audio_metrics(
+            metrics,
+            SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO,
+            SemanticConvention.GEN_AI_SYSTEM_ELEVENLABS,
+            scope._server_address,
+            scope._server_port,
+            request_model,
+            request_model,
+            environment,
+            application_name,
+            scope._start_time,
+            scope._end_time,
+            cost,
+        )
+

-def process_audio_response(
-
-
+def process_audio_response(
+    response,
+    gen_ai_endpoint,
+    pricing_info,
+    server_port,
+    server_address,
+    environment,
+    application_name,
+    metrics,
+    start_time,
+    span,
+    args,
+    kwargs,
+    capture_message_content=False,
+    disable_metrics=False,
+    version="1.0.0",
+):
     """
     Process audio generation request and generate Telemetry
     """
@@ -123,11 +215,22 @@ def process_audio_response(response, gen_ai_endpoint, pricing_info, server_port,
     scope._args = args

     # Initialize streaming and timing values for ElevenLabs audio generation
-    scope._response_model = kwargs.get(
+    scope._response_model = kwargs.get(
+        "model", kwargs.get("model_id", "eleven_multilingual_v2")
+    )
     scope._tbt = 0.0
     scope._ttft = scope._end_time - scope._start_time

-    common_audio_logic(
-
+    common_audio_logic(
+        scope,
+        gen_ai_endpoint,
+        pricing_info,
+        environment,
+        application_name,
+        metrics,
+        capture_message_content,
+        disable_metrics,
+        version,
+    )

     return response
openlit/instrumentation/embedchain/__init__.py

@@ -1,5 +1,6 @@
 # pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
 """Initializer of Auto Instrumentation of EmbedChain Functions"""
+
 from typing import Collection
 import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
@@ -24,6 +25,7 @@ WRAPPED_METHODS = [
     },
 ]

+
 class EmbedChainInstrumentor(BaseInstrumentor):
     """An instrumentor for EmbedChain's client library."""

@@ -46,8 +48,15 @@ class EmbedChainInstrumentor(BaseInstrumentor):
             wrap_function_wrapper(
                 wrap_package,
                 wrap_object,
-                wrapper(
-
+                wrapper(
+                    gen_ai_endpoint,
+                    version,
+                    environment,
+                    application_name,
+                    tracer,
+                    pricing_info,
+                    capture_message_content,
+                ),
             )

     @staticmethod