openlit 1.34.30__py3-none-any.whl → 1.34.32__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- openlit/__helpers.py +235 -86
- openlit/__init__.py +19 -14
- openlit/_instrumentors.py +2 -1
- openlit/evals/all.py +50 -21
- openlit/evals/bias_detection.py +47 -20
- openlit/evals/hallucination.py +53 -22
- openlit/evals/toxicity.py +50 -21
- openlit/evals/utils.py +54 -30
- openlit/guard/all.py +61 -19
- openlit/guard/prompt_injection.py +34 -14
- openlit/guard/restrict_topic.py +46 -15
- openlit/guard/sensitive_topic.py +34 -14
- openlit/guard/utils.py +58 -22
- openlit/instrumentation/ag2/__init__.py +113 -6
- openlit/instrumentation/ag2/ag2.py +459 -17
- openlit/instrumentation/ag2/async_ag2.py +459 -17
- openlit/instrumentation/ag2/utils.py +475 -31
- openlit/instrumentation/ai21/__init__.py +43 -14
- openlit/instrumentation/ai21/ai21.py +47 -21
- openlit/instrumentation/ai21/async_ai21.py +47 -21
- openlit/instrumentation/ai21/utils.py +299 -78
- openlit/instrumentation/anthropic/__init__.py +21 -4
- openlit/instrumentation/anthropic/anthropic.py +28 -17
- openlit/instrumentation/anthropic/async_anthropic.py +28 -17
- openlit/instrumentation/anthropic/utils.py +145 -35
- openlit/instrumentation/assemblyai/__init__.py +11 -2
- openlit/instrumentation/assemblyai/assemblyai.py +15 -4
- openlit/instrumentation/assemblyai/utils.py +120 -25
- openlit/instrumentation/astra/__init__.py +43 -10
- openlit/instrumentation/astra/astra.py +28 -5
- openlit/instrumentation/astra/async_astra.py +28 -5
- openlit/instrumentation/astra/utils.py +151 -55
- openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
- openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
- openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
- openlit/instrumentation/azure_ai_inference/utils.py +307 -83
- openlit/instrumentation/bedrock/__init__.py +21 -4
- openlit/instrumentation/bedrock/bedrock.py +63 -25
- openlit/instrumentation/bedrock/utils.py +139 -30
- openlit/instrumentation/chroma/__init__.py +89 -16
- openlit/instrumentation/chroma/chroma.py +28 -6
- openlit/instrumentation/chroma/utils.py +167 -51
- openlit/instrumentation/cohere/__init__.py +63 -18
- openlit/instrumentation/cohere/async_cohere.py +63 -24
- openlit/instrumentation/cohere/cohere.py +63 -24
- openlit/instrumentation/cohere/utils.py +286 -73
- openlit/instrumentation/controlflow/__init__.py +35 -9
- openlit/instrumentation/controlflow/controlflow.py +66 -33
- openlit/instrumentation/crawl4ai/__init__.py +25 -10
- openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
- openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
- openlit/instrumentation/crewai/__init__.py +40 -15
- openlit/instrumentation/crewai/async_crewai.py +32 -7
- openlit/instrumentation/crewai/crewai.py +32 -7
- openlit/instrumentation/crewai/utils.py +159 -56
- openlit/instrumentation/dynamiq/__init__.py +46 -12
- openlit/instrumentation/dynamiq/dynamiq.py +74 -33
- openlit/instrumentation/elevenlabs/__init__.py +23 -4
- openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
- openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
- openlit/instrumentation/elevenlabs/utils.py +128 -25
- openlit/instrumentation/embedchain/__init__.py +11 -2
- openlit/instrumentation/embedchain/embedchain.py +68 -35
- openlit/instrumentation/firecrawl/__init__.py +24 -7
- openlit/instrumentation/firecrawl/firecrawl.py +46 -20
- openlit/instrumentation/google_ai_studio/__init__.py +45 -10
- openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
- openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
- openlit/instrumentation/google_ai_studio/utils.py +180 -67
- openlit/instrumentation/gpt4all/__init__.py +22 -7
- openlit/instrumentation/gpt4all/gpt4all.py +67 -29
- openlit/instrumentation/gpt4all/utils.py +285 -61
- openlit/instrumentation/gpu/__init__.py +128 -47
- openlit/instrumentation/groq/__init__.py +21 -4
- openlit/instrumentation/groq/async_groq.py +33 -21
- openlit/instrumentation/groq/groq.py +33 -21
- openlit/instrumentation/groq/utils.py +192 -55
- openlit/instrumentation/haystack/__init__.py +70 -24
- openlit/instrumentation/haystack/async_haystack.py +28 -6
- openlit/instrumentation/haystack/haystack.py +28 -6
- openlit/instrumentation/haystack/utils.py +196 -74
- openlit/instrumentation/julep/__init__.py +69 -19
- openlit/instrumentation/julep/async_julep.py +53 -27
- openlit/instrumentation/julep/julep.py +53 -28
- openlit/instrumentation/langchain/__init__.py +74 -63
- openlit/instrumentation/langchain/callback_handler.py +1100 -0
- openlit/instrumentation/langchain_community/__init__.py +13 -2
- openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
- openlit/instrumentation/langchain_community/langchain_community.py +23 -5
- openlit/instrumentation/langchain_community/utils.py +35 -9
- openlit/instrumentation/letta/__init__.py +68 -15
- openlit/instrumentation/letta/letta.py +99 -54
- openlit/instrumentation/litellm/__init__.py +43 -14
- openlit/instrumentation/litellm/async_litellm.py +51 -26
- openlit/instrumentation/litellm/litellm.py +51 -26
- openlit/instrumentation/litellm/utils.py +304 -102
- openlit/instrumentation/llamaindex/__init__.py +267 -90
- openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
- openlit/instrumentation/llamaindex/llamaindex.py +28 -6
- openlit/instrumentation/llamaindex/utils.py +204 -91
- openlit/instrumentation/mem0/__init__.py +11 -2
- openlit/instrumentation/mem0/mem0.py +50 -29
- openlit/instrumentation/milvus/__init__.py +10 -2
- openlit/instrumentation/milvus/milvus.py +31 -6
- openlit/instrumentation/milvus/utils.py +166 -67
- openlit/instrumentation/mistral/__init__.py +63 -18
- openlit/instrumentation/mistral/async_mistral.py +63 -24
- openlit/instrumentation/mistral/mistral.py +63 -24
- openlit/instrumentation/mistral/utils.py +277 -69
- openlit/instrumentation/multion/__init__.py +69 -19
- openlit/instrumentation/multion/async_multion.py +57 -26
- openlit/instrumentation/multion/multion.py +57 -26
- openlit/instrumentation/ollama/__init__.py +39 -18
- openlit/instrumentation/ollama/async_ollama.py +57 -26
- openlit/instrumentation/ollama/ollama.py +57 -26
- openlit/instrumentation/ollama/utils.py +226 -50
- openlit/instrumentation/openai/__init__.py +156 -32
- openlit/instrumentation/openai/async_openai.py +147 -67
- openlit/instrumentation/openai/openai.py +150 -67
- openlit/instrumentation/openai/utils.py +657 -185
- openlit/instrumentation/openai_agents/__init__.py +5 -1
- openlit/instrumentation/openai_agents/processor.py +110 -90
- openlit/instrumentation/phidata/__init__.py +13 -5
- openlit/instrumentation/phidata/phidata.py +67 -32
- openlit/instrumentation/pinecone/__init__.py +48 -9
- openlit/instrumentation/pinecone/async_pinecone.py +27 -5
- openlit/instrumentation/pinecone/pinecone.py +27 -5
- openlit/instrumentation/pinecone/utils.py +153 -47
- openlit/instrumentation/premai/__init__.py +22 -7
- openlit/instrumentation/premai/premai.py +51 -26
- openlit/instrumentation/premai/utils.py +246 -59
- openlit/instrumentation/pydantic_ai/__init__.py +49 -22
- openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
- openlit/instrumentation/pydantic_ai/utils.py +89 -24
- openlit/instrumentation/qdrant/__init__.py +19 -4
- openlit/instrumentation/qdrant/async_qdrant.py +33 -7
- openlit/instrumentation/qdrant/qdrant.py +33 -7
- openlit/instrumentation/qdrant/utils.py +228 -93
- openlit/instrumentation/reka/__init__.py +23 -10
- openlit/instrumentation/reka/async_reka.py +17 -11
- openlit/instrumentation/reka/reka.py +17 -11
- openlit/instrumentation/reka/utils.py +138 -36
- openlit/instrumentation/together/__init__.py +44 -12
- openlit/instrumentation/together/async_together.py +50 -27
- openlit/instrumentation/together/together.py +50 -27
- openlit/instrumentation/together/utils.py +301 -71
- openlit/instrumentation/transformers/__init__.py +2 -1
- openlit/instrumentation/transformers/transformers.py +13 -3
- openlit/instrumentation/transformers/utils.py +139 -36
- openlit/instrumentation/vertexai/__init__.py +81 -16
- openlit/instrumentation/vertexai/async_vertexai.py +33 -15
- openlit/instrumentation/vertexai/utils.py +123 -27
- openlit/instrumentation/vertexai/vertexai.py +33 -15
- openlit/instrumentation/vllm/__init__.py +12 -5
- openlit/instrumentation/vllm/utils.py +121 -31
- openlit/instrumentation/vllm/vllm.py +16 -10
- openlit/otel/events.py +35 -10
- openlit/otel/metrics.py +32 -24
- openlit/otel/tracing.py +24 -9
- openlit/semcov/__init__.py +82 -6
- {openlit-1.34.30.dist-info → openlit-1.34.32.dist-info}/METADATA +2 -1
- openlit-1.34.32.dist-info/RECORD +166 -0
- openlit/instrumentation/langchain/async_langchain.py +0 -102
- openlit/instrumentation/langchain/langchain.py +0 -102
- openlit/instrumentation/langchain/utils.py +0 -252
- openlit-1.34.30.dist-info/RECORD +0 -168
- {openlit-1.34.30.dist-info → openlit-1.34.32.dist-info}/LICENSE +0 -0
- {openlit-1.34.30.dist-info → openlit-1.34.32.dist-info}/WHEEL +0 -0
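Only the Mistral instrumentation diffs are reproduced below, covering `openlit/instrumentation/mistral/__init__.py`, `async_mistral.py`, and `mistral.py`. Nearly every hunk is mechanical reformatting (argument lists broken out one per line, trailing commas added, long calls wrapped), so runtime behavior is unchanged. All three files share one pattern: a factory such as `complete(...)` captures the instrumentor configuration once and returns the wrapper that `wrap_function_wrapper` installs. Here is a minimal, runnable sketch of that pattern; the `Chat` stand-in, the config values, and the print-based telemetry are illustrative, not openlit code:

```python
import time

from wrapt import wrap_function_wrapper


class Chat:
    """Stand-in for mistralai.chat.Chat."""

    def complete(self, model="mistral-small-latest", **kwargs):
        return f"response from {model}"


def complete(version, environment, application_name):
    """Factory: captures configuration once, returns the wrapper wrapt installs."""

    def wrapper(wrapped, instance, args, kwargs):
        # wrapt hands us the original method, its bound instance, and the
        # call's args/kwargs; telemetry brackets the real call.
        start_time = time.time()
        response = wrapped(*args, **kwargs)
        elapsed = time.time() - start_time
        model = kwargs.get("model", "mistral-small-latest")
        print(f"[{application_name}/{environment} v{version}] {model}: {elapsed:.4f}s")
        return response

    return wrapper


# Mirrors wrap_function_wrapper("mistralai.chat", "Chat.complete", complete(...))
# from the first diff, but patches the stand-in class in this module instead.
wrap_function_wrapper(__name__, "Chat.complete", complete("1.34.32", "dev", "demo-app"))

print(Chat().complete(model="mistral-small-latest"))
```

wrapt resolves the dotted `"Chat.complete"` path inside the named module and swaps the method for a proxy, which is why `_instrument` in the first diff is little more than six such calls.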
openlit/instrumentation/mistral/__init__.py

```diff
--- a/openlit/instrumentation/mistral/__init__.py
+++ b/openlit/instrumentation/mistral/__init__.py
@@ -5,19 +5,16 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
-from openlit.instrumentation.mistral.mistral import (
-    complete,
-    stream,
-    embed
-)
+from openlit.instrumentation.mistral.mistral import complete, stream, embed
 from openlit.instrumentation.mistral.async_mistral import (
     async_complete,
     async_stream,
-    async_embed
+    async_embed,
 )
 
 _instruments = ("mistralai >= 1.0.0",)
 
+
 class MistralInstrumentor(BaseInstrumentor):
     """
     An instrumentor for Mistral client library.
@@ -40,48 +37,96 @@ class MistralInstrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "mistralai.chat",
             "Chat.complete",
-            complete(version, environment, application_name,
-                     tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            complete(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
         # sync chat streaming
         wrap_function_wrapper(
             "mistralai.chat",
             "Chat.stream",
-            stream(version, environment, application_name,
-                   tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            stream(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
         # sync embeddings
         wrap_function_wrapper(
             "mistralai.embeddings",
             "Embeddings.create",
-            embed(version, environment, application_name,
-                  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            embed(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
         # async chat completions
         wrap_function_wrapper(
             "mistralai.chat",
             "Chat.complete_async",
-            async_complete(version, environment, application_name,
-                           tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_complete(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
         # async chat streaming
         wrap_function_wrapper(
             "mistralai.chat",
             "Chat.stream_async",
-            async_stream(version, environment, application_name,
-                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_stream(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
         # async embeddings
         wrap_function_wrapper(
             "mistralai.embeddings",
             "Embeddings.create_async",
-            async_embed(version, environment, application_name,
-                        tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_embed(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
     def _uninstrument(self, **kwargs):
```
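The `_instrument` body above is exactly that: six `wrap_function_wrapper` calls, one per patched entry point (`Chat.complete`, `Chat.stream`, `Embeddings.create`, and their `_async` variants). The surrounding class is a standard OpenTelemetry instrumentor. A reduced sketch of its shape follows; the `demo_complete` factory is a placeholder for openlit's real ones, and calling `instrument()` only takes effect when a matching `mistralai` is installed:

```python
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from wrapt import wrap_function_wrapper

_instruments = ("mistralai >= 1.0.0",)


def demo_complete(version):
    """Placeholder for openlit's complete/stream/embed factories."""

    def wrapper(wrapped, instance, args, kwargs):
        print(f"instrumented by openlit {version}")
        return wrapped(*args, **kwargs)

    return wrapper


class MistralInstrumentor(BaseInstrumentor):
    """An instrumentor for the Mistral client library (reduced sketch)."""

    def instrumentation_dependencies(self):
        # instrument() checks this requirement first and logs an error
        # instead of patching when it is not satisfied.
        return _instruments

    def _instrument(self, **kwargs):
        version = kwargs.get("version", "unknown")
        # The real class repeats this for all six entry points.
        wrap_function_wrapper(
            "mistralai.chat",
            "Chat.complete",
            demo_complete(version),
        )

    def _uninstrument(self, **kwargs):
        pass
```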
openlit/instrumentation/mistral/async_mistral.py

```diff
--- a/openlit/instrumentation/mistral/async_mistral.py
+++ b/openlit/instrumentation/mistral/async_mistral.py
@@ -16,8 +16,17 @@ from openlit.instrumentation.mistral.utils import (
 )
 from openlit.semcov import SemanticConvention
 
-def async_complete(version, environment, application_name,
-                   tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def async_complete(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI complete function call
     """
@@ -27,7 +36,9 @@ def async_complete(version, environment, application_name,
         Wraps the GenAI complete function call.
         """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-small-latest")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -49,15 +60,24 @@ def async_complete(version, environment, application_name,
                 capture_message_content=capture_message_content,
                 disable_metrics=disable_metrics,
                 version=version,
-                **kwargs
+                **kwargs,
             )
 
         return response
 
     return wrapper
 
-def async_stream(version, environment, application_name,
-                 tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def async_stream(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI stream function call
     """
@@ -68,15 +88,15 @@ def async_stream(version, environment, application_name,
         """
 
         def __init__(
-                self,
-                wrapped,
-                span,
-                span_name,
-                kwargs,
-                server_address,
-                server_port,
-                **args,
-            ):
+            self,
+            wrapped,
+            span,
+            span_name,
+            kwargs,
+            server_address,
+            server_port,
+            **args,
+        ):
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
@@ -119,7 +139,9 @@ def async_stream(version, environment, application_name,
                 return chunk
             except StopAsyncIteration:
                 try:
-                    with tracer.start_as_current_span(self._span_name, kind=SpanKind.CLIENT) as self._span:
+                    with tracer.start_as_current_span(
+                        self._span_name, kind=SpanKind.CLIENT
+                    ) as self._span:
                         process_streaming_chat_response(
                             self,
                             pricing_info=pricing_info,
@@ -128,7 +150,7 @@ def async_stream(version, environment, application_name,
                             metrics=metrics,
                             capture_message_content=capture_message_content,
                             disable_metrics=disable_metrics,
-                            version=version
+                            version=version,
                         )
 
                 except Exception as e:
@@ -141,7 +163,9 @@ def async_stream(version, environment, application_name,
        Wraps the GenAI stream function call.
        """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-small-latest")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -150,12 +174,23 @@ def async_stream(version, environment, application_name,
         awaited_wrapped = await wrapped(*args, **kwargs)
         span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
 
-        return TracedAsyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+        return TracedAsyncStream(
+            awaited_wrapped, span, span_name, kwargs, server_address, server_port
+        )
 
     return wrapper
 
-def async_embed(version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def async_embed(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI embedding function call
     """
@@ -165,10 +200,14 @@ def async_embed(version, environment, application_name,
         Wraps the GenAI embedding function call.
         """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-embed")
 
-        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+        span_name = (
+            f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+        )
 
         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
             start_time = time.time()
@@ -189,7 +228,7 @@ def async_embed(version, environment, application_name,
                     capture_message_content=capture_message_content,
                     disable_metrics=disable_metrics,
                     version=version,
-                    **kwargs
+                    **kwargs,
                 )
 
             except Exception as e:
```
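Both stream wrappers hand the awaited response to a traced-stream proxy (`TracedAsyncStream` above, `TracedSyncStream` in the sync module below) so the span can be finished only after the caller drains the stream. A compact, runnable sketch of that idea follows; `DemoSpan` is a stand-in for an OpenTelemetry span, and the real class re-enters the span via `start_as_current_span` and carries far more state:

```python
import asyncio


class DemoSpan:
    """Stand-in for an OpenTelemetry span."""

    def __init__(self, name):
        self.name = name

    def end(self):
        print(f"span {self.name!r} ended")


class TracedAsyncStream:
    """Forwards chunks from the wrapped stream and finalizes on exhaustion."""

    def __init__(self, wrapped, span, span_name, kwargs):
        self.__wrapped__ = wrapped  # same attribute name as in the diff
        self._span = span
        self._span_name = span_name
        self._kwargs = kwargs
        self._chunk_count = 0

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            chunk = await self.__wrapped__.__anext__()
            self._chunk_count += 1  # per-chunk bookkeeping happens here
            return chunk
        except StopAsyncIteration:
            # Stream drained: emit final telemetry (the role played by
            # process_streaming_chat_response in the diff), then re-raise.
            print(f"{self._span_name}: {self._chunk_count} chunks")
            self._span.end()
            raise


async def fake_completion_stream():
    for token in ("Hello", ",", " world"):
        yield token


async def main():
    stream = TracedAsyncStream(
        fake_completion_stream(),
        DemoSpan("chat mistral-small-latest"),
        "chat mistral-small-latest",
        {},
    )
    async for chunk in stream:
        print("chunk:", chunk)


asyncio.run(main())
```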
openlit/instrumentation/mistral/mistral.py

```diff
--- a/openlit/instrumentation/mistral/mistral.py
+++ b/openlit/instrumentation/mistral/mistral.py
@@ -16,8 +16,17 @@ from openlit.instrumentation.mistral.utils import (
 )
 from openlit.semcov import SemanticConvention
 
-def complete(version, environment, application_name,
-             tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def complete(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI complete function call
     """
@@ -27,7 +36,9 @@ def complete(version, environment, application_name,
         Wraps the GenAI complete function call.
         """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-small-latest")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -49,15 +60,24 @@ def complete(version, environment, application_name,
                 capture_message_content=capture_message_content,
                 disable_metrics=disable_metrics,
                 version=version,
-                **kwargs
+                **kwargs,
             )
 
         return response
 
     return wrapper
 
-def stream(version, environment, application_name,
-           tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def stream(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI stream function call
     """
@@ -68,15 +88,15 @@ def stream(version, environment, application_name,
         """
 
         def __init__(
-                self,
-                wrapped,
-                span,
-                span_name,
-                kwargs,
-                server_address,
-                server_port,
-                **args,
-            ):
+            self,
+            wrapped,
+            span,
+            span_name,
+            kwargs,
+            server_address,
+            server_port,
+            **args,
+        ):
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
@@ -119,7 +139,9 @@ def stream(version, environment, application_name,
                 return chunk
             except StopIteration:
                 try:
-                    with tracer.start_as_current_span(self._span_name, kind=SpanKind.CLIENT) as self._span:
+                    with tracer.start_as_current_span(
+                        self._span_name, kind=SpanKind.CLIENT
+                    ) as self._span:
                         process_streaming_chat_response(
                             self,
                             pricing_info=pricing_info,
@@ -128,7 +150,7 @@ def stream(version, environment, application_name,
                             metrics=metrics,
                             capture_message_content=capture_message_content,
                             disable_metrics=disable_metrics,
-                            version=version
+                            version=version,
                         )
 
                 except Exception as e:
@@ -141,7 +163,9 @@ def stream(version, environment, application_name,
        Wraps the GenAI stream function call.
        """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-small-latest")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -150,12 +174,23 @@ def stream(version, environment, application_name,
         awaited_wrapped = wrapped(*args, **kwargs)
         span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
 
-        return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+        return TracedSyncStream(
+            awaited_wrapped, span, span_name, kwargs, server_address, server_port
+        )
 
     return wrapper
 
-def embed(version, environment, application_name,
-          tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def embed(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI embedding function call
     """
@@ -165,10 +200,14 @@ def embed(version, environment, application_name,
         Wraps the GenAI embedding function call.
        """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-embed")
 
-        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+        span_name = (
+            f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+        )
 
         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
             start_time = time.time()
@@ -189,7 +228,7 @@ def embed(version, environment, application_name,
                     capture_message_content=capture_message_content,
                     disable_metrics=disable_metrics,
                     version=version,
-                    **kwargs
+                    **kwargs,
                 )
 
             except Exception as e:
```
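None of these wrappers activate on import; applications switch them on through openlit's top-level entry point, which runs each instrumentor's `_instrument` with the tracer, pricing info, and the other arguments seen above. A usage sketch with illustrative parameter values:

```python
import openlit

# Sets up tracing/metrics and instruments the detected libraries;
# application_name and environment flow into the wrapper factories above.
openlit.init(
    application_name="demo-app",
    environment="development",
)

# From here on, mistralai Chat.complete / Chat.stream / Embeddings.create
# (and their async variants) are wrapped and emit spans named like
# "chat mistral-small-latest".
```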