openlit 1.34.29__py3-none-any.whl → 1.34.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (168)
  1. openlit/__helpers.py +235 -86
  2. openlit/__init__.py +16 -13
  3. openlit/_instrumentors.py +2 -1
  4. openlit/evals/all.py +50 -21
  5. openlit/evals/bias_detection.py +47 -20
  6. openlit/evals/hallucination.py +53 -22
  7. openlit/evals/toxicity.py +50 -21
  8. openlit/evals/utils.py +54 -30
  9. openlit/guard/all.py +61 -19
  10. openlit/guard/prompt_injection.py +34 -14
  11. openlit/guard/restrict_topic.py +46 -15
  12. openlit/guard/sensitive_topic.py +34 -14
  13. openlit/guard/utils.py +58 -22
  14. openlit/instrumentation/ag2/__init__.py +24 -8
  15. openlit/instrumentation/ag2/ag2.py +34 -13
  16. openlit/instrumentation/ag2/async_ag2.py +34 -13
  17. openlit/instrumentation/ag2/utils.py +133 -30
  18. openlit/instrumentation/ai21/__init__.py +43 -14
  19. openlit/instrumentation/ai21/ai21.py +47 -21
  20. openlit/instrumentation/ai21/async_ai21.py +47 -21
  21. openlit/instrumentation/ai21/utils.py +299 -78
  22. openlit/instrumentation/anthropic/__init__.py +21 -4
  23. openlit/instrumentation/anthropic/anthropic.py +28 -17
  24. openlit/instrumentation/anthropic/async_anthropic.py +28 -17
  25. openlit/instrumentation/anthropic/utils.py +145 -35
  26. openlit/instrumentation/assemblyai/__init__.py +11 -2
  27. openlit/instrumentation/assemblyai/assemblyai.py +15 -4
  28. openlit/instrumentation/assemblyai/utils.py +120 -25
  29. openlit/instrumentation/astra/__init__.py +43 -10
  30. openlit/instrumentation/astra/astra.py +28 -5
  31. openlit/instrumentation/astra/async_astra.py +28 -5
  32. openlit/instrumentation/astra/utils.py +151 -55
  33. openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
  34. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
  35. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
  36. openlit/instrumentation/azure_ai_inference/utils.py +307 -83
  37. openlit/instrumentation/bedrock/__init__.py +21 -4
  38. openlit/instrumentation/bedrock/bedrock.py +63 -25
  39. openlit/instrumentation/bedrock/utils.py +139 -30
  40. openlit/instrumentation/chroma/__init__.py +89 -16
  41. openlit/instrumentation/chroma/chroma.py +28 -6
  42. openlit/instrumentation/chroma/utils.py +167 -51
  43. openlit/instrumentation/cohere/__init__.py +63 -18
  44. openlit/instrumentation/cohere/async_cohere.py +63 -24
  45. openlit/instrumentation/cohere/cohere.py +63 -24
  46. openlit/instrumentation/cohere/utils.py +286 -73
  47. openlit/instrumentation/controlflow/__init__.py +35 -9
  48. openlit/instrumentation/controlflow/controlflow.py +66 -33
  49. openlit/instrumentation/crawl4ai/__init__.py +25 -10
  50. openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
  51. openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
  52. openlit/instrumentation/crewai/__init__.py +111 -24
  53. openlit/instrumentation/crewai/async_crewai.py +114 -0
  54. openlit/instrumentation/crewai/crewai.py +104 -131
  55. openlit/instrumentation/crewai/utils.py +615 -0
  56. openlit/instrumentation/dynamiq/__init__.py +46 -12
  57. openlit/instrumentation/dynamiq/dynamiq.py +74 -33
  58. openlit/instrumentation/elevenlabs/__init__.py +23 -4
  59. openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
  60. openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
  61. openlit/instrumentation/elevenlabs/utils.py +128 -25
  62. openlit/instrumentation/embedchain/__init__.py +11 -2
  63. openlit/instrumentation/embedchain/embedchain.py +68 -35
  64. openlit/instrumentation/firecrawl/__init__.py +24 -7
  65. openlit/instrumentation/firecrawl/firecrawl.py +46 -20
  66. openlit/instrumentation/google_ai_studio/__init__.py +45 -10
  67. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
  68. openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
  69. openlit/instrumentation/google_ai_studio/utils.py +180 -67
  70. openlit/instrumentation/gpt4all/__init__.py +22 -7
  71. openlit/instrumentation/gpt4all/gpt4all.py +67 -29
  72. openlit/instrumentation/gpt4all/utils.py +285 -61
  73. openlit/instrumentation/gpu/__init__.py +128 -47
  74. openlit/instrumentation/groq/__init__.py +21 -4
  75. openlit/instrumentation/groq/async_groq.py +33 -21
  76. openlit/instrumentation/groq/groq.py +33 -21
  77. openlit/instrumentation/groq/utils.py +192 -55
  78. openlit/instrumentation/haystack/__init__.py +70 -24
  79. openlit/instrumentation/haystack/async_haystack.py +28 -6
  80. openlit/instrumentation/haystack/haystack.py +28 -6
  81. openlit/instrumentation/haystack/utils.py +196 -74
  82. openlit/instrumentation/julep/__init__.py +69 -19
  83. openlit/instrumentation/julep/async_julep.py +53 -27
  84. openlit/instrumentation/julep/julep.py +53 -28
  85. openlit/instrumentation/langchain/__init__.py +74 -63
  86. openlit/instrumentation/langchain/callback_handler.py +1100 -0
  87. openlit/instrumentation/langchain_community/__init__.py +13 -2
  88. openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
  89. openlit/instrumentation/langchain_community/langchain_community.py +23 -5
  90. openlit/instrumentation/langchain_community/utils.py +35 -9
  91. openlit/instrumentation/letta/__init__.py +68 -15
  92. openlit/instrumentation/letta/letta.py +99 -54
  93. openlit/instrumentation/litellm/__init__.py +43 -14
  94. openlit/instrumentation/litellm/async_litellm.py +51 -26
  95. openlit/instrumentation/litellm/litellm.py +51 -26
  96. openlit/instrumentation/litellm/utils.py +312 -101
  97. openlit/instrumentation/llamaindex/__init__.py +267 -90
  98. openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
  99. openlit/instrumentation/llamaindex/llamaindex.py +28 -6
  100. openlit/instrumentation/llamaindex/utils.py +204 -91
  101. openlit/instrumentation/mem0/__init__.py +11 -2
  102. openlit/instrumentation/mem0/mem0.py +50 -29
  103. openlit/instrumentation/milvus/__init__.py +10 -2
  104. openlit/instrumentation/milvus/milvus.py +31 -6
  105. openlit/instrumentation/milvus/utils.py +166 -67
  106. openlit/instrumentation/mistral/__init__.py +63 -18
  107. openlit/instrumentation/mistral/async_mistral.py +63 -24
  108. openlit/instrumentation/mistral/mistral.py +63 -24
  109. openlit/instrumentation/mistral/utils.py +277 -69
  110. openlit/instrumentation/multion/__init__.py +69 -19
  111. openlit/instrumentation/multion/async_multion.py +57 -26
  112. openlit/instrumentation/multion/multion.py +57 -26
  113. openlit/instrumentation/ollama/__init__.py +39 -18
  114. openlit/instrumentation/ollama/async_ollama.py +57 -26
  115. openlit/instrumentation/ollama/ollama.py +57 -26
  116. openlit/instrumentation/ollama/utils.py +226 -50
  117. openlit/instrumentation/openai/__init__.py +156 -32
  118. openlit/instrumentation/openai/async_openai.py +147 -67
  119. openlit/instrumentation/openai/openai.py +150 -67
  120. openlit/instrumentation/openai/utils.py +660 -186
  121. openlit/instrumentation/openai_agents/__init__.py +6 -2
  122. openlit/instrumentation/openai_agents/processor.py +409 -537
  123. openlit/instrumentation/phidata/__init__.py +13 -5
  124. openlit/instrumentation/phidata/phidata.py +67 -32
  125. openlit/instrumentation/pinecone/__init__.py +48 -9
  126. openlit/instrumentation/pinecone/async_pinecone.py +27 -5
  127. openlit/instrumentation/pinecone/pinecone.py +27 -5
  128. openlit/instrumentation/pinecone/utils.py +153 -47
  129. openlit/instrumentation/premai/__init__.py +22 -7
  130. openlit/instrumentation/premai/premai.py +51 -26
  131. openlit/instrumentation/premai/utils.py +246 -59
  132. openlit/instrumentation/pydantic_ai/__init__.py +49 -22
  133. openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
  134. openlit/instrumentation/pydantic_ai/utils.py +89 -24
  135. openlit/instrumentation/qdrant/__init__.py +19 -4
  136. openlit/instrumentation/qdrant/async_qdrant.py +33 -7
  137. openlit/instrumentation/qdrant/qdrant.py +33 -7
  138. openlit/instrumentation/qdrant/utils.py +228 -93
  139. openlit/instrumentation/reka/__init__.py +23 -10
  140. openlit/instrumentation/reka/async_reka.py +17 -11
  141. openlit/instrumentation/reka/reka.py +17 -11
  142. openlit/instrumentation/reka/utils.py +138 -36
  143. openlit/instrumentation/together/__init__.py +44 -12
  144. openlit/instrumentation/together/async_together.py +50 -27
  145. openlit/instrumentation/together/together.py +50 -27
  146. openlit/instrumentation/together/utils.py +301 -71
  147. openlit/instrumentation/transformers/__init__.py +2 -1
  148. openlit/instrumentation/transformers/transformers.py +13 -3
  149. openlit/instrumentation/transformers/utils.py +139 -36
  150. openlit/instrumentation/vertexai/__init__.py +81 -16
  151. openlit/instrumentation/vertexai/async_vertexai.py +33 -15
  152. openlit/instrumentation/vertexai/utils.py +123 -27
  153. openlit/instrumentation/vertexai/vertexai.py +33 -15
  154. openlit/instrumentation/vllm/__init__.py +12 -5
  155. openlit/instrumentation/vllm/utils.py +121 -31
  156. openlit/instrumentation/vllm/vllm.py +16 -10
  157. openlit/otel/events.py +35 -10
  158. openlit/otel/metrics.py +32 -24
  159. openlit/otel/tracing.py +24 -9
  160. openlit/semcov/__init__.py +101 -7
  161. {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/METADATA +2 -1
  162. openlit-1.34.31.dist-info/RECORD +166 -0
  163. openlit/instrumentation/langchain/async_langchain.py +0 -102
  164. openlit/instrumentation/langchain/langchain.py +0 -102
  165. openlit/instrumentation/langchain/utils.py +0 -252
  166. openlit-1.34.29.dist-info/RECORD +0 -166
  167. {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/LICENSE +0 -0
  168. {openlit-1.34.29.dist-info → openlit-1.34.31.dist-info}/WHEEL +0 -0

openlit/instrumentation/mistral/__init__.py

@@ -5,19 +5,16 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper

-from openlit.instrumentation.mistral.mistral import (
-    complete,
-    stream,
-    embed
-)
+from openlit.instrumentation.mistral.mistral import complete, stream, embed
 from openlit.instrumentation.mistral.async_mistral import (
     async_complete,
     async_stream,
-    async_embed
+    async_embed,
 )

 _instruments = ("mistralai >= 1.0.0",)

+
 class MistralInstrumentor(BaseInstrumentor):
     """
     An instrumentor for Mistral client library.
@@ -40,48 +37,96 @@ class MistralInstrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "mistralai.chat",
             "Chat.complete",
-            complete(version, environment, application_name,
-                     tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            complete(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         # sync chat streaming
         wrap_function_wrapper(
             "mistralai.chat",
             "Chat.stream",
-            stream(version, environment, application_name,
-                   tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            stream(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         # sync embeddings
         wrap_function_wrapper(
             "mistralai.embeddings",
             "Embeddings.create",
-            embed(version, environment, application_name,
-                  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            embed(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         # async chat completions
         wrap_function_wrapper(
             "mistralai.chat",
             "Chat.complete_async",
-            async_complete(version, environment, application_name,
-                           tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_complete(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         # async chat streaming
         wrap_function_wrapper(
             "mistralai.chat",
             "Chat.stream_async",
-            async_stream(version, environment, application_name,
-                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_stream(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         # async embeddings
         wrap_function_wrapper(
             "mistralai.embeddings",
             "Embeddings.create_async",
-            async_embed(version, environment, application_name,
-                        tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_embed(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

     def _uninstrument(self, **kwargs):
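
For orientation, complete, stream, embed and their async counterparts are wrapper factories: each returns a wrapt-style wrapper (taking wrapped, instance, args, kwargs) that wrap_function_wrapper patches onto the Mistral client methods named above. A minimal sketch of that pattern follows, assuming wrapt and mistralai are installed; complete_factory and its three parameters are illustrative stand-ins, not the openlit signature.

from wrapt import wrap_function_wrapper


def complete_factory(version, environment, application_name):
    # Illustrative stand-in for openlit's complete(); the real factories also
    # take tracer, pricing_info, capture_message_content, metrics, disable_metrics.
    def wrapper(wrapped, instance, args, kwargs):
        # wrapt hands us the original bound method plus its call arguments;
        # openlit records telemetry around this call instead of printing.
        print(f"[{application_name} v{version}/{environment}] Chat.complete called")
        return wrapped(*args, **kwargs)

    return wrapper


# Same wiring as MistralInstrumentor._instrument() above: patch the factory's
# wrapper onto mistralai.chat.Chat.complete.
wrap_function_wrapper(
    "mistralai.chat",
    "Chat.complete",
    complete_factory("1.34.31", "production", "demo-app"),
)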

openlit/instrumentation/mistral/async_mistral.py

@@ -16,8 +16,17 @@ from openlit.instrumentation.mistral.utils import (
 )
 from openlit.semcov import SemanticConvention

-def async_complete(version, environment, application_name,
-                   tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def async_complete(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI complete function call
     """
@@ -27,7 +36,9 @@ def async_complete(version, environment, application_name,
         Wraps the GenAI complete function call.
         """

-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-small-latest")

         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -49,15 +60,24 @@ def async_complete(version, environment, application_name,
                 capture_message_content=capture_message_content,
                 disable_metrics=disable_metrics,
                 version=version,
-                **kwargs
+                **kwargs,
             )

             return response

     return wrapper

-def async_stream(version, environment, application_name,
-                 tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def async_stream(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI stream function call
     """
@@ -68,15 +88,15 @@ def async_stream(version, environment, application_name,
         """

         def __init__(
-                self,
-                wrapped,
-                span,
-                span_name,
-                kwargs,
-                server_address,
-                server_port,
-                **args,
-            ):
+            self,
+            wrapped,
+            span,
+            span_name,
+            kwargs,
+            server_address,
+            server_port,
+            **args,
+        ):
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
@@ -119,7 +139,9 @@ def async_stream(version, environment, application_name,
                 return chunk
             except StopAsyncIteration:
                 try:
-                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                    with tracer.start_as_current_span(
+                        self._span_name, kind=SpanKind.CLIENT
+                    ) as self._span:
                         process_streaming_chat_response(
                             self,
                             pricing_info=pricing_info,
@@ -128,7 +150,7 @@ def async_stream(version, environment, application_name,
                             metrics=metrics,
                             capture_message_content=capture_message_content,
                             disable_metrics=disable_metrics,
-                            version=version
+                            version=version,
                         )

                 except Exception as e:
@@ -141,7 +163,9 @@ def async_stream(version, environment, application_name,
         Wraps the GenAI stream function call.
         """

-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-small-latest")

         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -150,12 +174,23 @@ def async_stream(version, environment, application_name,
         awaited_wrapped = await wrapped(*args, **kwargs)
         span = tracer.start_span(span_name, kind=SpanKind.CLIENT)

-        return TracedAsyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+        return TracedAsyncStream(
+            awaited_wrapped, span, span_name, kwargs, server_address, server_port
+        )

     return wrapper

-def async_embed(version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def async_embed(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI embedding function call
     """
@@ -165,10 +200,14 @@ def async_embed(version, environment, application_name,
         Wraps the GenAI embedding function call.
         """

-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-embed")

-        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+        span_name = (
+            f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+        )

         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
             start_time = time.time()
@@ -189,7 +228,7 @@ def async_embed(version, environment, application_name,
                     capture_message_content=capture_message_content,
                     disable_metrics=disable_metrics,
                     version=version,
-                    **kwargs
+                    **kwargs,
                 )

             except Exception as e:
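
The TracedAsyncStream class reformatted above follows a common wrapt pattern: proxy the stream object returned by the client and, when the underlying iterator raises StopAsyncIteration, open a span and hand the aggregated response to process_streaming_chat_response. Below is a stripped-down sketch of that control flow with the span handling replaced by a plain callback; the class name, on_complete, and fake_stream are illustrative, not openlit code.

import asyncio

from wrapt import ObjectProxy


class TracedAsyncIterator(ObjectProxy):
    def __init__(self, wrapped, on_complete):
        super().__init__(wrapped)
        # The _self_ prefix keeps the attribute on the proxy instead of
        # forwarding it to the wrapped stream.
        self._self_on_complete = on_complete

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            return await self.__wrapped__.__anext__()
        except StopAsyncIteration:
            # This is the point where openlit starts the span and records
            # the accumulated streaming response.
            self._self_on_complete()
            raise


async def fake_stream():
    for chunk in ("Hel", "lo"):
        yield chunk


async def main():
    traced = TracedAsyncIterator(fake_stream(), lambda: print("stream finished"))
    async for chunk in traced:
        print("chunk:", chunk)


asyncio.run(main())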

openlit/instrumentation/mistral/mistral.py

@@ -16,8 +16,17 @@ from openlit.instrumentation.mistral.utils import (
 )
 from openlit.semcov import SemanticConvention

-def complete(version, environment, application_name,
-             tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def complete(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI complete function call
     """
@@ -27,7 +36,9 @@ def complete(version, environment, application_name,
         Wraps the GenAI complete function call.
         """

-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-small-latest")

         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -49,15 +60,24 @@ def complete(version, environment, application_name,
                 capture_message_content=capture_message_content,
                 disable_metrics=disable_metrics,
                 version=version,
-                **kwargs
+                **kwargs,
             )

             return response

     return wrapper

-def stream(version, environment, application_name,
-           tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def stream(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI stream function call
     """
@@ -68,15 +88,15 @@ def stream(version, environment, application_name,
         """

         def __init__(
-                self,
-                wrapped,
-                span,
-                span_name,
-                kwargs,
-                server_address,
-                server_port,
-                **args,
-            ):
+            self,
+            wrapped,
+            span,
+            span_name,
+            kwargs,
+            server_address,
+            server_port,
+            **args,
+        ):
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
@@ -119,7 +139,9 @@ def stream(version, environment, application_name,
                 return chunk
             except StopIteration:
                 try:
-                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                    with tracer.start_as_current_span(
+                        self._span_name, kind=SpanKind.CLIENT
+                    ) as self._span:
                         process_streaming_chat_response(
                             self,
                             pricing_info=pricing_info,
@@ -128,7 +150,7 @@ def stream(version, environment, application_name,
                             metrics=metrics,
                             capture_message_content=capture_message_content,
                             disable_metrics=disable_metrics,
-                            version=version
+                            version=version,
                         )

                 except Exception as e:
@@ -141,7 +163,9 @@ def stream(version, environment, application_name,
        Wraps the GenAI stream function call.
        """

-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-small-latest")

         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -150,12 +174,23 @@ def stream(version, environment, application_name,
         awaited_wrapped = wrapped(*args, **kwargs)
         span = tracer.start_span(span_name, kind=SpanKind.CLIENT)

-        return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+        return TracedSyncStream(
+            awaited_wrapped, span, span_name, kwargs, server_address, server_port
+        )

     return wrapper

-def embed(version, environment, application_name,
-          tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+
+def embed(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for GenAI embedding function call
     """
@@ -165,10 +200,14 @@ def embed(version, environment, application_name,
         Wraps the GenAI embedding function call.
        """

-        server_address, server_port = set_server_address_and_port(instance, "api.mistral.ai", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.mistral.ai", 443
+        )
         request_model = kwargs.get("model", "mistral-embed")

-        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+        span_name = (
+            f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+        )

         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
             start_time = time.time()
@@ -189,7 +228,7 @@ def embed(version, environment, application_name,
                     capture_message_content=capture_message_content,
                     disable_metrics=disable_metrics,
                     version=version,
-                    **kwargs
+                    **kwargs,
                 )

             except Exception as e:
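
None of these wrapper factories is called directly by application code; MistralInstrumentor._instrument() wires them in, and openlit's initialisation path is what instantiates the instrumentor. A hedged usage sketch, where the keyword arguments are assumptions inferred from the factory parameters above rather than a verified openlit.init() signature:

import openlit

# openlit.init() is the package's entry point; application_name and
# environment mirror the values threaded into complete()/stream()/embed()
# above (assumed keyword names).
openlit.init(
    application_name="demo-app",
    environment="production",
)

# From this point, mistralai's Chat.complete / Chat.stream / Embeddings.create
# and their *_async variants run inside the wrappers shown in this diff.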