openlit 1.34.30__py3-none-any.whl → 1.34.31__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (168)
  1. openlit/__helpers.py +235 -86
  2. openlit/__init__.py +16 -13
  3. openlit/_instrumentors.py +2 -1
  4. openlit/evals/all.py +50 -21
  5. openlit/evals/bias_detection.py +47 -20
  6. openlit/evals/hallucination.py +53 -22
  7. openlit/evals/toxicity.py +50 -21
  8. openlit/evals/utils.py +54 -30
  9. openlit/guard/all.py +61 -19
  10. openlit/guard/prompt_injection.py +34 -14
  11. openlit/guard/restrict_topic.py +46 -15
  12. openlit/guard/sensitive_topic.py +34 -14
  13. openlit/guard/utils.py +58 -22
  14. openlit/instrumentation/ag2/__init__.py +24 -8
  15. openlit/instrumentation/ag2/ag2.py +34 -13
  16. openlit/instrumentation/ag2/async_ag2.py +34 -13
  17. openlit/instrumentation/ag2/utils.py +133 -30
  18. openlit/instrumentation/ai21/__init__.py +43 -14
  19. openlit/instrumentation/ai21/ai21.py +47 -21
  20. openlit/instrumentation/ai21/async_ai21.py +47 -21
  21. openlit/instrumentation/ai21/utils.py +299 -78
  22. openlit/instrumentation/anthropic/__init__.py +21 -4
  23. openlit/instrumentation/anthropic/anthropic.py +28 -17
  24. openlit/instrumentation/anthropic/async_anthropic.py +28 -17
  25. openlit/instrumentation/anthropic/utils.py +145 -35
  26. openlit/instrumentation/assemblyai/__init__.py +11 -2
  27. openlit/instrumentation/assemblyai/assemblyai.py +15 -4
  28. openlit/instrumentation/assemblyai/utils.py +120 -25
  29. openlit/instrumentation/astra/__init__.py +43 -10
  30. openlit/instrumentation/astra/astra.py +28 -5
  31. openlit/instrumentation/astra/async_astra.py +28 -5
  32. openlit/instrumentation/astra/utils.py +151 -55
  33. openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
  34. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
  35. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
  36. openlit/instrumentation/azure_ai_inference/utils.py +307 -83
  37. openlit/instrumentation/bedrock/__init__.py +21 -4
  38. openlit/instrumentation/bedrock/bedrock.py +63 -25
  39. openlit/instrumentation/bedrock/utils.py +139 -30
  40. openlit/instrumentation/chroma/__init__.py +89 -16
  41. openlit/instrumentation/chroma/chroma.py +28 -6
  42. openlit/instrumentation/chroma/utils.py +167 -51
  43. openlit/instrumentation/cohere/__init__.py +63 -18
  44. openlit/instrumentation/cohere/async_cohere.py +63 -24
  45. openlit/instrumentation/cohere/cohere.py +63 -24
  46. openlit/instrumentation/cohere/utils.py +286 -73
  47. openlit/instrumentation/controlflow/__init__.py +35 -9
  48. openlit/instrumentation/controlflow/controlflow.py +66 -33
  49. openlit/instrumentation/crawl4ai/__init__.py +25 -10
  50. openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
  51. openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
  52. openlit/instrumentation/crewai/__init__.py +40 -15
  53. openlit/instrumentation/crewai/async_crewai.py +32 -7
  54. openlit/instrumentation/crewai/crewai.py +32 -7
  55. openlit/instrumentation/crewai/utils.py +159 -56
  56. openlit/instrumentation/dynamiq/__init__.py +46 -12
  57. openlit/instrumentation/dynamiq/dynamiq.py +74 -33
  58. openlit/instrumentation/elevenlabs/__init__.py +23 -4
  59. openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
  60. openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
  61. openlit/instrumentation/elevenlabs/utils.py +128 -25
  62. openlit/instrumentation/embedchain/__init__.py +11 -2
  63. openlit/instrumentation/embedchain/embedchain.py +68 -35
  64. openlit/instrumentation/firecrawl/__init__.py +24 -7
  65. openlit/instrumentation/firecrawl/firecrawl.py +46 -20
  66. openlit/instrumentation/google_ai_studio/__init__.py +45 -10
  67. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
  68. openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
  69. openlit/instrumentation/google_ai_studio/utils.py +180 -67
  70. openlit/instrumentation/gpt4all/__init__.py +22 -7
  71. openlit/instrumentation/gpt4all/gpt4all.py +67 -29
  72. openlit/instrumentation/gpt4all/utils.py +285 -61
  73. openlit/instrumentation/gpu/__init__.py +128 -47
  74. openlit/instrumentation/groq/__init__.py +21 -4
  75. openlit/instrumentation/groq/async_groq.py +33 -21
  76. openlit/instrumentation/groq/groq.py +33 -21
  77. openlit/instrumentation/groq/utils.py +192 -55
  78. openlit/instrumentation/haystack/__init__.py +70 -24
  79. openlit/instrumentation/haystack/async_haystack.py +28 -6
  80. openlit/instrumentation/haystack/haystack.py +28 -6
  81. openlit/instrumentation/haystack/utils.py +196 -74
  82. openlit/instrumentation/julep/__init__.py +69 -19
  83. openlit/instrumentation/julep/async_julep.py +53 -27
  84. openlit/instrumentation/julep/julep.py +53 -28
  85. openlit/instrumentation/langchain/__init__.py +74 -63
  86. openlit/instrumentation/langchain/callback_handler.py +1100 -0
  87. openlit/instrumentation/langchain_community/__init__.py +13 -2
  88. openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
  89. openlit/instrumentation/langchain_community/langchain_community.py +23 -5
  90. openlit/instrumentation/langchain_community/utils.py +35 -9
  91. openlit/instrumentation/letta/__init__.py +68 -15
  92. openlit/instrumentation/letta/letta.py +99 -54
  93. openlit/instrumentation/litellm/__init__.py +43 -14
  94. openlit/instrumentation/litellm/async_litellm.py +51 -26
  95. openlit/instrumentation/litellm/litellm.py +51 -26
  96. openlit/instrumentation/litellm/utils.py +304 -102
  97. openlit/instrumentation/llamaindex/__init__.py +267 -90
  98. openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
  99. openlit/instrumentation/llamaindex/llamaindex.py +28 -6
  100. openlit/instrumentation/llamaindex/utils.py +204 -91
  101. openlit/instrumentation/mem0/__init__.py +11 -2
  102. openlit/instrumentation/mem0/mem0.py +50 -29
  103. openlit/instrumentation/milvus/__init__.py +10 -2
  104. openlit/instrumentation/milvus/milvus.py +31 -6
  105. openlit/instrumentation/milvus/utils.py +166 -67
  106. openlit/instrumentation/mistral/__init__.py +63 -18
  107. openlit/instrumentation/mistral/async_mistral.py +63 -24
  108. openlit/instrumentation/mistral/mistral.py +63 -24
  109. openlit/instrumentation/mistral/utils.py +277 -69
  110. openlit/instrumentation/multion/__init__.py +69 -19
  111. openlit/instrumentation/multion/async_multion.py +57 -26
  112. openlit/instrumentation/multion/multion.py +57 -26
  113. openlit/instrumentation/ollama/__init__.py +39 -18
  114. openlit/instrumentation/ollama/async_ollama.py +57 -26
  115. openlit/instrumentation/ollama/ollama.py +57 -26
  116. openlit/instrumentation/ollama/utils.py +226 -50
  117. openlit/instrumentation/openai/__init__.py +156 -32
  118. openlit/instrumentation/openai/async_openai.py +147 -67
  119. openlit/instrumentation/openai/openai.py +150 -67
  120. openlit/instrumentation/openai/utils.py +657 -185
  121. openlit/instrumentation/openai_agents/__init__.py +5 -1
  122. openlit/instrumentation/openai_agents/processor.py +110 -90
  123. openlit/instrumentation/phidata/__init__.py +13 -5
  124. openlit/instrumentation/phidata/phidata.py +67 -32
  125. openlit/instrumentation/pinecone/__init__.py +48 -9
  126. openlit/instrumentation/pinecone/async_pinecone.py +27 -5
  127. openlit/instrumentation/pinecone/pinecone.py +27 -5
  128. openlit/instrumentation/pinecone/utils.py +153 -47
  129. openlit/instrumentation/premai/__init__.py +22 -7
  130. openlit/instrumentation/premai/premai.py +51 -26
  131. openlit/instrumentation/premai/utils.py +246 -59
  132. openlit/instrumentation/pydantic_ai/__init__.py +49 -22
  133. openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
  134. openlit/instrumentation/pydantic_ai/utils.py +89 -24
  135. openlit/instrumentation/qdrant/__init__.py +19 -4
  136. openlit/instrumentation/qdrant/async_qdrant.py +33 -7
  137. openlit/instrumentation/qdrant/qdrant.py +33 -7
  138. openlit/instrumentation/qdrant/utils.py +228 -93
  139. openlit/instrumentation/reka/__init__.py +23 -10
  140. openlit/instrumentation/reka/async_reka.py +17 -11
  141. openlit/instrumentation/reka/reka.py +17 -11
  142. openlit/instrumentation/reka/utils.py +138 -36
  143. openlit/instrumentation/together/__init__.py +44 -12
  144. openlit/instrumentation/together/async_together.py +50 -27
  145. openlit/instrumentation/together/together.py +50 -27
  146. openlit/instrumentation/together/utils.py +301 -71
  147. openlit/instrumentation/transformers/__init__.py +2 -1
  148. openlit/instrumentation/transformers/transformers.py +13 -3
  149. openlit/instrumentation/transformers/utils.py +139 -36
  150. openlit/instrumentation/vertexai/__init__.py +81 -16
  151. openlit/instrumentation/vertexai/async_vertexai.py +33 -15
  152. openlit/instrumentation/vertexai/utils.py +123 -27
  153. openlit/instrumentation/vertexai/vertexai.py +33 -15
  154. openlit/instrumentation/vllm/__init__.py +12 -5
  155. openlit/instrumentation/vllm/utils.py +121 -31
  156. openlit/instrumentation/vllm/vllm.py +16 -10
  157. openlit/otel/events.py +35 -10
  158. openlit/otel/metrics.py +32 -24
  159. openlit/otel/tracing.py +24 -9
  160. openlit/semcov/__init__.py +72 -6
  161. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/METADATA +2 -1
  162. openlit-1.34.31.dist-info/RECORD +166 -0
  163. openlit/instrumentation/langchain/async_langchain.py +0 -102
  164. openlit/instrumentation/langchain/langchain.py +0 -102
  165. openlit/instrumentation/langchain/utils.py +0 -252
  166. openlit-1.34.30.dist-info/RECORD +0 -168
  167. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/LICENSE +0 -0
  168. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/WHEEL +0 -0
openlit/instrumentation/transformers/utils.py
@@ -1,6 +1,7 @@
 """
 HF Transformers OpenTelemetry instrumentation utility functions
 """
+
 import time

 from opentelemetry.trace import Status, StatusCode
@@ -13,6 +14,7 @@ from openlit.__helpers import (
 )
 from openlit.semcov import SemanticConvention

+
 def format_content(content):
     """
     Format content to a consistent structure.
@@ -21,8 +23,12 @@ def format_content(content):
         return content
     elif isinstance(content, list):
         # Check if its a list of chat messages (like in the test case)
-        if (len(content) > 0 and isinstance(content[0], dict) and
-            "role" in content[0] and "content" in content[0]):
+        if (
+            len(content) > 0
+            and isinstance(content[0], dict)
+            and "role" in content[0]
+            and "content" in content[0]
+        ):
             # Handle chat message format like Groq
             formatted_messages = []
             for message in content:
@@ -31,7 +37,7 @@ def format_content(content):

                 if isinstance(msg_content, list):
                     content_str = ", ".join(
-                        f'{item["type"]}: {item["text"] if "text" in item else item.get("image_url", str(item))}'
+                        f"{item['type']}: {item['text'] if 'text' in item else item.get('image_url', str(item))}"
                         if isinstance(item, dict) and "type" in item
                         else str(item)
                         for item in msg_content
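The quote change above sits inside the generator expression that flattens multimodal message items into a single string. For reference, that expression behaves as in the following standalone sketch (the helper name and sample data are illustrative, not part of the package):

    # Minimal sketch of the item-flattening expression shown above.
    def flatten_items(msg_content):
        return ", ".join(
            f"{item['type']}: {item['text'] if 'text' in item else item.get('image_url', str(item))}"
            if isinstance(item, dict) and "type" in item
            else str(item)
            for item in msg_content
        )

    print(flatten_items([
        {"type": "text", "text": "Describe this image"},
        {"type": "image_url", "image_url": "https://example.com/cat.png"},
    ]))
    # -> text: Describe this image, image_url: https://example.com/cat.png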
@@ -58,9 +64,20 @@ def format_content(content):
     else:
         return str(content)

-def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
-    capture_message_content, disable_metrics, version, args, kwargs, is_stream):

+def common_chat_logic(
+    scope,
+    pricing_info,
+    environment,
+    application_name,
+    metrics,
+    capture_message_content,
+    disable_metrics,
+    version,
+    args,
+    kwargs,
+    is_stream,
+):
     """
     Process chat request and generate Telemetry
     """
@@ -75,31 +92,60 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
     cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)

     # Common Span Attributes
-    common_span_attributes(scope,
-        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_HUGGING_FACE,
-        scope._server_address, scope._server_port, request_model, request_model,
-        environment, application_name, is_stream, scope._tbt, scope._ttft, version)
+    common_span_attributes(
+        scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+        SemanticConvention.GEN_AI_SYSTEM_HUGGING_FACE,
+        scope._server_address,
+        scope._server_port,
+        request_model,
+        request_model,
+        environment,
+        application_name,
+        is_stream,
+        scope._tbt,
+        scope._ttft,
+        version,
+    )

     # Set request parameters from forward_params
     if forward_params.get("temperature") is not None:
-        scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, forward_params["temperature"])
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, forward_params["temperature"]
+        )
     if forward_params.get("top_k") is not None:
-        scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_K, forward_params["top_k"])
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_REQUEST_TOP_K, forward_params["top_k"]
+        )
     if forward_params.get("top_p") is not None:
-        scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, forward_params["top_p"])
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_REQUEST_TOP_P, forward_params["top_p"]
+        )
     if forward_params.get("max_length") is not None:
-        scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, forward_params["max_length"])
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, forward_params["max_length"]
+        )

     # Set token usage and cost attributes
-    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens + output_tokens)
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens
+    )
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens
+    )
+    scope._span.set_attribute(
+        SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens + output_tokens
+    )
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)

     # Span Attributes for Content
     if capture_message_content:
-        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, scope._prompt)
-        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._completion)
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_CONTENT_PROMPT, scope._prompt
+        )
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._completion
+        )

     # To be removed once the change to span_attributes (from span events) is complete
     scope._span.add_event(
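Every request parameter above follows the same guard: check the optional forward_param for None, and only then record it on the span. A minimal self-contained illustration of that pattern against the OpenTelemetry API (tracer name, span name, and attribute keys here are illustrative; with no SDK configured the span is a no-op):

    from opentelemetry import trace

    tracer = trace.get_tracer("sketch")
    forward_params = {"temperature": 0.2, "top_k": None}

    with tracer.start_as_current_span("transformers.chat") as span:
        # Only record parameters the caller actually supplied.
        if forward_params.get("temperature") is not None:
            span.set_attribute("gen_ai.request.temperature", forward_params["temperature"])
        if forward_params.get("top_k") is not None:  # None here, so nothing is recorded
            span.set_attribute("gen_ai.request.top_k", forward_params["top_k"])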
@@ -119,14 +165,44 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metrics,

     # Record metrics using the standardized helper function
     if not disable_metrics:
-        record_completion_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
-            SemanticConvention.GEN_AI_SYSTEM_HUGGING_FACE, scope._server_address, scope._server_port,
-            request_model, request_model, environment, application_name, scope._start_time, scope._end_time,
-            cost, input_tokens, output_tokens, scope._tbt, scope._ttft)
-
-def process_chat_response(instance, response, request_model, pricing_info, server_port, server_address,
-    environment, application_name, metrics, start_time,
-    span, args, kwargs, capture_message_content=False, disable_metrics=False, version="1.0.0"):
+        record_completion_metrics(
+            metrics,
+            SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+            SemanticConvention.GEN_AI_SYSTEM_HUGGING_FACE,
+            scope._server_address,
+            scope._server_port,
+            request_model,
+            request_model,
+            environment,
+            application_name,
+            scope._start_time,
+            scope._end_time,
+            cost,
+            input_tokens,
+            output_tokens,
+            scope._tbt,
+            scope._ttft,
+        )
+
+
+def process_chat_response(
+    instance,
+    response,
+    request_model,
+    pricing_info,
+    server_port,
+    server_address,
+    environment,
+    application_name,
+    metrics,
+    start_time,
+    span,
+    args,
+    kwargs,
+    capture_message_content=False,
+    disable_metrics=False,
+    version="1.0.0",
+):
     """
     Process chat request and generate Telemetry
     """
@@ -146,11 +222,19 @@ def process_chat_response(instance, response, request_model, pricing_info, server_port, server_address,
             scope._prompt = args[0]
         else:
             scope._prompt = (
-                kwargs.get("text_inputs") or
-                (kwargs.get("image") and kwargs.get("question") and
-                    ("image: " + kwargs.get("image") + " question:" + kwargs.get("question"))) or
-                kwargs.get("fallback") or
-                ""
+                kwargs.get("text_inputs")
+                or (
+                    kwargs.get("image")
+                    and kwargs.get("question")
+                    and (
+                        "image: "
+                        + kwargs.get("image")
+                        + " question:"
+                        + kwargs.get("question")
+                    )
+                )
+                or kwargs.get("fallback")
+                or ""
             )
         scope._prompt = format_content(scope._prompt)

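The reformatted expression above is a truthiness fallback chain: the first source that yields a non-empty value wins, and the image/question branch only fires when both keys are present. A runnable sketch of the same chain (the sample kwargs are illustrative):

    kwargs = {"image": "cat.png", "question": "What animal is this?"}

    prompt = (
        kwargs.get("text_inputs")
        or (
            kwargs.get("image")
            and kwargs.get("question")
            and ("image: " + kwargs.get("image") + " question:" + kwargs.get("question"))
        )
        or kwargs.get("fallback")
        or ""
    )
    print(prompt)  # -> image: cat.png question:What animal is this?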
@@ -175,13 +259,21 @@ def process_chat_response(instance, response, request_model, pricing_info, server_port, server_address,
             scope._completion = ""

     elif task == "automatic-speech-recognition":
-        scope._completion = response.get("text", "") if isinstance(response, dict) else ""
+        scope._completion = (
+            response.get("text", "") if isinstance(response, dict) else ""
+        )

     elif task == "image-classification":
-        scope._completion = str(response[0]) if isinstance(response, list) and len(response) > 0 else ""
+        scope._completion = (
+            str(response[0]) if isinstance(response, list) and len(response) > 0 else ""
+        )

     elif task == "visual-question-answering":
-        if isinstance(response, list) and len(response) > 0 and isinstance(response[0], dict):
+        if (
+            isinstance(response, list)
+            and len(response) > 0
+            and isinstance(response[0], dict)
+        ):
             scope._completion = response[0].get("answer", "")
         else:
             scope._completion = ""
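Each branch above pairs a task name with an isinstance guard so that a malformed response degrades to an empty completion rather than raising. Condensed into a standalone function (the function name and sample inputs are illustrative):

    def extract_completion(task, response):
        # Mirrors the guards above: unexpected shapes fall through to "".
        if task == "automatic-speech-recognition":
            return response.get("text", "") if isinstance(response, dict) else ""
        if task == "image-classification":
            return str(response[0]) if isinstance(response, list) and len(response) > 0 else ""
        if task == "visual-question-answering":
            if isinstance(response, list) and len(response) > 0 and isinstance(response[0], dict):
                return response[0].get("answer", "")
        return ""

    print(extract_completion("automatic-speech-recognition", {"text": "hello world"}))  # hello world
    print(extract_completion("image-classification", "not-a-list"))  # prints an empty line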
@@ -193,7 +285,18 @@ def process_chat_response(instance, response, request_model, pricing_info, server_port, server_address,
     scope._tbt = 0
     scope._ttft = scope._end_time - scope._start_time

-    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
-        capture_message_content, disable_metrics, version, args, kwargs, is_stream=False)
+    common_chat_logic(
+        scope,
+        pricing_info,
+        environment,
+        application_name,
+        metrics,
+        capture_message_content,
+        disable_metrics,
+        version,
+        args,
+        kwargs,
+        is_stream=False,
+    )

     return response
openlit/instrumentation/vertexai/__init__.py
@@ -10,6 +10,7 @@ from openlit.instrumentation.vertexai.async_vertexai import async_send_message

 _instruments = ("google-cloud-aiplatform >= 1.38.1",)

+
 class VertexAIInstrumentor(BaseInstrumentor):
     """
     An instrumentor for VertexAI client library.
@@ -32,60 +33,124 @@ class VertexAIInstrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "vertexai.generative_models",
             "GenerativeModel.generate_content",
-            send_message(version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            send_message(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         wrap_function_wrapper(
             "vertexai.generative_models",
             "ChatSession.send_message",
-            send_message(version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            send_message(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         # sync language models
         wrap_function_wrapper(
             "vertexai.language_models",
             "ChatSession.send_message",
-            send_message(version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            send_message(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         wrap_function_wrapper(
             "vertexai.language_models",
             "ChatSession.send_message_streaming",
-            send_message(version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            send_message(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         # async generative models
         wrap_function_wrapper(
             "vertexai.generative_models",
             "GenerativeModel.generate_content_async",
-            async_send_message(version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_send_message(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         wrap_function_wrapper(
             "vertexai.generative_models",
             "ChatSession.send_message_async",
-            async_send_message(version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_send_message(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         # async language models
         wrap_function_wrapper(
             "vertexai.language_models",
             "ChatSession.send_message_async",
-            async_send_message(version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_send_message(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

         wrap_function_wrapper(
             "vertexai.language_models",
             "ChatSession.send_message_streaming_async",
-            async_send_message(version, environment, application_name,
-                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_send_message(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )

     def _uninstrument(self, **kwargs):
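All of these registrations use wrapt's wrap_function_wrapper, which patches a method in place: send_message(...) is a factory whose return value is the wrapper that wrapt installs around the original callable. A minimal runnable sketch of the same mechanism, patching a standard-library function instead of VertexAI (the target and version string are purely for demonstration):

    import json

    from wrapt import wrap_function_wrapper

    def make_wrapper(version):
        # Factory, analogous to send_message(...): returns the installed wrapper.
        def wrapper(wrapped, instance, args, kwargs):
            print(f"instrumented call (v{version})")  # telemetry would be emitted here
            return wrapped(*args, **kwargs)
        return wrapper

    wrap_function_wrapper(json, "dumps", make_wrapper("1.34.31"))
    print(json.dumps({"ok": True}))  # prints the marker line, then {"ok": true}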
openlit/instrumentation/vertexai/async_vertexai.py
@@ -19,8 +19,17 @@ from openlit.semcov import SemanticConvention
 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)

-def async_send_message(version, environment, application_name, tracer,
-    pricing_info, capture_message_content, metrics, disable_metrics):
+
+def async_send_message(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for VertexAI AsyncMessages calls.
     """
@@ -31,16 +40,16 @@ def async_send_message(version, environment, application_name, tracer,
         """

         def __init__(
-                self,
-                wrapped,
-                span,
-                span_name,
-                kwargs,
-                server_address,
-                server_port,
-                request_model,
-                args,
-            ):
+            self,
+            wrapped,
+            span,
+            span_name,
+            kwargs,
+            server_address,
+            server_port,
+            request_model,
+            args,
+        ):
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
@@ -88,7 +97,7 @@ def async_send_message(version, environment, application_name, tracer,
                     metrics=metrics,
                     capture_message_content=capture_message_content,
                     disable_metrics=disable_metrics,
-                    version=version
+                    version=version,
                 )
             except Exception as e:
                 handle_exception(self._span, e)
@@ -108,7 +117,16 @@ def async_send_message(version, environment, application_name, tracer,
             awaited_wrapped = await wrapped(*args, **kwargs)
             span = tracer.start_span(span_name, kind=SpanKind.CLIENT)

-            return TracedAsyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port, request_model, args)
+            return TracedAsyncStream(
+                awaited_wrapped,
+                span,
+                span_name,
+                kwargs,
+                server_address,
+                server_port,
+                request_model,
+                args,
+            )

         else:
             with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
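On the streaming path the wrapper cannot end the span when the call returns, so it hands the awaited stream to a proxy that completes telemetry only once the stream is exhausted. A self-contained sketch of that proxy shape (simplified: the real TracedAsyncStream in this diff also carries span, kwargs, and timing state):

    import asyncio

    class TracedAsyncStream:
        """Simplified stand-in for the proxy returned above."""

        def __init__(self, wrapped):
            self.__wrapped__ = wrapped

        def __aiter__(self):
            return self

        async def __anext__(self):
            try:
                return await self.__wrapped__.__anext__()
            except StopAsyncIteration:
                # The real proxy would end the span and record metrics here.
                raise

    async def fake_stream():
        for chunk in ("Hel", "lo"):
            yield chunk

    async def main():
        async for chunk in TracedAsyncStream(fake_stream()):
            print(chunk, end="")
        print()

    asyncio.run(main())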
@@ -130,7 +148,7 @@ def async_send_message(version, environment, application_name, tracer,
                     capture_message_content=capture_message_content,
                     disable_metrics=disable_metrics,
                     version=version,
-                    **kwargs
+                    **kwargs,
                 )

             except Exception as e: