openlit 1.34.30__py3-none-any.whl → 1.34.31__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (168)
  1. openlit/__helpers.py +235 -86
  2. openlit/__init__.py +16 -13
  3. openlit/_instrumentors.py +2 -1
  4. openlit/evals/all.py +50 -21
  5. openlit/evals/bias_detection.py +47 -20
  6. openlit/evals/hallucination.py +53 -22
  7. openlit/evals/toxicity.py +50 -21
  8. openlit/evals/utils.py +54 -30
  9. openlit/guard/all.py +61 -19
  10. openlit/guard/prompt_injection.py +34 -14
  11. openlit/guard/restrict_topic.py +46 -15
  12. openlit/guard/sensitive_topic.py +34 -14
  13. openlit/guard/utils.py +58 -22
  14. openlit/instrumentation/ag2/__init__.py +24 -8
  15. openlit/instrumentation/ag2/ag2.py +34 -13
  16. openlit/instrumentation/ag2/async_ag2.py +34 -13
  17. openlit/instrumentation/ag2/utils.py +133 -30
  18. openlit/instrumentation/ai21/__init__.py +43 -14
  19. openlit/instrumentation/ai21/ai21.py +47 -21
  20. openlit/instrumentation/ai21/async_ai21.py +47 -21
  21. openlit/instrumentation/ai21/utils.py +299 -78
  22. openlit/instrumentation/anthropic/__init__.py +21 -4
  23. openlit/instrumentation/anthropic/anthropic.py +28 -17
  24. openlit/instrumentation/anthropic/async_anthropic.py +28 -17
  25. openlit/instrumentation/anthropic/utils.py +145 -35
  26. openlit/instrumentation/assemblyai/__init__.py +11 -2
  27. openlit/instrumentation/assemblyai/assemblyai.py +15 -4
  28. openlit/instrumentation/assemblyai/utils.py +120 -25
  29. openlit/instrumentation/astra/__init__.py +43 -10
  30. openlit/instrumentation/astra/astra.py +28 -5
  31. openlit/instrumentation/astra/async_astra.py +28 -5
  32. openlit/instrumentation/astra/utils.py +151 -55
  33. openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
  34. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
  35. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
  36. openlit/instrumentation/azure_ai_inference/utils.py +307 -83
  37. openlit/instrumentation/bedrock/__init__.py +21 -4
  38. openlit/instrumentation/bedrock/bedrock.py +63 -25
  39. openlit/instrumentation/bedrock/utils.py +139 -30
  40. openlit/instrumentation/chroma/__init__.py +89 -16
  41. openlit/instrumentation/chroma/chroma.py +28 -6
  42. openlit/instrumentation/chroma/utils.py +167 -51
  43. openlit/instrumentation/cohere/__init__.py +63 -18
  44. openlit/instrumentation/cohere/async_cohere.py +63 -24
  45. openlit/instrumentation/cohere/cohere.py +63 -24
  46. openlit/instrumentation/cohere/utils.py +286 -73
  47. openlit/instrumentation/controlflow/__init__.py +35 -9
  48. openlit/instrumentation/controlflow/controlflow.py +66 -33
  49. openlit/instrumentation/crawl4ai/__init__.py +25 -10
  50. openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
  51. openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
  52. openlit/instrumentation/crewai/__init__.py +40 -15
  53. openlit/instrumentation/crewai/async_crewai.py +32 -7
  54. openlit/instrumentation/crewai/crewai.py +32 -7
  55. openlit/instrumentation/crewai/utils.py +159 -56
  56. openlit/instrumentation/dynamiq/__init__.py +46 -12
  57. openlit/instrumentation/dynamiq/dynamiq.py +74 -33
  58. openlit/instrumentation/elevenlabs/__init__.py +23 -4
  59. openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
  60. openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
  61. openlit/instrumentation/elevenlabs/utils.py +128 -25
  62. openlit/instrumentation/embedchain/__init__.py +11 -2
  63. openlit/instrumentation/embedchain/embedchain.py +68 -35
  64. openlit/instrumentation/firecrawl/__init__.py +24 -7
  65. openlit/instrumentation/firecrawl/firecrawl.py +46 -20
  66. openlit/instrumentation/google_ai_studio/__init__.py +45 -10
  67. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
  68. openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
  69. openlit/instrumentation/google_ai_studio/utils.py +180 -67
  70. openlit/instrumentation/gpt4all/__init__.py +22 -7
  71. openlit/instrumentation/gpt4all/gpt4all.py +67 -29
  72. openlit/instrumentation/gpt4all/utils.py +285 -61
  73. openlit/instrumentation/gpu/__init__.py +128 -47
  74. openlit/instrumentation/groq/__init__.py +21 -4
  75. openlit/instrumentation/groq/async_groq.py +33 -21
  76. openlit/instrumentation/groq/groq.py +33 -21
  77. openlit/instrumentation/groq/utils.py +192 -55
  78. openlit/instrumentation/haystack/__init__.py +70 -24
  79. openlit/instrumentation/haystack/async_haystack.py +28 -6
  80. openlit/instrumentation/haystack/haystack.py +28 -6
  81. openlit/instrumentation/haystack/utils.py +196 -74
  82. openlit/instrumentation/julep/__init__.py +69 -19
  83. openlit/instrumentation/julep/async_julep.py +53 -27
  84. openlit/instrumentation/julep/julep.py +53 -28
  85. openlit/instrumentation/langchain/__init__.py +74 -63
  86. openlit/instrumentation/langchain/callback_handler.py +1100 -0
  87. openlit/instrumentation/langchain_community/__init__.py +13 -2
  88. openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
  89. openlit/instrumentation/langchain_community/langchain_community.py +23 -5
  90. openlit/instrumentation/langchain_community/utils.py +35 -9
  91. openlit/instrumentation/letta/__init__.py +68 -15
  92. openlit/instrumentation/letta/letta.py +99 -54
  93. openlit/instrumentation/litellm/__init__.py +43 -14
  94. openlit/instrumentation/litellm/async_litellm.py +51 -26
  95. openlit/instrumentation/litellm/litellm.py +51 -26
  96. openlit/instrumentation/litellm/utils.py +304 -102
  97. openlit/instrumentation/llamaindex/__init__.py +267 -90
  98. openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
  99. openlit/instrumentation/llamaindex/llamaindex.py +28 -6
  100. openlit/instrumentation/llamaindex/utils.py +204 -91
  101. openlit/instrumentation/mem0/__init__.py +11 -2
  102. openlit/instrumentation/mem0/mem0.py +50 -29
  103. openlit/instrumentation/milvus/__init__.py +10 -2
  104. openlit/instrumentation/milvus/milvus.py +31 -6
  105. openlit/instrumentation/milvus/utils.py +166 -67
  106. openlit/instrumentation/mistral/__init__.py +63 -18
  107. openlit/instrumentation/mistral/async_mistral.py +63 -24
  108. openlit/instrumentation/mistral/mistral.py +63 -24
  109. openlit/instrumentation/mistral/utils.py +277 -69
  110. openlit/instrumentation/multion/__init__.py +69 -19
  111. openlit/instrumentation/multion/async_multion.py +57 -26
  112. openlit/instrumentation/multion/multion.py +57 -26
  113. openlit/instrumentation/ollama/__init__.py +39 -18
  114. openlit/instrumentation/ollama/async_ollama.py +57 -26
  115. openlit/instrumentation/ollama/ollama.py +57 -26
  116. openlit/instrumentation/ollama/utils.py +226 -50
  117. openlit/instrumentation/openai/__init__.py +156 -32
  118. openlit/instrumentation/openai/async_openai.py +147 -67
  119. openlit/instrumentation/openai/openai.py +150 -67
  120. openlit/instrumentation/openai/utils.py +657 -185
  121. openlit/instrumentation/openai_agents/__init__.py +5 -1
  122. openlit/instrumentation/openai_agents/processor.py +110 -90
  123. openlit/instrumentation/phidata/__init__.py +13 -5
  124. openlit/instrumentation/phidata/phidata.py +67 -32
  125. openlit/instrumentation/pinecone/__init__.py +48 -9
  126. openlit/instrumentation/pinecone/async_pinecone.py +27 -5
  127. openlit/instrumentation/pinecone/pinecone.py +27 -5
  128. openlit/instrumentation/pinecone/utils.py +153 -47
  129. openlit/instrumentation/premai/__init__.py +22 -7
  130. openlit/instrumentation/premai/premai.py +51 -26
  131. openlit/instrumentation/premai/utils.py +246 -59
  132. openlit/instrumentation/pydantic_ai/__init__.py +49 -22
  133. openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
  134. openlit/instrumentation/pydantic_ai/utils.py +89 -24
  135. openlit/instrumentation/qdrant/__init__.py +19 -4
  136. openlit/instrumentation/qdrant/async_qdrant.py +33 -7
  137. openlit/instrumentation/qdrant/qdrant.py +33 -7
  138. openlit/instrumentation/qdrant/utils.py +228 -93
  139. openlit/instrumentation/reka/__init__.py +23 -10
  140. openlit/instrumentation/reka/async_reka.py +17 -11
  141. openlit/instrumentation/reka/reka.py +17 -11
  142. openlit/instrumentation/reka/utils.py +138 -36
  143. openlit/instrumentation/together/__init__.py +44 -12
  144. openlit/instrumentation/together/async_together.py +50 -27
  145. openlit/instrumentation/together/together.py +50 -27
  146. openlit/instrumentation/together/utils.py +301 -71
  147. openlit/instrumentation/transformers/__init__.py +2 -1
  148. openlit/instrumentation/transformers/transformers.py +13 -3
  149. openlit/instrumentation/transformers/utils.py +139 -36
  150. openlit/instrumentation/vertexai/__init__.py +81 -16
  151. openlit/instrumentation/vertexai/async_vertexai.py +33 -15
  152. openlit/instrumentation/vertexai/utils.py +123 -27
  153. openlit/instrumentation/vertexai/vertexai.py +33 -15
  154. openlit/instrumentation/vllm/__init__.py +12 -5
  155. openlit/instrumentation/vllm/utils.py +121 -31
  156. openlit/instrumentation/vllm/vllm.py +16 -10
  157. openlit/otel/events.py +35 -10
  158. openlit/otel/metrics.py +32 -24
  159. openlit/otel/tracing.py +24 -9
  160. openlit/semcov/__init__.py +72 -6
  161. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/METADATA +2 -1
  162. openlit-1.34.31.dist-info/RECORD +166 -0
  163. openlit/instrumentation/langchain/async_langchain.py +0 -102
  164. openlit/instrumentation/langchain/langchain.py +0 -102
  165. openlit/instrumentation/langchain/utils.py +0 -252
  166. openlit-1.34.30.dist-info/RECORD +0 -168
  167. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/LICENSE +0 -0
  168. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/WHEEL +0 -0
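
The excerpt below is from the OpenAI instrumentation ("Module for monitoring OpenAI API calls"); within the hunks shown, the changes are formatting-only: the multi-line import is collapsed, function signatures and long calls are reflowed one argument per line, and trailing commas are added. For orientation, here is a minimal sketch of the wrapper-factory pattern those functions follow, assuming wrapt-style patching and the OpenTelemetry tracing API. The name simple_chat_wrapper and the span attribute keys are illustrative assumptions, not openlit's actual implementation, which delegates to helpers such as process_chat_response().

import time

from opentelemetry import trace
from opentelemetry.trace import SpanKind

tracer = trace.get_tracer(__name__)


def simple_chat_wrapper(version, environment, application_name):
    """Return a wrapt-style wrapper(wrapped, instance, args, kwargs) that traces one chat call."""

    def wrapper(wrapped, instance, args, kwargs):
        request_model = kwargs.get("model", "gpt-4o")
        span_name = f"chat {request_model}"
        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
            start_time = time.time()
            response = wrapped(*args, **kwargs)  # call the real client method
            # Illustrative attribute keys; openlit uses its SemanticConvention constants
            # and helpers such as process_chat_response() for tokens, cost, and metrics.
            span.set_attribute("gen_ai.request.model", request_model)
            span.set_attribute("instrumentation.version", version)
            span.set_attribute("deployment.environment", environment)
            span.set_attribute("service.name", application_name)
            span.set_attribute("operation.duration_s", time.time() - start_time)
            return response

    return wrapper


# Hypothetical wiring, comparable to what the instrumentor's _instrument() does:
#   import wrapt
#   wrapt.wrap_function_wrapper(
#       "openai.resources.chat.completions",
#       "Completions.create",
#       simple_chat_wrapper("1.34.31", "production", "my-app"),
#   )

Each factory in the diff (chat_completions, responses, chat_completions_parse, embedding, image_generate, image_variatons, audio_create) follows this shape, adding pricing lookups, metrics, and optional message-content capture around the wrapped call.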
@@ -4,10 +4,7 @@ Module for monitoring OpenAI API calls.
 
 import time
 from opentelemetry.trace import SpanKind
-from openlit.__helpers import (
-    handle_exception,
-    set_server_address_and_port
-)
+from openlit.__helpers import handle_exception, set_server_address_and_port
 from openlit.instrumentation.openai.utils import (
     process_chat_chunk,
     process_response_chunk,
@@ -21,8 +18,17 @@ from openlit.instrumentation.openai.utils import (
 )
 from openlit.semcov import SemanticConvention
 
-def chat_completions(version, environment, application_name, tracer, pricing_info,
-    capture_message_content, metrics, disable_metrics):
+
+def chat_completions(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for OpenAI chat completions.
     """
@@ -33,14 +39,14 @@ def chat_completions(version, environment, application_name, tracer, pricing_inf
         """
 
         def __init__(
-                self,
-                wrapped,
-                span,
-                span_name,
-                kwargs,
-                server_address,
-                server_port,
-            ):
+            self,
+            wrapped,
+            span,
+            span_name,
+            kwargs,
+            server_address,
+            server_port,
+        ):
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
@@ -90,7 +96,7 @@ def chat_completions(version, environment, application_name, tracer, pricing_inf
                         metrics=metrics,
                         capture_message_content=capture_message_content,
                         disable_metrics=disable_metrics,
-                        version=version
+                        version=version,
                     )
                 except Exception as e:
                     handle_exception(self._span, e)
@@ -102,7 +108,9 @@ def chat_completions(version, environment, application_name, tracer, pricing_inf
         """
 
         streaming = kwargs.get("stream", False)
-        server_address, server_port = set_server_address_and_port(instance, "api.openai.com", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.openai.com", 443
+        )
         request_model = kwargs.get("model", "gpt-4o")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -111,7 +119,9 @@ def chat_completions(version, environment, application_name, tracer, pricing_inf
             awaited_wrapped = wrapped(*args, **kwargs)
             span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
 
-            return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+            return TracedSyncStream(
+                awaited_wrapped, span, span_name, kwargs, server_address, server_port
+            )
 
         else:
             with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
@@ -121,9 +131,9 @@ def chat_completions(version, environment, application_name, tracer, pricing_inf
                 try:
                     response = process_chat_response(
                         response=response,
-                        request_model=request_model,
+                        request_model=request_model,
                         pricing_info=pricing_info,
-                        server_port=server_port,
+                        server_port=server_port,
                         server_address=server_address,
                         environment=environment,
                         application_name=application_name,
@@ -133,7 +143,7 @@ def chat_completions(version, environment, application_name, tracer, pricing_inf
                         capture_message_content=capture_message_content,
                         disable_metrics=disable_metrics,
                         version=version,
-                        **kwargs
+                        **kwargs,
                     )
 
                 except Exception as e:
@@ -143,8 +153,18 @@ def chat_completions(version, environment, application_name, tracer, pricing_inf
 
     return wrapper
 
-def responses(version, environment, application_name, tracer, pricing_info,
-    capture_message_content, metrics, disable_metrics, **kwargs):
+
+def responses(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+    **kwargs,
+):
     """
     Generates a telemetry wrapper for OpenAI responses API.
     """
@@ -155,14 +175,14 @@ def responses(version, environment, application_name, tracer, pricing_info,
         """
 
         def __init__(
-                self,
-                wrapped,
-                span,
-                span_name,
-                kwargs,
-                server_address,
-                server_port,
-            ):
+            self,
+            wrapped,
+            span,
+            span_name,
+            kwargs,
+            server_address,
+            server_port,
+        ):
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
@@ -216,7 +236,7 @@ def responses(version, environment, application_name, tracer, pricing_info,
                         metrics=metrics,
                         capture_message_content=capture_message_content,
                         disable_metrics=disable_metrics,
-                        version=version
+                        version=version,
                     )
                 except Exception as e:
                     handle_exception(self._span, e)
@@ -228,7 +248,9 @@ def responses(version, environment, application_name, tracer, pricing_info,
         """
 
         streaming = kwargs.get("stream", False)
-        server_address, server_port = set_server_address_and_port(instance, "api.openai.com", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.openai.com", 443
+        )
         request_model = kwargs.get("model", "gpt-4o")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -237,7 +259,9 @@ def responses(version, environment, application_name, tracer, pricing_info,
             awaited_wrapped = wrapped(*args, **kwargs)
             span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
 
-            return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+            return TracedSyncStream(
+                awaited_wrapped, span, span_name, kwargs, server_address, server_port
+            )
 
         else:
             with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
@@ -247,9 +271,9 @@ def responses(version, environment, application_name, tracer, pricing_info,
                 try:
                     response = process_response_response(
                         response=response,
-                        request_model=request_model,
+                        request_model=request_model,
                         pricing_info=pricing_info,
-                        server_port=server_port,
+                        server_port=server_port,
                         server_address=server_address,
                         environment=environment,
                         application_name=application_name,
@@ -259,7 +283,7 @@ def responses(version, environment, application_name, tracer, pricing_info,
                         capture_message_content=capture_message_content,
                         disable_metrics=disable_metrics,
                         version=version,
-                        **kwargs
+                        **kwargs,
                     )
 
                 except Exception as e:
@@ -269,8 +293,17 @@ def responses(version, environment, application_name, tracer, pricing_info,
 
     return wrapper
 
-def chat_completions_parse(version, environment, application_name, tracer, pricing_info,
-    capture_message_content, metrics, disable_metrics):
+
+def chat_completions_parse(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for OpenAI chat completions parse.
     """
@@ -280,7 +313,9 @@ def chat_completions_parse(version, environment, application_name, tracer, prici
         Wraps the OpenAI chat completions parse call.
         """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.openai.com", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.openai.com", 443
+        )
        request_model = kwargs.get("model", "gpt-4o")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
@@ -292,9 +327,9 @@ def chat_completions_parse(version, environment, application_name, tracer, prici
            try:
                response = process_chat_response(
                    response=response,
-                    request_model=request_model,
+                    request_model=request_model,
                    pricing_info=pricing_info,
-                    server_port=server_port,
+                    server_port=server_port,
                    server_address=server_address,
                    environment=environment,
                    application_name=application_name,
@@ -304,7 +339,7 @@ def chat_completions_parse(version, environment, application_name, tracer, prici
                    capture_message_content=capture_message_content,
                    disable_metrics=disable_metrics,
                    version=version,
-                    **kwargs
+                    **kwargs,
                )
 
            except Exception as e:
@@ -314,8 +349,18 @@ def chat_completions_parse(version, environment, application_name, tracer, prici
 
    return wrapper
 
-def embedding(version, environment, application_name, tracer, pricing_info,
-    capture_message_content, metrics, disable_metrics, **kwargs):
+
+def embedding(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+    **kwargs,
+):
     """
     Generates a telemetry wrapper for OpenAI embeddings.
     """
@@ -325,10 +370,14 @@ def embedding(version, environment, application_name, tracer, pricing_info,
         Wraps the OpenAI embeddings call.
         """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.openai.com", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.openai.com", 443
+        )
         request_model = kwargs.get("model", "text-embedding-ada-002")
 
-        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+        span_name = (
+            f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+        )
 
         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
             start_time = time.time()
@@ -337,9 +386,9 @@ def embedding(version, environment, application_name, tracer, pricing_info,
            try:
                response = process_embedding_response(
                    response=response,
-                    request_model=request_model,
+                    request_model=request_model,
                    pricing_info=pricing_info,
-                    server_port=server_port,
+                    server_port=server_port,
                    server_address=server_address,
                    environment=environment,
                    application_name=application_name,
@@ -349,7 +398,7 @@ def embedding(version, environment, application_name, tracer, pricing_info,
                    capture_message_content=capture_message_content,
                    disable_metrics=disable_metrics,
                    version=version,
-                    **kwargs
+                    **kwargs,
                )
 
            except Exception as e:
@@ -359,8 +408,18 @@ def embedding(version, environment, application_name, tracer, pricing_info,
 
    return wrapper
 
-def image_generate(version, environment, application_name, tracer, pricing_info,
-    capture_message_content, metrics, disable_metrics, **kwargs):
+
+def image_generate(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+    **kwargs,
+):
     """
     Generates a telemetry wrapper for OpenAI image generation.
     """
@@ -370,7 +429,9 @@ def image_generate(version, environment, application_name, tracer, pricing_info,
         Wraps the OpenAI image generation call.
         """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.openai.com", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.openai.com", 443
+        )
         request_model = kwargs.get("model", "dall-e-2")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_IMAGE} {request_model}"
@@ -383,9 +444,9 @@ def image_generate(version, environment, application_name, tracer, pricing_info,
            try:
                response = process_image_response(
                    response=response,
-                    request_model=request_model,
+                    request_model=request_model,
                    pricing_info=pricing_info,
-                    server_port=server_port,
+                    server_port=server_port,
                    server_address=server_address,
                    environment=environment,
                    application_name=application_name,
@@ -396,7 +457,7 @@ def image_generate(version, environment, application_name, tracer, pricing_info,
                    capture_message_content=capture_message_content,
                    disable_metrics=disable_metrics,
                    version=version,
-                    **kwargs
+                    **kwargs,
                )
 
            except Exception as e:
@@ -406,8 +467,17 @@ def image_generate(version, environment, application_name, tracer, pricing_info,
 
    return wrapper
 
-def image_variatons(version, environment, application_name, tracer, pricing_info,
-    capture_message_content, metrics, disable_metrics):
+
+def image_variatons(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for OpenAI image variations.
     """
@@ -417,7 +487,9 @@ def image_variatons(version, environment, application_name, tracer, pricing_info
         Wraps the OpenAI image variations call.
         """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.openai.com", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.openai.com", 443
+        )
         request_model = kwargs.get("model", "dall-e-2")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_IMAGE} {request_model}"
@@ -430,9 +502,9 @@ def image_variatons(version, environment, application_name, tracer, pricing_info
            try:
                response = process_image_response(
                    response=response,
-                    request_model=request_model,
+                    request_model=request_model,
                    pricing_info=pricing_info,
-                    server_port=server_port,
+                    server_port=server_port,
                    server_address=server_address,
                    environment=environment,
                    application_name=application_name,
@@ -443,7 +515,7 @@ def image_variatons(version, environment, application_name, tracer, pricing_info
                    capture_message_content=capture_message_content,
                    disable_metrics=disable_metrics,
                    version=version,
-                    **kwargs
+                    **kwargs,
                )
 
            except Exception as e:
@@ -453,8 +525,17 @@ def image_variatons(version, environment, application_name, tracer, pricing_info
 
    return wrapper
 
-def audio_create(version, environment, application_name, tracer, pricing_info,
-    capture_message_content, metrics, disable_metrics):
+
+def audio_create(
+    version,
+    environment,
+    application_name,
+    tracer,
+    pricing_info,
+    capture_message_content,
+    metrics,
+    disable_metrics,
+):
     """
     Generates a telemetry wrapper for OpenAI audio creation.
     """
@@ -464,7 +545,9 @@ def audio_create(version, environment, application_name, tracer, pricing_info,
         Wraps the OpenAI audio creation call.
         """
 
-        server_address, server_port = set_server_address_and_port(instance, "api.openai.com", 443)
+        server_address, server_port = set_server_address_and_port(
+            instance, "api.openai.com", 443
+        )
         request_model = kwargs.get("model", "tts-1")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_AUDIO} {request_model}"
@@ -477,9 +560,9 @@ def audio_create(version, environment, application_name, tracer, pricing_info,
            try:
                response = process_audio_response(
                    response=response,
-                    request_model=request_model,
+                    request_model=request_model,
                    pricing_info=pricing_info,
-                    server_port=server_port,
+                    server_port=server_port,
                    server_address=server_address,
                    environment=environment,
                    application_name=application_name,
@@ -490,7 +573,7 @@ def audio_create(version, environment, application_name, tracer, pricing_info,
                    capture_message_content=capture_message_content,
                    disable_metrics=disable_metrics,
                    version=version,
-                    **kwargs
+                    **kwargs,
                )
 
            except Exception as e: