openlit 1.34.30__py3-none-any.whl → 1.34.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (168)
  1. openlit/__helpers.py +235 -86
  2. openlit/__init__.py +16 -13
  3. openlit/_instrumentors.py +2 -1
  4. openlit/evals/all.py +50 -21
  5. openlit/evals/bias_detection.py +47 -20
  6. openlit/evals/hallucination.py +53 -22
  7. openlit/evals/toxicity.py +50 -21
  8. openlit/evals/utils.py +54 -30
  9. openlit/guard/all.py +61 -19
  10. openlit/guard/prompt_injection.py +34 -14
  11. openlit/guard/restrict_topic.py +46 -15
  12. openlit/guard/sensitive_topic.py +34 -14
  13. openlit/guard/utils.py +58 -22
  14. openlit/instrumentation/ag2/__init__.py +24 -8
  15. openlit/instrumentation/ag2/ag2.py +34 -13
  16. openlit/instrumentation/ag2/async_ag2.py +34 -13
  17. openlit/instrumentation/ag2/utils.py +133 -30
  18. openlit/instrumentation/ai21/__init__.py +43 -14
  19. openlit/instrumentation/ai21/ai21.py +47 -21
  20. openlit/instrumentation/ai21/async_ai21.py +47 -21
  21. openlit/instrumentation/ai21/utils.py +299 -78
  22. openlit/instrumentation/anthropic/__init__.py +21 -4
  23. openlit/instrumentation/anthropic/anthropic.py +28 -17
  24. openlit/instrumentation/anthropic/async_anthropic.py +28 -17
  25. openlit/instrumentation/anthropic/utils.py +145 -35
  26. openlit/instrumentation/assemblyai/__init__.py +11 -2
  27. openlit/instrumentation/assemblyai/assemblyai.py +15 -4
  28. openlit/instrumentation/assemblyai/utils.py +120 -25
  29. openlit/instrumentation/astra/__init__.py +43 -10
  30. openlit/instrumentation/astra/astra.py +28 -5
  31. openlit/instrumentation/astra/async_astra.py +28 -5
  32. openlit/instrumentation/astra/utils.py +151 -55
  33. openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
  34. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
  35. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
  36. openlit/instrumentation/azure_ai_inference/utils.py +307 -83
  37. openlit/instrumentation/bedrock/__init__.py +21 -4
  38. openlit/instrumentation/bedrock/bedrock.py +63 -25
  39. openlit/instrumentation/bedrock/utils.py +139 -30
  40. openlit/instrumentation/chroma/__init__.py +89 -16
  41. openlit/instrumentation/chroma/chroma.py +28 -6
  42. openlit/instrumentation/chroma/utils.py +167 -51
  43. openlit/instrumentation/cohere/__init__.py +63 -18
  44. openlit/instrumentation/cohere/async_cohere.py +63 -24
  45. openlit/instrumentation/cohere/cohere.py +63 -24
  46. openlit/instrumentation/cohere/utils.py +286 -73
  47. openlit/instrumentation/controlflow/__init__.py +35 -9
  48. openlit/instrumentation/controlflow/controlflow.py +66 -33
  49. openlit/instrumentation/crawl4ai/__init__.py +25 -10
  50. openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
  51. openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
  52. openlit/instrumentation/crewai/__init__.py +40 -15
  53. openlit/instrumentation/crewai/async_crewai.py +32 -7
  54. openlit/instrumentation/crewai/crewai.py +32 -7
  55. openlit/instrumentation/crewai/utils.py +159 -56
  56. openlit/instrumentation/dynamiq/__init__.py +46 -12
  57. openlit/instrumentation/dynamiq/dynamiq.py +74 -33
  58. openlit/instrumentation/elevenlabs/__init__.py +23 -4
  59. openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
  60. openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
  61. openlit/instrumentation/elevenlabs/utils.py +128 -25
  62. openlit/instrumentation/embedchain/__init__.py +11 -2
  63. openlit/instrumentation/embedchain/embedchain.py +68 -35
  64. openlit/instrumentation/firecrawl/__init__.py +24 -7
  65. openlit/instrumentation/firecrawl/firecrawl.py +46 -20
  66. openlit/instrumentation/google_ai_studio/__init__.py +45 -10
  67. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
  68. openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
  69. openlit/instrumentation/google_ai_studio/utils.py +180 -67
  70. openlit/instrumentation/gpt4all/__init__.py +22 -7
  71. openlit/instrumentation/gpt4all/gpt4all.py +67 -29
  72. openlit/instrumentation/gpt4all/utils.py +285 -61
  73. openlit/instrumentation/gpu/__init__.py +128 -47
  74. openlit/instrumentation/groq/__init__.py +21 -4
  75. openlit/instrumentation/groq/async_groq.py +33 -21
  76. openlit/instrumentation/groq/groq.py +33 -21
  77. openlit/instrumentation/groq/utils.py +192 -55
  78. openlit/instrumentation/haystack/__init__.py +70 -24
  79. openlit/instrumentation/haystack/async_haystack.py +28 -6
  80. openlit/instrumentation/haystack/haystack.py +28 -6
  81. openlit/instrumentation/haystack/utils.py +196 -74
  82. openlit/instrumentation/julep/__init__.py +69 -19
  83. openlit/instrumentation/julep/async_julep.py +53 -27
  84. openlit/instrumentation/julep/julep.py +53 -28
  85. openlit/instrumentation/langchain/__init__.py +74 -63
  86. openlit/instrumentation/langchain/callback_handler.py +1100 -0
  87. openlit/instrumentation/langchain_community/__init__.py +13 -2
  88. openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
  89. openlit/instrumentation/langchain_community/langchain_community.py +23 -5
  90. openlit/instrumentation/langchain_community/utils.py +35 -9
  91. openlit/instrumentation/letta/__init__.py +68 -15
  92. openlit/instrumentation/letta/letta.py +99 -54
  93. openlit/instrumentation/litellm/__init__.py +43 -14
  94. openlit/instrumentation/litellm/async_litellm.py +51 -26
  95. openlit/instrumentation/litellm/litellm.py +51 -26
  96. openlit/instrumentation/litellm/utils.py +304 -102
  97. openlit/instrumentation/llamaindex/__init__.py +267 -90
  98. openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
  99. openlit/instrumentation/llamaindex/llamaindex.py +28 -6
  100. openlit/instrumentation/llamaindex/utils.py +204 -91
  101. openlit/instrumentation/mem0/__init__.py +11 -2
  102. openlit/instrumentation/mem0/mem0.py +50 -29
  103. openlit/instrumentation/milvus/__init__.py +10 -2
  104. openlit/instrumentation/milvus/milvus.py +31 -6
  105. openlit/instrumentation/milvus/utils.py +166 -67
  106. openlit/instrumentation/mistral/__init__.py +63 -18
  107. openlit/instrumentation/mistral/async_mistral.py +63 -24
  108. openlit/instrumentation/mistral/mistral.py +63 -24
  109. openlit/instrumentation/mistral/utils.py +277 -69
  110. openlit/instrumentation/multion/__init__.py +69 -19
  111. openlit/instrumentation/multion/async_multion.py +57 -26
  112. openlit/instrumentation/multion/multion.py +57 -26
  113. openlit/instrumentation/ollama/__init__.py +39 -18
  114. openlit/instrumentation/ollama/async_ollama.py +57 -26
  115. openlit/instrumentation/ollama/ollama.py +57 -26
  116. openlit/instrumentation/ollama/utils.py +226 -50
  117. openlit/instrumentation/openai/__init__.py +156 -32
  118. openlit/instrumentation/openai/async_openai.py +147 -67
  119. openlit/instrumentation/openai/openai.py +150 -67
  120. openlit/instrumentation/openai/utils.py +657 -185
  121. openlit/instrumentation/openai_agents/__init__.py +5 -1
  122. openlit/instrumentation/openai_agents/processor.py +110 -90
  123. openlit/instrumentation/phidata/__init__.py +13 -5
  124. openlit/instrumentation/phidata/phidata.py +67 -32
  125. openlit/instrumentation/pinecone/__init__.py +48 -9
  126. openlit/instrumentation/pinecone/async_pinecone.py +27 -5
  127. openlit/instrumentation/pinecone/pinecone.py +27 -5
  128. openlit/instrumentation/pinecone/utils.py +153 -47
  129. openlit/instrumentation/premai/__init__.py +22 -7
  130. openlit/instrumentation/premai/premai.py +51 -26
  131. openlit/instrumentation/premai/utils.py +246 -59
  132. openlit/instrumentation/pydantic_ai/__init__.py +49 -22
  133. openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
  134. openlit/instrumentation/pydantic_ai/utils.py +89 -24
  135. openlit/instrumentation/qdrant/__init__.py +19 -4
  136. openlit/instrumentation/qdrant/async_qdrant.py +33 -7
  137. openlit/instrumentation/qdrant/qdrant.py +33 -7
  138. openlit/instrumentation/qdrant/utils.py +228 -93
  139. openlit/instrumentation/reka/__init__.py +23 -10
  140. openlit/instrumentation/reka/async_reka.py +17 -11
  141. openlit/instrumentation/reka/reka.py +17 -11
  142. openlit/instrumentation/reka/utils.py +138 -36
  143. openlit/instrumentation/together/__init__.py +44 -12
  144. openlit/instrumentation/together/async_together.py +50 -27
  145. openlit/instrumentation/together/together.py +50 -27
  146. openlit/instrumentation/together/utils.py +301 -71
  147. openlit/instrumentation/transformers/__init__.py +2 -1
  148. openlit/instrumentation/transformers/transformers.py +13 -3
  149. openlit/instrumentation/transformers/utils.py +139 -36
  150. openlit/instrumentation/vertexai/__init__.py +81 -16
  151. openlit/instrumentation/vertexai/async_vertexai.py +33 -15
  152. openlit/instrumentation/vertexai/utils.py +123 -27
  153. openlit/instrumentation/vertexai/vertexai.py +33 -15
  154. openlit/instrumentation/vllm/__init__.py +12 -5
  155. openlit/instrumentation/vllm/utils.py +121 -31
  156. openlit/instrumentation/vllm/vllm.py +16 -10
  157. openlit/otel/events.py +35 -10
  158. openlit/otel/metrics.py +32 -24
  159. openlit/otel/tracing.py +24 -9
  160. openlit/semcov/__init__.py +72 -6
  161. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/METADATA +2 -1
  162. openlit-1.34.31.dist-info/RECORD +166 -0
  163. openlit/instrumentation/langchain/async_langchain.py +0 -102
  164. openlit/instrumentation/langchain/langchain.py +0 -102
  165. openlit/instrumentation/langchain/utils.py +0 -252
  166. openlit-1.34.30.dist-info/RECORD +0 -168
  167. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/LICENSE +0 -0
  168. {openlit-1.34.30.dist-info → openlit-1.34.31.dist-info}/WHEEL +0 -0
--- a/openlit/instrumentation/haystack/utils.py
+++ b/openlit/instrumentation/haystack/utils.py
@@ -1,6 +1,7 @@
 """
 Haystack utilities
 """
+
 import time
 import json
 from typing import Dict, Any
@@ -19,10 +20,12 @@ OPERATION_MAP = {
     "document_embedder": SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING,
 }
 
+
 def set_server_address_and_port(instance):
     """Fast server address extraction"""
     return "localhost", 8080
 
+
 def object_count(obj):
     """Fast object counting"""
     try:
@@ -30,7 +33,10 @@ def object_count(obj):
     except:
         return 1
 
-def extract_component_technical_details(instance, args, kwargs, endpoint) -> Dict[str, Any]:
+
+def extract_component_technical_details(
+    instance, args, kwargs, endpoint
+) -> Dict[str, Any]:
     """Extract comprehensive component technical details with performance optimization"""
     details = {}
 
@@ -41,7 +47,9 @@ def extract_component_technical_details(instance, args, kwargs, endpoint) -> Dict[str, Any]:
         details["module_name"] = instance.__class__.__module__
 
         # Component input type extraction (optimized)
-        if hasattr(instance, "_component_config") and hasattr(instance._component_config, "input_types"):
+        if hasattr(instance, "_component_config") and hasattr(
+            instance._component_config, "input_types"
+        ):
             input_types = {}
             for name, type_info in instance._component_config.input_types.items():
                 input_types[name] = str(type_info) if type_info else "Any"
@@ -53,7 +61,9 @@ def extract_component_technical_details(instance, args, kwargs, endpoint) -> Dict[str, Any]:
             details["input_types"] = input_types
 
         # Component output type extraction (optimized)
-        if hasattr(instance, "_component_config") and hasattr(instance._component_config, "output_types"):
+        if hasattr(instance, "_component_config") and hasattr(
+            instance._component_config, "output_types"
+        ):
             output_types = {}
             for name, type_info in instance._component_config.output_types.items():
                 output_types[name] = str(type_info) if type_info else "Any"
@@ -70,7 +80,7 @@ def extract_component_technical_details(instance, args, kwargs, endpoint) -> Dict[str, Any]:
                 spec_info = {
                     "type": str(getattr(socket, "type", "Any")),
                     "default_value": str(getattr(socket, "default_value", None)),
-                    "is_optional": getattr(socket, "is_optional", False)
+                    "is_optional": getattr(socket, "is_optional", False),
                 }
                 input_spec[socket_name] = spec_info
             details["input_spec"] = input_spec
@@ -81,7 +91,7 @@ def extract_component_technical_details(instance, args, kwargs, endpoint) -> Dict[str, Any]:
             for socket_name, socket in config.output_sockets.items():
                 spec_info = {
                     "type": str(getattr(socket, "type", "Any")),
-                    "is_list": getattr(socket, "is_list", False)
+                    "is_list": getattr(socket, "is_list", False),
                 }
                 output_spec[socket_name] = spec_info
             details["output_spec"] = output_spec
@@ -103,6 +113,7 @@ def extract_component_technical_details(instance, args, kwargs, endpoint) -> Dict[str, Any]:
 
     return details
 
+
 def extract_pipeline_metadata(instance, args, kwargs) -> Dict[str, Any]:
     """Extract pipeline-level metadata and configuration"""
     metadata = {}
@@ -126,7 +137,7 @@ def extract_pipeline_metadata(instance, args, kwargs) -> Dict[str, Any]:
                 connection_info = {
                     "source": source,
                     "target": target,
-                    "data": str(data) if data else None
+                    "data": str(data) if data else None,
                 }
                 connections.append(connection_info)
             metadata["connections"] = connections
@@ -134,10 +145,14 @@ def extract_pipeline_metadata(instance, args, kwargs) -> Dict[str, Any]:
             # Component list with types
             components = []
             for node in graph.nodes():
-                node_data = graph.nodes[node] if hasattr(graph.nodes[node], "get") else {}
+                node_data = (
+                    graph.nodes[node] if hasattr(graph.nodes[node], "get") else {}
+                )
                 component_info = {
                     "name": node,
-                    "type": str(type(node_data.get("instance", ""))) if node_data.get("instance") else "unknown"
+                    "type": str(type(node_data.get("instance", "")))
+                    if node_data.get("instance")
+                    else "unknown",
                 }
                 components.append(component_info)
             metadata["components"] = components
@@ -157,7 +172,9 @@ def extract_pipeline_metadata(instance, args, kwargs) -> Dict[str, Any]:
                     if isinstance(value, (str, int, float, bool)):
                         sanitized_input[key] = value
                     elif isinstance(value, dict):
-                        sanitized_input[key] = {k: str(v)[:100] for k, v in value.items()}
+                        sanitized_input[key] = {
+                            k: str(v)[:100] for k, v in value.items()
+                        }
                     else:
                         sanitized_input[key] = str(type(value)).__name__
                 metadata["input_data"] = sanitized_input
@@ -168,33 +185,38 @@ def extract_pipeline_metadata(instance, args, kwargs) -> Dict[str, Any]:
 
     return metadata
 
+
 def extract_component_connections(instance) -> Dict[str, Any]:
     """Extract component connection and data flow information"""
     connections = {}
 
     try:
         # Extract senders (components that send data to this component)
-        if hasattr(instance, "_component_config") and hasattr(instance._component_config, "input_sockets"):
+        if hasattr(instance, "_component_config") and hasattr(
+            instance._component_config, "input_sockets"
+        ):
             senders = []
             for socket_name, socket in instance._component_config.input_sockets.items():
                 if hasattr(socket, "_senders") and socket._senders:
                     for sender in socket._senders:
-                        sender_info = {
-                            "component": str(sender),
-                            "socket": socket_name
-                        }
+                        sender_info = {"component": str(sender), "socket": socket_name}
                         senders.append(sender_info)
             connections["senders"] = senders
 
         # Extract receivers (components that receive data from this component)
-        if hasattr(instance, "_component_config") and hasattr(instance._component_config, "output_sockets"):
+        if hasattr(instance, "_component_config") and hasattr(
+            instance._component_config, "output_sockets"
+        ):
            receivers = []
-            for socket_name, socket in instance._component_config.output_sockets.items():
+            for (
+                socket_name,
+                socket,
+            ) in instance._component_config.output_sockets.items():
                 if hasattr(socket, "_receivers") and socket._receivers:
                     for receiver in socket._receivers:
                         receiver_info = {
                             "component": str(receiver),
-                            "socket": socket_name
+                            "socket": socket_name,
                         }
                         receivers.append(receiver_info)
             connections["receivers"] = receivers
@@ -205,26 +227,50 @@ def extract_component_connections(instance) -> Dict[str, Any]:
 
     return connections
 
-def process_haystack_response(response, operation_type, server_address, server_port,
-                              environment, application_name, metrics, start_time, span,
-                              capture_message_content, disable_metrics, version, instance=None,
-                              args=None, endpoint=None, **kwargs):
+
+def process_haystack_response(
+    response,
+    operation_type,
+    server_address,
+    server_port,
+    environment,
+    application_name,
+    metrics,
+    start_time,
+    span,
+    capture_message_content,
+    disable_metrics,
+    version,
+    instance=None,
+    args=None,
+    endpoint=None,
+    **kwargs,
+):
     """Enhanced response processing with comprehensive technical details and optimized performance"""
 
     end_time = time.time()
 
     # Essential attributes
     common_framework_span_attributes(
-        type("Scope", (), {
-            "_span": span,
-            "_server_address": server_address,
-            "_server_port": server_port,
-            "_start_time": start_time,
-            "_end_time": end_time
-        })(),
+        type(
+            "Scope",
+            (),
+            {
+                "_span": span,
+                "_server_address": server_address,
+                "_server_port": server_port,
+                "_start_time": start_time,
+                "_end_time": end_time,
+            },
+        )(),
         SemanticConvention.GEN_AI_SYSTEM_HAYSTACK,
-        server_address, server_port, environment, application_name,
-        version, endpoint, instance
+        server_address,
+        server_port,
+        environment,
+        application_name,
+        version,
+        endpoint,
+        instance,
     )
 
     # Core operation attributes
@@ -233,41 +279,60 @@ def process_haystack_response(response, operation_type, server_address, server_port,
     # Enhanced technical details collection
     if instance:
         # Extract comprehensive component technical details
-        tech_details = extract_component_technical_details(instance, args, kwargs, endpoint)
+        tech_details = extract_component_technical_details(
+            instance, args, kwargs, endpoint
+        )
 
         # Apply component technical attributes using new semantic conventions
         if tech_details.get("class_name"):
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_CLASS_NAME, tech_details["class_name"])
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_CLASS_NAME,
+                tech_details["class_name"],
+            )
 
         if tech_details.get("input_types"):
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_INPUT_TYPES,
-                               json.dumps(tech_details["input_types"]))
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_INPUT_TYPES,
+                json.dumps(tech_details["input_types"]),
+            )
 
         if tech_details.get("output_types"):
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_OUTPUT_TYPES,
-                               json.dumps(tech_details["output_types"]))
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_OUTPUT_TYPES,
+                json.dumps(tech_details["output_types"]),
+            )
 
         if tech_details.get("input_spec"):
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_INPUT_SPEC,
-                               json.dumps(tech_details["input_spec"]))
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_INPUT_SPEC,
+                json.dumps(tech_details["input_spec"]),
+            )
 
         if tech_details.get("output_spec"):
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_OUTPUT_SPEC,
-                               json.dumps(tech_details["output_spec"]))
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_OUTPUT_SPEC,
+                json.dumps(tech_details["output_spec"]),
+            )
 
         # Component connections and data flow
         connections = extract_component_connections(instance)
         if connections.get("senders"):
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_SENDERS,
-                               json.dumps(connections["senders"]))
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_SENDERS,
+                json.dumps(connections["senders"]),
+            )
 
         if connections.get("receivers"):
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_RECEIVERS,
-                               json.dumps(connections["receivers"]))
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_RECEIVERS,
+                json.dumps(connections["receivers"]),
+            )
 
     # Enhanced telemetry - pipeline level
     if endpoint == "pipeline" and isinstance(response, dict):
-        span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_CONTEXT_COUNT, len(response))
+        span.set_attribute(
+            SemanticConvention.GEN_AI_FRAMEWORK_CONTEXT_COUNT, len(response)
+        )
 
         # Enhanced pipeline metadata collection
         if instance:
@@ -275,24 +340,34 @@ def process_haystack_response(response, operation_type, server_address, server_port,
 
             # Apply pipeline metadata using new semantic conventions
             if pipeline_metadata.get("component_count"):
-                span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_COMPONENT_COUNT,
-                                   pipeline_metadata["component_count"])
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_COMPONENT_COUNT,
+                    pipeline_metadata["component_count"],
+                )
 
             if pipeline_metadata.get("max_runs_per_component"):
-                span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_MAX_RUNS,
-                                   pipeline_metadata["max_runs_per_component"])
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_MAX_RUNS,
+                    pipeline_metadata["max_runs_per_component"],
+                )
 
             if pipeline_metadata.get("connections"):
-                span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_CONNECTIONS,
-                                   json.dumps(pipeline_metadata["connections"]))
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_CONNECTIONS,
+                    json.dumps(pipeline_metadata["connections"]),
+                )
 
             if pipeline_metadata.get("components"):
-                span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_METADATA,
-                                   json.dumps(pipeline_metadata["components"]))
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_METADATA,
+                    json.dumps(pipeline_metadata["components"]),
+                )
 
             if pipeline_metadata.get("input_data"):
-                span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_INPUT_DATA,
-                                   json.dumps(pipeline_metadata["input_data"]))
+                span.set_attribute(
+                    SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_INPUT_DATA,
+                    json.dumps(pipeline_metadata["input_data"]),
+                )
 
         # Pipeline output data
         if response:
@@ -305,53 +380,87 @@ def process_haystack_response(response, operation_type, server_address, server_port,
                     sanitized_output[key] = f"{len(value['replies'])} replies"
                 else:
                     sanitized_output[key] = str(type(value)).__name__
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_OUTPUT_DATA,
-                               json.dumps(sanitized_output))
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_OUTPUT_DATA,
+                json.dumps(sanitized_output),
+            )
 
         # Fast LLM response extraction
         for key, value in response.items():
-            if key in ["llm", "generator"] and isinstance(value, dict) and "replies" in value:
+            if (
+                key in ["llm", "generator"]
+                and isinstance(value, dict)
+                and "replies" in value
+            ):
                 replies = value["replies"]
                 if replies and capture_message_content:
-                    span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, str(replies[0])[:500])
+                    span.set_attribute(
+                        SemanticConvention.GEN_AI_CONTENT_COMPLETION,
+                        str(replies[0])[:500],
+                    )
                 break
 
     # Enhanced telemetry - retriever level
-    elif "retriever" in endpoint and isinstance(response, dict) and "documents" in response:
+    elif (
+        "retriever" in endpoint
+        and isinstance(response, dict)
+        and "documents" in response
+    ):
         docs = response["documents"]
-        span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_RETRIEVAL_COUNT, object_count(docs))
-        span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_DOCUMENTS_COUNT, object_count(docs))
+        span.set_attribute(
+            SemanticConvention.GEN_AI_FRAMEWORK_RETRIEVAL_COUNT, object_count(docs)
+        )
+        span.set_attribute(
+            SemanticConvention.GEN_AI_FRAMEWORK_DOCUMENTS_COUNT, object_count(docs)
+        )
 
         # Component identification
         if instance:
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_NAME, endpoint)
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_TYPE, "retriever")
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_NAME, endpoint
+            )
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_TYPE, "retriever"
+            )
 
     # Enhanced telemetry - generator level
     elif "generator" in endpoint:
         # Component identification
         if instance:
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_NAME, endpoint)
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_TYPE, "generator")
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_NAME, endpoint
+            )
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_TYPE, "generator"
+            )
 
         if args and capture_message_content:
             span.set_attribute(SemanticConvention.GEN_AI_PROMPT, str(args[0])[:500])
 
         if isinstance(response, dict) and "replies" in response:
             replies = response["replies"]
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_CONTEXT_COUNT, object_count(replies))
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_CONTEXT_COUNT, object_count(replies)
+            )
 
     # Enhanced telemetry - prompt builder level
     elif endpoint == "prompt_builder":
         # Component identification
         if instance:
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_NAME, endpoint)
-            span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_TYPE, "prompt_builder")
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_NAME, endpoint
+            )
+            span.set_attribute(
+                SemanticConvention.GEN_AI_FRAMEWORK_COMPONENT_TYPE, "prompt_builder"
+            )
 
         if kwargs and capture_message_content:
             for key, value in kwargs.items():
                 if key in ["documents", "question"] and value:
-                    span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_CONTEXT_COUNT, object_count([value]))
+                    span.set_attribute(
+                        SemanticConvention.GEN_AI_FRAMEWORK_CONTEXT_COUNT,
+                        object_count([value]),
+                    )
                     break
 
     # Component visit tracking (simulate component execution count)
@@ -361,17 +470,30 @@ def process_haystack_response(response, operation_type, server_address, server_port,
 
     # Duration and status
     execution_time = end_time - start_time
-    span.set_attribute(SemanticConvention.GEN_AI_CLIENT_OPERATION_DURATION, execution_time)
+    span.set_attribute(
+        SemanticConvention.GEN_AI_CLIENT_OPERATION_DURATION, execution_time
+    )
 
     # Pipeline execution time tracking
    if endpoint == "pipeline":
-        span.set_attribute(SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_EXECUTION_TIME, execution_time)
+        span.set_attribute(
+            SemanticConvention.GEN_AI_FRAMEWORK_PIPELINE_EXECUTION_TIME, execution_time
+        )
 
     span.set_status(Status(StatusCode.OK))
 
     # Metrics
     if not disable_metrics:
-        record_framework_metrics(metrics, operation_type, SemanticConvention.GEN_AI_SYSTEM_HAYSTACK,
-                                 server_address, server_port, environment, application_name, start_time, end_time)
+        record_framework_metrics(
+            metrics,
+            operation_type,
+            SemanticConvention.GEN_AI_SYSTEM_HAYSTACK,
+            server_address,
+            server_port,
+            environment,
+            application_name,
+            start_time,
+            end_time,
+        )
 
     return response
--- a/openlit/instrumentation/julep/__init__.py
+++ b/openlit/instrumentation/julep/__init__.py
@@ -6,16 +6,13 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
-from openlit.instrumentation.julep.julep import (
-    wrap_julep
-)
+from openlit.instrumentation.julep.julep import wrap_julep
 
-from openlit.instrumentation.julep.async_julep import (
-    async_wrap_julep
-)
+from openlit.instrumentation.julep.async_julep import async_wrap_julep
 
 _instruments = ("julep >= 1.42.0",)
 
+
 class JulepInstrumentor(BaseInstrumentor):
     """
     An instrumentor for Julep's client library.
@@ -38,43 +35,96 @@ class JulepInstrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "julep.resources.agents.agents",
             "AgentsResource.create",
-            wrap_julep("julep.agents_create", version, environment, application_name,
-                       tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            wrap_julep(
+                "julep.agents_create",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
         wrap_function_wrapper(
             "julep.resources.tasks",
             "TasksResource.create",
-            wrap_julep("julep.task_create", version, environment, application_name,
-                       tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            wrap_julep(
+                "julep.task_create",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
        )
         wrap_function_wrapper(
             "julep.resources.executions.executions",
             "ExecutionsResource.create",
-            wrap_julep("julep.execution_create", version, environment, application_name,
-                       tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            wrap_julep(
+                "julep.execution_create",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
         # async
         wrap_function_wrapper(
             "julep.resources.agents.agents",
             "AsyncAgentsResource.create",
-            async_wrap_julep("julep.agents_create", version, environment, application_name,
-                             tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_wrap_julep(
+                "julep.agents_create",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
         wrap_function_wrapper(
             "julep.resources.tasks",
             "AsyncTasksResource.create",
-            async_wrap_julep("julep.task_create", version, environment, application_name,
-                             tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_wrap_julep(
+                "julep.task_create",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
         wrap_function_wrapper(
             "julep.resources.executions.executions",
             "AsyncExecutionsResource.create",
-            async_wrap_julep("julep.execution_create", version, environment, application_name,
-                             tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+            async_wrap_julep(
+                "julep.execution_create",
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
-
     def _uninstrument(self, **kwargs):
         # Proper uninstrumentation logic to revert patched methods
         pass