openlit 1.34.30__py3-none-any.whl → 1.34.32__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.
- openlit/__helpers.py +235 -86
- openlit/__init__.py +19 -14
- openlit/_instrumentors.py +2 -1
- openlit/evals/all.py +50 -21
- openlit/evals/bias_detection.py +47 -20
- openlit/evals/hallucination.py +53 -22
- openlit/evals/toxicity.py +50 -21
- openlit/evals/utils.py +54 -30
- openlit/guard/all.py +61 -19
- openlit/guard/prompt_injection.py +34 -14
- openlit/guard/restrict_topic.py +46 -15
- openlit/guard/sensitive_topic.py +34 -14
- openlit/guard/utils.py +58 -22
- openlit/instrumentation/ag2/__init__.py +113 -6
- openlit/instrumentation/ag2/ag2.py +459 -17
- openlit/instrumentation/ag2/async_ag2.py +459 -17
- openlit/instrumentation/ag2/utils.py +475 -31
- openlit/instrumentation/ai21/__init__.py +43 -14
- openlit/instrumentation/ai21/ai21.py +47 -21
- openlit/instrumentation/ai21/async_ai21.py +47 -21
- openlit/instrumentation/ai21/utils.py +299 -78
- openlit/instrumentation/anthropic/__init__.py +21 -4
- openlit/instrumentation/anthropic/anthropic.py +28 -17
- openlit/instrumentation/anthropic/async_anthropic.py +28 -17
- openlit/instrumentation/anthropic/utils.py +145 -35
- openlit/instrumentation/assemblyai/__init__.py +11 -2
- openlit/instrumentation/assemblyai/assemblyai.py +15 -4
- openlit/instrumentation/assemblyai/utils.py +120 -25
- openlit/instrumentation/astra/__init__.py +43 -10
- openlit/instrumentation/astra/astra.py +28 -5
- openlit/instrumentation/astra/async_astra.py +28 -5
- openlit/instrumentation/astra/utils.py +151 -55
- openlit/instrumentation/azure_ai_inference/__init__.py +43 -10
- openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +53 -21
- openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +53 -21
- openlit/instrumentation/azure_ai_inference/utils.py +307 -83
- openlit/instrumentation/bedrock/__init__.py +21 -4
- openlit/instrumentation/bedrock/bedrock.py +63 -25
- openlit/instrumentation/bedrock/utils.py +139 -30
- openlit/instrumentation/chroma/__init__.py +89 -16
- openlit/instrumentation/chroma/chroma.py +28 -6
- openlit/instrumentation/chroma/utils.py +167 -51
- openlit/instrumentation/cohere/__init__.py +63 -18
- openlit/instrumentation/cohere/async_cohere.py +63 -24
- openlit/instrumentation/cohere/cohere.py +63 -24
- openlit/instrumentation/cohere/utils.py +286 -73
- openlit/instrumentation/controlflow/__init__.py +35 -9
- openlit/instrumentation/controlflow/controlflow.py +66 -33
- openlit/instrumentation/crawl4ai/__init__.py +25 -10
- openlit/instrumentation/crawl4ai/async_crawl4ai.py +78 -31
- openlit/instrumentation/crawl4ai/crawl4ai.py +78 -31
- openlit/instrumentation/crewai/__init__.py +40 -15
- openlit/instrumentation/crewai/async_crewai.py +32 -7
- openlit/instrumentation/crewai/crewai.py +32 -7
- openlit/instrumentation/crewai/utils.py +159 -56
- openlit/instrumentation/dynamiq/__init__.py +46 -12
- openlit/instrumentation/dynamiq/dynamiq.py +74 -33
- openlit/instrumentation/elevenlabs/__init__.py +23 -4
- openlit/instrumentation/elevenlabs/async_elevenlabs.py +16 -4
- openlit/instrumentation/elevenlabs/elevenlabs.py +16 -4
- openlit/instrumentation/elevenlabs/utils.py +128 -25
- openlit/instrumentation/embedchain/__init__.py +11 -2
- openlit/instrumentation/embedchain/embedchain.py +68 -35
- openlit/instrumentation/firecrawl/__init__.py +24 -7
- openlit/instrumentation/firecrawl/firecrawl.py +46 -20
- openlit/instrumentation/google_ai_studio/__init__.py +45 -10
- openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +67 -44
- openlit/instrumentation/google_ai_studio/google_ai_studio.py +67 -44
- openlit/instrumentation/google_ai_studio/utils.py +180 -67
- openlit/instrumentation/gpt4all/__init__.py +22 -7
- openlit/instrumentation/gpt4all/gpt4all.py +67 -29
- openlit/instrumentation/gpt4all/utils.py +285 -61
- openlit/instrumentation/gpu/__init__.py +128 -47
- openlit/instrumentation/groq/__init__.py +21 -4
- openlit/instrumentation/groq/async_groq.py +33 -21
- openlit/instrumentation/groq/groq.py +33 -21
- openlit/instrumentation/groq/utils.py +192 -55
- openlit/instrumentation/haystack/__init__.py +70 -24
- openlit/instrumentation/haystack/async_haystack.py +28 -6
- openlit/instrumentation/haystack/haystack.py +28 -6
- openlit/instrumentation/haystack/utils.py +196 -74
- openlit/instrumentation/julep/__init__.py +69 -19
- openlit/instrumentation/julep/async_julep.py +53 -27
- openlit/instrumentation/julep/julep.py +53 -28
- openlit/instrumentation/langchain/__init__.py +74 -63
- openlit/instrumentation/langchain/callback_handler.py +1100 -0
- openlit/instrumentation/langchain_community/__init__.py +13 -2
- openlit/instrumentation/langchain_community/async_langchain_community.py +23 -5
- openlit/instrumentation/langchain_community/langchain_community.py +23 -5
- openlit/instrumentation/langchain_community/utils.py +35 -9
- openlit/instrumentation/letta/__init__.py +68 -15
- openlit/instrumentation/letta/letta.py +99 -54
- openlit/instrumentation/litellm/__init__.py +43 -14
- openlit/instrumentation/litellm/async_litellm.py +51 -26
- openlit/instrumentation/litellm/litellm.py +51 -26
- openlit/instrumentation/litellm/utils.py +304 -102
- openlit/instrumentation/llamaindex/__init__.py +267 -90
- openlit/instrumentation/llamaindex/async_llamaindex.py +28 -6
- openlit/instrumentation/llamaindex/llamaindex.py +28 -6
- openlit/instrumentation/llamaindex/utils.py +204 -91
- openlit/instrumentation/mem0/__init__.py +11 -2
- openlit/instrumentation/mem0/mem0.py +50 -29
- openlit/instrumentation/milvus/__init__.py +10 -2
- openlit/instrumentation/milvus/milvus.py +31 -6
- openlit/instrumentation/milvus/utils.py +166 -67
- openlit/instrumentation/mistral/__init__.py +63 -18
- openlit/instrumentation/mistral/async_mistral.py +63 -24
- openlit/instrumentation/mistral/mistral.py +63 -24
- openlit/instrumentation/mistral/utils.py +277 -69
- openlit/instrumentation/multion/__init__.py +69 -19
- openlit/instrumentation/multion/async_multion.py +57 -26
- openlit/instrumentation/multion/multion.py +57 -26
- openlit/instrumentation/ollama/__init__.py +39 -18
- openlit/instrumentation/ollama/async_ollama.py +57 -26
- openlit/instrumentation/ollama/ollama.py +57 -26
- openlit/instrumentation/ollama/utils.py +226 -50
- openlit/instrumentation/openai/__init__.py +156 -32
- openlit/instrumentation/openai/async_openai.py +147 -67
- openlit/instrumentation/openai/openai.py +150 -67
- openlit/instrumentation/openai/utils.py +657 -185
- openlit/instrumentation/openai_agents/__init__.py +5 -1
- openlit/instrumentation/openai_agents/processor.py +110 -90
- openlit/instrumentation/phidata/__init__.py +13 -5
- openlit/instrumentation/phidata/phidata.py +67 -32
- openlit/instrumentation/pinecone/__init__.py +48 -9
- openlit/instrumentation/pinecone/async_pinecone.py +27 -5
- openlit/instrumentation/pinecone/pinecone.py +27 -5
- openlit/instrumentation/pinecone/utils.py +153 -47
- openlit/instrumentation/premai/__init__.py +22 -7
- openlit/instrumentation/premai/premai.py +51 -26
- openlit/instrumentation/premai/utils.py +246 -59
- openlit/instrumentation/pydantic_ai/__init__.py +49 -22
- openlit/instrumentation/pydantic_ai/pydantic_ai.py +69 -16
- openlit/instrumentation/pydantic_ai/utils.py +89 -24
- openlit/instrumentation/qdrant/__init__.py +19 -4
- openlit/instrumentation/qdrant/async_qdrant.py +33 -7
- openlit/instrumentation/qdrant/qdrant.py +33 -7
- openlit/instrumentation/qdrant/utils.py +228 -93
- openlit/instrumentation/reka/__init__.py +23 -10
- openlit/instrumentation/reka/async_reka.py +17 -11
- openlit/instrumentation/reka/reka.py +17 -11
- openlit/instrumentation/reka/utils.py +138 -36
- openlit/instrumentation/together/__init__.py +44 -12
- openlit/instrumentation/together/async_together.py +50 -27
- openlit/instrumentation/together/together.py +50 -27
- openlit/instrumentation/together/utils.py +301 -71
- openlit/instrumentation/transformers/__init__.py +2 -1
- openlit/instrumentation/transformers/transformers.py +13 -3
- openlit/instrumentation/transformers/utils.py +139 -36
- openlit/instrumentation/vertexai/__init__.py +81 -16
- openlit/instrumentation/vertexai/async_vertexai.py +33 -15
- openlit/instrumentation/vertexai/utils.py +123 -27
- openlit/instrumentation/vertexai/vertexai.py +33 -15
- openlit/instrumentation/vllm/__init__.py +12 -5
- openlit/instrumentation/vllm/utils.py +121 -31
- openlit/instrumentation/vllm/vllm.py +16 -10
- openlit/otel/events.py +35 -10
- openlit/otel/metrics.py +32 -24
- openlit/otel/tracing.py +24 -9
- openlit/semcov/__init__.py +82 -6
- {openlit-1.34.30.dist-info → openlit-1.34.32.dist-info}/METADATA +2 -1
- openlit-1.34.32.dist-info/RECORD +166 -0
- openlit/instrumentation/langchain/async_langchain.py +0 -102
- openlit/instrumentation/langchain/langchain.py +0 -102
- openlit/instrumentation/langchain/utils.py +0 -252
- openlit-1.34.30.dist-info/RECORD +0 -168
- {openlit-1.34.30.dist-info → openlit-1.34.32.dist-info}/LICENSE +0 -0
- {openlit-1.34.30.dist-info → openlit-1.34.32.dist-info}/WHEEL +0 -0
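Most of the churn in this release is mechanical reformatting across the instrumentation modules; the one structural change visible in the list above is the LangChain instrumentation, where langchain.py, async_langchain.py, and utils.py are removed in favor of a new callback_handler.py. Neither affects the public entry point. A minimal sketch of typical initialization, unchanged across these versions (the endpoint value is a hypothetical placeholder):

import openlit

# The instrumentors listed above are applied automatically on init()
openlit.init(
    otlp_endpoint="http://127.0.0.1:4318",  # hypothetical local collector
    application_name="demo-app",
    environment="development",
)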
openlit/instrumentation/ag2/utils.py

@@ -1,6 +1,7 @@
 """
 AG2 OpenTelemetry instrumentation utility functions
 """
+
 import time
 
 from opentelemetry.trace import Status, StatusCode
@@ -12,6 +13,7 @@ from openlit.__helpers import (
 )
 from openlit.semcov import SemanticConvention
 
+
 def calculate_tokens_and_cost(response, request_model, pricing_info):
     """
     Calculate the input, output tokens, and their respective costs from AG2 response.
@@ -21,7 +23,9 @@ def calculate_tokens_and_cost(response, request_model, pricing_info):
 
     # Early return if response doesn't have cost data
     if not hasattr(response, "cost") or response.cost is None:
-        cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
+        cost = get_chat_model_cost(
+            request_model, pricing_info, input_tokens, output_tokens
+        )
         return input_tokens, output_tokens, cost
 
     try:
@@ -34,6 +38,7 @@ def calculate_tokens_and_cost(response, request_model, pricing_info):
     cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
     return input_tokens, output_tokens, cost
 
+
def _extract_tokens_from_cost(cost_data):
     """
     Extract input and output tokens from AG2 cost data structure.
@@ -52,6 +57,7 @@ def _extract_tokens_from_cost(cost_data):
 
     return input_tokens, output_tokens
 
+
 def format_content(chat_history):
     """
     Format the chat history into a string for span events.
@@ -67,32 +73,62 @@ def format_content(chat_history):
 
     return "\n".join(formatted_messages)
 
-def common_agent_logic(scope, pricing_info, environment, application_name, metrics,
-    capture_message_content, disable_metrics, version, operation_type):
+
+def common_agent_logic(
+    scope,
+    pricing_info,
+    environment,
+    application_name,
+    metrics,
+    capture_message_content,
+    disable_metrics,
+    version,
+    operation_type,
+):
     """
     Process agent request and generate Telemetry
     """
 
     # Common Span Attributes
-    common_span_attributes(scope, operation_type, SemanticConvention.GEN_AI_SYSTEM_AG2,
-        scope._server_address, scope._server_port, scope._request_model,
-        scope._response_model, environment, application_name, False, 0,
-        scope._end_time - scope._start_time, version)
+    common_span_attributes(
+        scope,
+        operation_type,
+        SemanticConvention.GEN_AI_SYSTEM_AG2,
+        scope._server_address,
+        scope._server_port,
+        scope._request_model,
+        scope._response_model,
+        environment,
+        application_name,
+        False,
+        0,
+        scope._end_time - scope._start_time,
+        version,
+    )
 
     # Span Attributes for Agent-specific parameters
     scope._span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, scope._agent_name)
 
     # Span Attributes for Response parameters
     if hasattr(scope, "_input_tokens"):
-        scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
-        scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
-        scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens
+        )
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens
+        )
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE,
+            scope._input_tokens + scope._output_tokens,
+        )
         scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, scope._cost)
 
     # Span Attributes for Content
     if capture_message_content and hasattr(scope, "_chat_history"):
         chat_content = format_content(scope._chat_history)
-        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, chat_content)
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_CONTENT_COMPLETION, chat_content
+        )
 
     # To be removed once the change to span_attributes (from span events) is complete
     scope._span.add_event(
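This hunk is a black-style reflow of common_agent_logic plus new token-usage span attributes; the helper still reads everything from an ad-hoc scope object rather than named parameters. A minimal sketch of the calling convention, with names taken from the hunk above; the span and the init-time settings (pricing_info, metrics, and so on) are assumed to already be in scope, and every concrete value is a placeholder:

import time

# Ad-hoc namespace object, built exactly as in the AG2 helpers
scope = type("GenericScope", (), {})()
scope._start_time = time.time()
scope._span = span  # an active OpenTelemetry span (assumed)
scope._agent_name = "assistant"  # placeholder
scope._request_model = "gpt-4o"  # placeholder
scope._response_model = scope._request_model
scope._server_address, scope._server_port = "api.openai.com", 443  # placeholders
scope._end_time = time.time()

common_agent_logic(
    scope, pricing_info, environment, application_name,
    metrics, capture_message_content, disable_metrics,
    version, operation_type,
)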
@@ -104,20 +140,51 @@ def common_agent_logic(scope, pricing_info, environment, application_name, metri
 
     # Set agent description for create agent operation
     if hasattr(scope, "_system_message"):
-        scope._span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, scope._system_message)
+        scope._span.set_attribute(
+            SemanticConvention.GEN_AI_AGENT_DESCRIPTION, scope._system_message
+        )
 
     scope._span.set_status(Status(StatusCode.OK))
 
     # Metrics
-    if not disable_metrics and hasattr(scope, "_input_tokens"):
-        record_completion_metrics(metrics, operation_type, SemanticConvention.GEN_AI_SYSTEM_AG2,
-            scope._server_address, scope._server_port, scope._request_model, scope._response_model,
-            environment, application_name, scope._start_time, scope._end_time, scope._input_tokens,
-            scope._output_tokens, scope._cost, 0, scope._end_time - scope._start_time)
+    if not disable_metrics and metrics is not None and hasattr(scope, "_input_tokens"):
+        record_completion_metrics(
+            metrics,
+            operation_type,
+            SemanticConvention.GEN_AI_SYSTEM_AG2,
+            scope._server_address,
+            scope._server_port,
+            scope._request_model,
+            scope._response_model,
+            environment,
+            application_name,
+            scope._start_time,
+            scope._end_time,
+            scope._input_tokens,
+            scope._output_tokens,
+            scope._cost,
+            0,
+            scope._end_time - scope._start_time,
+        )
+
 
-def process_agent_creation(agent_name, llm_config, system_message, pricing_info,
-    server_port, server_address, environment, application_name, metrics, start_time, span,
-    capture_message_content=False, disable_metrics=False, version="1.0.0", **kwargs):
+def process_agent_creation(
+    agent_name,
+    llm_config,
+    system_message,
+    pricing_info,
+    server_port,
+    server_address,
+    environment,
+    application_name,
+    metrics,
+    start_time,
+    span,
+    capture_message_content=False,
+    disable_metrics=False,
+    version="1.0.0",
+    **kwargs,
+):
     """
     Process agent creation and generate Telemetry
     """
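Beyond formatting, this hunk adds a real guard: metrics can legitimately be None when metric collection is turned off at init time, and the old condition would have passed None into record_completion_metrics. The added check in isolation (metrics_enabled is a hypothetical name, used here only for illustration):

def metrics_enabled(disable_metrics, metrics, scope):
    # Mirrors the guard added above: recording requires metrics not to be
    # disabled, a live metrics dict, and token counts on the scope.
    return (
        not disable_metrics
        and metrics is not None
        and hasattr(scope, "_input_tokens")
    )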
@@ -129,17 +196,41 @@ def process_agent_creation(agent_name, llm_config, system_message, pricing_info,
     scope._end_time = time.time()
     scope._span = span
     scope._agent_name = agent_name
-    scope._request_model = llm_config.get("model", "
+    scope._request_model = llm_config.get("model", "unknown")
     scope._response_model = scope._request_model
     scope._system_message = system_message
     scope._server_address, scope._server_port = server_address, server_port
 
-    common_agent_logic(scope, pricing_info, environment, application_name, metrics,
-        capture_message_content, disable_metrics, version, SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT)
-
-def process_agent_run(response, agent_name, request_model, pricing_info, server_port,
-    server_address, environment, application_name, metrics, start_time, span,
-    capture_message_content=False, disable_metrics=False, version="1.0.0", **kwargs):
+    common_agent_logic(
+        scope,
+        pricing_info,
+        environment,
+        application_name,
+        metrics,
+        capture_message_content,
+        disable_metrics,
+        version,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT,
+    )
+
+
+def process_agent_run(
+    response,
+    agent_name,
+    request_model,
+    pricing_info,
+    server_port,
+    server_address,
+    environment,
+    application_name,
+    metrics,
+    start_time,
+    span,
+    capture_message_content=False,
+    disable_metrics=False,
+    version="1.0.0",
+    **kwargs,
+):
     """
     Process agent run and generate Telemetry
     """
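For context, this is roughly how the wrappers in ag2/ag2.py would drive the helper above when an agent is constructed; every concrete value below is a hypothetical placeholder, and the surrounding variables (pricing_info, metrics, span, and so on) are assumed to be in scope, but the keyword names match the new signature exactly:

process_agent_creation(
    agent_name="assistant",  # placeholder
    llm_config={"model": "gpt-4o"},  # a config without "model" now records "unknown"
    system_message="You are a helpful assistant",
    pricing_info=pricing_info,
    server_port=443,
    server_address="api.openai.com",
    environment=environment,
    application_name=application_name,
    metrics=metrics,
    start_time=start_time,
    span=span,
    capture_message_content=True,
    disable_metrics=False,
    version=version,
)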
@@ -157,19 +248,372 @@ def process_agent_run(response, agent_name, request_model, pricing_info, server_
 
     # Calculate tokens and cost
     scope._input_tokens, scope._output_tokens, scope._cost = calculate_tokens_and_cost(
-        response, request_model, pricing_info)
+        response, request_model, pricing_info
+    )
 
     # Extract response model from cost data
     try:
         if hasattr(response, "cost") and response.cost is not None:
             cost_data = response.cost.get("usage_including_cached_inference", {})
-            scope._response_model = list(cost_data.keys())[1] if len(cost_data) > 1 else request_model
+            scope._response_model = (
+                list(cost_data.keys())[1] if len(cost_data) > 1 else request_model
+            )
         else:
             scope._response_model = request_model
     except (AttributeError, IndexError, KeyError, TypeError):
         scope._response_model = request_model
 
-    common_agent_logic(scope, pricing_info, environment, application_name, metrics,
-        capture_message_content, disable_metrics, version, SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK)
+    common_agent_logic(
+        scope,
+        pricing_info,
+        environment,
+        application_name,
+        metrics,
+        capture_message_content,
+        disable_metrics,
+        version,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK,
+    )
+
+    return response
+
+
+def process_agent_generate_reply(
+    response,
+    agent_name,
+    request_model,
+    messages,
+    sender,
+    pricing_info,
+    server_port,
+    server_address,
+    environment,
+    application_name,
+    metrics,
+    start_time,
+    span,
+    capture_message_content=False,
+    disable_metrics=False,
+    version="1.0.0",
+    **kwargs,
+):
+    """
+    Process agent generate_reply and generate Telemetry
+    """
+
+    # Create scope object
+    scope = type("GenericScope", (), {})()
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._agent_name = agent_name
+    scope._request_model = request_model
+    scope._response_model = request_model
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._messages = messages
+    scope._sender_name = getattr(sender, "name", "Unknown") if sender else "Unknown"
+
+    # Set agent-specific attributes
+    span.set_attribute(SemanticConvention.GEN_AI_AGENT_MESSAGE_TYPE, "generate_reply")
+    span.set_attribute(SemanticConvention.GEN_AI_AGENT_SENDER, scope._sender_name)
+
+    # Process response content
+    if response and isinstance(response, str):
+        scope._response_content = response
+    elif response and hasattr(response, "content"):
+        scope._response_content = response.content
+    else:
+        scope._response_content = str(response) if response else ""
+
+    # Try to extract token information if available
+    try:
+        # Mock token calculation for generate_reply
+        scope._input_tokens = len(str(messages)) // 4 if messages else 0
+        scope._output_tokens = len(scope._response_content) // 4
+        scope._cost = get_chat_model_cost(
+            request_model, pricing_info, scope._input_tokens, scope._output_tokens
+        )
+    except Exception:
+        scope._input_tokens = 0
+        scope._output_tokens = 0
+        scope._cost = 0.0
+
+    common_agent_logic(
+        scope,
+        pricing_info,
+        environment,
+        application_name,
+        metrics,
+        capture_message_content,
+        disable_metrics,
+        version,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+    )
 
     return response
+
+
+def process_agent_receive(
+    message,
+    agent_name,
+    sender_name,
+    agent_instance,
+    pricing_info,
+    server_port,
+    server_address,
+    environment,
+    application_name,
+    metrics,
+    start_time,
+    span,
+    capture_message_content=False,
+    disable_metrics=False,
+    version="1.0.0",
+    **kwargs,
+):
+    """
+    Process agent receive and generate Telemetry
+    """
+
+    # Create scope object
+    scope = type("GenericScope", (), {})()
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._agent_name = agent_name
+    scope._sender_name = sender_name
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._message = message
+
+    # Extract model from agent instance
+    if hasattr(agent_instance, "llm_config") and isinstance(
+        agent_instance.llm_config, dict
+    ):
+        scope._request_model = agent_instance.llm_config.get("model", "unknown")
+    else:
+        scope._request_model = "unknown"
+    scope._response_model = scope._request_model
+
+    # Set agent-specific attributes
+    span.set_attribute(SemanticConvention.GEN_AI_AGENT_MESSAGE_TYPE, "receive")
+    span.set_attribute(SemanticConvention.GEN_AI_AGENT_SENDER, sender_name)
+
+    # Content capture for received message
+    if capture_message_content:
+        span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, str(message))
+
+    common_agent_logic(
+        scope,
+        pricing_info,
+        environment,
+        application_name,
+        metrics,
+        capture_message_content,
+        disable_metrics,
+        version,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+    )
+
+
+def process_agent_send(
+    message,
+    agent_name,
+    recipient_name,
+    agent_instance,
+    pricing_info,
+    server_port,
+    server_address,
+    environment,
+    application_name,
+    metrics,
+    start_time,
+    span,
+    capture_message_content=False,
+    disable_metrics=False,
+    version="1.0.0",
+    **kwargs,
+):
+    """
+    Process agent send and generate Telemetry
+    """
+
+    # Create scope object
+    scope = type("GenericScope", (), {})()
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._agent_name = agent_name
+    scope._recipient_name = recipient_name
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._message = message
+
+    # Extract model from agent instance
+    if hasattr(agent_instance, "llm_config") and isinstance(
+        agent_instance.llm_config, dict
+    ):
+        scope._request_model = agent_instance.llm_config.get("model", "unknown")
+    else:
+        scope._request_model = "unknown"
+    scope._response_model = scope._request_model
+
+    # Set agent-specific attributes
+    span.set_attribute(SemanticConvention.GEN_AI_AGENT_MESSAGE_TYPE, "send")
+    span.set_attribute(SemanticConvention.GEN_AI_AGENT_RECIPIENT, recipient_name)
+
+    # Content capture for sent message
+    if capture_message_content:
+        span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, str(message))
+
+    common_agent_logic(
+        scope,
+        pricing_info,
+        environment,
+        application_name,
+        metrics,
+        capture_message_content,
+        disable_metrics,
+        version,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+    )
+
+
+def process_groupchat_operation(
+    group_name,
+    participants,
+    messages,
+    sender,
+    max_turns,
+    request_model,
+    pricing_info,
+    server_port,
+    server_address,
+    environment,
+    application_name,
+    metrics,
+    start_time,
+    span,
+    capture_message_content=False,
+    disable_metrics=False,
+    version="1.0.0",
+    **kwargs,
+):
+    """
+    Process GroupChat operation and generate Telemetry
+    """
+
+    # Create scope object
+    scope = type("GenericScope", (), {})()
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._group_name = group_name
+    scope._participants = participants
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._sender_name = getattr(sender, "name", "Unknown") if sender else "Unknown"
+
+    # Add required model attributes for common_agent_logic
+    scope._request_model = request_model
+    scope._response_model = request_model
+
+    # Set agent name for groupchat
+    scope._agent_name = group_name
+
+    # Set GroupChat-specific attributes
+    span.set_attribute(
+        SemanticConvention.GEN_AI_GROUPCHAT_PARTICIPANTS, ",".join(participants)
+    )
+    span.set_attribute(
+        SemanticConvention.GEN_AI_WORKFLOW_AGENT_COUNT, len(participants)
+    )
+    span.set_attribute(SemanticConvention.GEN_AI_WORKFLOW_EXECUTION_TYPE, "groupchat")
+
+    if max_turns:
+        span.set_attribute(SemanticConvention.GEN_AI_GROUPCHAT_TURN_COUNT, max_turns)
+
+    # Content capture for GroupChat
+    if capture_message_content and messages:
+        span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, str(messages))
+
+    # Use framework operation type for GroupChat
+    common_agent_logic(
+        scope,
+        pricing_info,
+        environment,
+        application_name,
+        metrics,
+        capture_message_content,
+        disable_metrics,
+        version,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_FRAMEWORK,
+    )
+
+
+def process_speaker_selection(
+    last_speaker,
+    selected_speaker,
+    selector,
+    agents,
+    request_model,
+    pricing_info,
+    server_port,
+    server_address,
+    environment,
+    application_name,
+    metrics,
+    start_time,
+    span,
+    capture_message_content=False,
+    disable_metrics=False,
+    version="1.0.0",
+    **kwargs,
+):
+    """
+    Process speaker selection and generate Telemetry
+    """
+
+    # Create scope object
+    scope = type("GenericScope", (), {})()
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._last_speaker = last_speaker
+    scope._selected_speaker = selected_speaker
+    scope._server_address, scope._server_port = server_address, server_port
+
+    # Add required model attributes for common_agent_logic
+    scope._request_model = request_model
+    scope._response_model = request_model
+
+    # Set agent name for speaker selection
+    scope._agent_name = "speaker_selection"
+
+    # Set speaker selection attributes
+    span.set_attribute(
+        SemanticConvention.GEN_AI_GROUPCHAT_SPEAKER_SELECTION, selected_speaker
+    )
+    span.set_attribute(SemanticConvention.GEN_AI_AGENT_SENDER, last_speaker)
+
+    if selector:
+        span.set_attribute(SemanticConvention.GEN_AI_AGENT_ROLE, "selector")
+
+    # Set agent count
+    if agents:
+        span.set_attribute(SemanticConvention.GEN_AI_WORKFLOW_AGENT_COUNT, len(agents))
+
+    # Use agent operation type for speaker selection
+    common_agent_logic(
+        scope,
+        pricing_info,
+        environment,
+        application_name,
+        metrics,
+        capture_message_content,
+        disable_metrics,
+        version,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT,
+    )
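The new generate_reply path cannot always see provider usage data, so it falls back to a rough four-characters-per-token estimate (the // 4 lines above) before pricing the call with get_chat_model_cost. The heuristic in isolation:

def rough_token_estimate(text: str) -> int:
    """Crude ~4-characters-per-token estimate, as used for generate_reply above."""
    return len(text) // 4

print(rough_token_estimate("The quick brown fox jumps over the lazy dog"))  # -> 10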
openlit/instrumentation/ai21/__init__.py

@@ -5,15 +5,12 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
-from openlit.instrumentation.ai21.ai21 import (
-    chat, chat_rag
-)
-from openlit.instrumentation.ai21.async_ai21 import (
-    async_chat, async_chat_rag
-)
+from openlit.instrumentation.ai21.ai21 import chat, chat_rag
+from openlit.instrumentation.ai21.async_ai21 import async_chat, async_chat_rag
 
 _instruments = ("ai21 >= 3.0.0",)
 
+
 class AI21Instrumentor(BaseInstrumentor):
     """
     An instrumentor for AI21 client library.
@@ -36,32 +33,64 @@ class AI21Instrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "ai21.clients.studio.resources.chat.chat_completions",
             "ChatCompletions.create",
-            chat(version, environment, application_name, tracer,
-                pricing_info, capture_message_content, metrics, disable_metrics),
+            chat(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
         # RAG completions
         wrap_function_wrapper(
             "ai21.clients.studio.resources.studio_conversational_rag",
             "StudioConversationalRag.create",
-            chat_rag(version, environment, application_name, tracer,
-                pricing_info, capture_message_content, metrics, disable_metrics),
+            chat_rag(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
         # Async chat completions
         wrap_function_wrapper(
             "ai21.clients.studio.resources.chat.async_chat_completions",
             "AsyncChatCompletions.create",
-            async_chat(version, environment, application_name, tracer,
-                pricing_info, capture_message_content, metrics, disable_metrics),
+            async_chat(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
         # Async RAG completions
         wrap_function_wrapper(
             "ai21.clients.studio.resources.studio_conversational_rag",
             "AsyncStudioConversationalRag.create",
-            async_chat_rag(version, environment, application_name, tracer,
-                pricing_info, capture_message_content, metrics, disable_metrics),
+            async_chat_rag(
+                version,
+                environment,
+                application_name,
+                tracer,
+                pricing_info,
+                capture_message_content,
+                metrics,
+                disable_metrics,
+            ),
         )
 
     def _uninstrument(self, **kwargs):
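The AI21 changes are formatting-only: each wrapper factory (chat, chat_rag, async_chat, async_chat_rag) still receives the same eight arguments, now one per line. For readers unfamiliar with the pattern, a simplified sketch of such a wrapt factory; chat_sketch is a hypothetical stand-in, not openlit's actual implementation:

import time

def chat_sketch(version, environment, application_name, tracer, pricing_info,
                capture_message_content, metrics, disable_metrics):
    """Hypothetical, simplified stand-in for the chat() factory."""

    def wrapper(wrapped, instance, args, kwargs):
        # wrapt invokes this in place of ChatCompletions.create
        start_time = time.time()
        response = wrapped(*args, **kwargs)
        # A real factory would open a span via `tracer`, set request/response
        # attributes, compute cost from `pricing_info`, and record `metrics`.
        return response

    return wrapper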