openlit 1.34.31__py3-none-any.whl → 1.34.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__init__.py +3 -1
- openlit/instrumentation/ag2/__init__.py +92 -1
- openlit/instrumentation/ag2/ag2.py +425 -4
- openlit/instrumentation/ag2/async_ag2.py +425 -4
- openlit/instrumentation/ag2/utils.py +343 -2
- openlit/instrumentation/pydantic_ai/__init__.py +88 -0
- openlit/instrumentation/pydantic_ai/async_pydantic_ai.py +38 -0
- openlit/instrumentation/pydantic_ai/pydantic_ai.py +99 -10
- openlit/instrumentation/pydantic_ai/utils.py +834 -81
- openlit/semcov/__init__.py +59 -0
- {openlit-1.34.31.dist-info → openlit-1.34.33.dist-info}/METADATA +1 -1
- {openlit-1.34.31.dist-info → openlit-1.34.33.dist-info}/RECORD +14 -13
- {openlit-1.34.31.dist-info → openlit-1.34.33.dist-info}/LICENSE +0 -0
- {openlit-1.34.31.dist-info → openlit-1.34.33.dist-info}/WHEEL +0 -0
@@ -8,10 +8,45 @@ from openlit.__helpers import handle_exception, set_server_address_and_port
|
|
8
8
|
from openlit.instrumentation.ag2.utils import (
|
9
9
|
process_agent_creation,
|
10
10
|
process_agent_run,
|
11
|
+
process_agent_generate_reply,
|
12
|
+
process_agent_receive,
|
13
|
+
process_agent_send,
|
14
|
+
process_groupchat_operation,
|
15
|
+
process_speaker_selection,
|
11
16
|
)
|
12
17
|
from openlit.semcov import SemanticConvention
|
13
18
|
|
14
19
|
|
20
|
+
def extract_agent_name(instance, fallback="unknown_agent"):
    """
    Extract agent name from AG2 instance with intelligent fallbacks.

    Args:
        instance: AG2 instance (Agent, GroupChat, etc.)
        fallback: Default name if no name can be extracted

    Returns:
        str: Agent name or meaningful fallback
    """
    # Prefer an explicit, truthy ``name`` attribute when the instance has one.
    explicit_name = getattr(instance, "name", None)
    if explicit_name:
        return explicit_name

    # Otherwise derive a readable identifier from the instance's class name.
    class_name = getattr(instance, "__class__", type(instance)).__name__.lower()

    # Known AG2 class names mapped to snake_case identifiers.
    class_name_map = {
        "conversableagent": "conversable_agent",
        "groupchat": "group_chat",
        "groupchatmanager": "group_chat_manager",
        "agent": "agent",
    }

    return class_name_map.get(class_name, fallback)
|
48
|
+
|
49
|
+
|
15
50
|
def async_conversable_agent(
|
16
51
|
version,
|
17
52
|
environment,
|
@@ -34,7 +69,7 @@ def async_conversable_agent(
|
|
34
69
|
server_address, server_port = set_server_address_and_port(
|
35
70
|
instance, "127.0.0.1", 80
|
36
71
|
)
|
37
|
-
agent_name = kwargs.get("name", "
|
72
|
+
agent_name = kwargs.get("name", "unknown_agent")
|
38
73
|
llm_config = kwargs.get("llm_config", {})
|
39
74
|
system_message = kwargs.get("system_message", "")
|
40
75
|
|
@@ -96,12 +131,12 @@ def async_agent_run(
|
|
96
131
|
)
|
97
132
|
|
98
133
|
# Extract agent name from instance
|
99
|
-
agent_name =
|
134
|
+
agent_name = extract_agent_name(instance)
|
100
135
|
|
101
136
|
# Extract model from instance llm_config
|
102
|
-
request_model = "
|
137
|
+
request_model = "unknown"
|
103
138
|
if hasattr(instance, "llm_config") and isinstance(instance.llm_config, dict):
|
104
|
-
request_model = instance.llm_config.get("model", "
|
139
|
+
request_model = instance.llm_config.get("model", "unknown")
|
105
140
|
|
106
141
|
span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK} {agent_name}"
|
107
142
|
|
@@ -133,3 +168,389 @@ def async_agent_run(
|
|
133
168
|
return response
|
134
169
|
|
135
170
|
return wrapper
|
171
|
+
|
172
|
+
|
173
|
+
def async_agent_generate_reply(
    version,
    environment,
    application_name,
    tracer,
    pricing_info,
    capture_message_content,
    metrics,
    disable_metrics,
):
    """
    Generates a telemetry wrapper for AG2 ConversableAgent.generate_reply (async version).
    """

    def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the AG2 ConversableAgent.generate_reply call (async version).
        """

        server_address, server_port = set_server_address_and_port(
            instance, "127.0.0.1", 80
        )

        # Resolve a human-readable agent name for the span.
        agent_name = extract_agent_name(instance)

        # Pull the request model out of the agent's llm_config when it is a dict.
        request_model = "unknown"
        llm_config = getattr(instance, "llm_config", None)
        if isinstance(llm_config, dict):
            request_model = llm_config.get("model", "unknown")

        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {agent_name}"

        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
            start_time = time.time()
            # NOTE(review): wrapped() is not awaited here even though this lives
            # in the async module — confirm whether the result is an awaitable
            # that process_agent_generate_reply is expected to handle.
            response = wrapped(*args, **kwargs)

            try:
                # args[0] is treated as the messages list, args[1] as the sender.
                messages = args[0] if args else kwargs.get("messages", [])
                sender = args[1] if len(args) > 1 else kwargs.get("sender", None)

                response = process_agent_generate_reply(
                    response=response,
                    agent_name=agent_name,
                    request_model=request_model,
                    messages=messages,
                    sender=sender,
                    pricing_info=pricing_info,
                    server_port=server_port,
                    server_address=server_address,
                    environment=environment,
                    application_name=application_name,
                    metrics=metrics,
                    start_time=start_time,
                    span=span,
                    capture_message_content=capture_message_content,
                    disable_metrics=disable_metrics,
                    version=version,
                )

            except Exception as e:
                # Telemetry failures must never break the instrumented call.
                handle_exception(span, e)

            return response

    return wrapper
|
236
|
+
|
237
|
+
|
238
|
+
def async_agent_receive(
    version,
    environment,
    application_name,
    tracer,
    pricing_info,
    capture_message_content,
    metrics,
    disable_metrics,
):
    """
    Generates a telemetry wrapper for AG2 ConversableAgent.receive (async version).
    """

    def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the AG2 ConversableAgent.receive call (async version).
        """

        server_address, server_port = set_server_address_and_port(
            instance, "127.0.0.1", 80
        )

        # Resolve a human-readable agent name for the span.
        agent_name = extract_agent_name(instance)

        # args[0] is treated as the sender, args[1] as the message.
        sender = args[0] if args else kwargs.get("sender", None)
        sender_name = getattr(sender, "name", "Unknown") if sender else "Unknown"
        message = args[1] if len(args) > 1 else kwargs.get("message", "")

        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {agent_name}"

        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
            start_time = time.time()
            # NOTE(review): wrapped() is not awaited here even though this lives
            # in the async module — confirm intended (matches the generate_reply
            # and send wrappers in this file).
            response = wrapped(*args, **kwargs)

            try:
                process_agent_receive(
                    message=message,
                    agent_name=agent_name,
                    sender_name=sender_name,
                    agent_instance=instance,
                    pricing_info=pricing_info,
                    server_port=server_port,
                    server_address=server_address,
                    environment=environment,
                    application_name=application_name,
                    metrics=metrics,
                    start_time=start_time,
                    span=span,
                    capture_message_content=capture_message_content,
                    disable_metrics=disable_metrics,
                    version=version,
                )

            except Exception as e:
                # Telemetry failures must never break the instrumented call.
                handle_exception(span, e)

            return response

    return wrapper
|
302
|
+
|
303
|
+
|
304
|
+
def async_agent_send(
    version,
    environment,
    application_name,
    tracer,
    pricing_info,
    capture_message_content,
    metrics,
    disable_metrics,
):
    """
    Generates a telemetry wrapper for AG2 ConversableAgent.send (async version).
    """

    def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the AG2 ConversableAgent.send call (async version).
        """

        server_address, server_port = set_server_address_and_port(
            instance, "127.0.0.1", 80
        )

        # Resolve a human-readable agent name for the span.
        agent_name = extract_agent_name(instance)

        # args[0] is treated as the recipient, args[1] as the message.
        recipient = args[0] if args else kwargs.get("recipient", None)
        recipient_name = (
            getattr(recipient, "name", "Unknown") if recipient else "Unknown"
        )
        message = args[1] if len(args) > 1 else kwargs.get("message", "")

        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {agent_name}"

        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
            start_time = time.time()
            # NOTE(review): wrapped() is not awaited here even though this lives
            # in the async module — confirm intended (matches the generate_reply
            # and receive wrappers in this file).
            response = wrapped(*args, **kwargs)

            try:
                process_agent_send(
                    message=message,
                    agent_name=agent_name,
                    recipient_name=recipient_name,
                    agent_instance=instance,
                    pricing_info=pricing_info,
                    server_port=server_port,
                    server_address=server_address,
                    environment=environment,
                    application_name=application_name,
                    metrics=metrics,
                    start_time=start_time,
                    span=span,
                    capture_message_content=capture_message_content,
                    disable_metrics=disable_metrics,
                    version=version,
                )

            except Exception as e:
                # Telemetry failures must never break the instrumented call.
                handle_exception(span, e)

            return response

    return wrapper
|
370
|
+
|
371
|
+
|
372
|
+
def async_groupchat_manager_run_chat(
    version,
    environment,
    application_name,
    tracer,
    pricing_info,
    capture_message_content,
    metrics,
    disable_metrics,
):
    """
    Generates a telemetry wrapper for AG2 GroupChatManager.run_chat (async version).
    """

    async def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the AG2 GroupChatManager.run_chat call (async version).
        """

        server_address, server_port = set_server_address_and_port(
            instance, "127.0.0.1", 80
        )

        # Describe the group chat by its participant count when available.
        groupchat = getattr(instance, "groupchat", None)
        if groupchat:
            participants = [agent.name for agent in groupchat.agents]
            group_name = f"GroupChat_{len(participants)}_agents"
        else:
            participants = []
            group_name = "UnknownGroupChat"

        # Model resolution: manager llm_config first, then the groupchat's
        # speaker-selection config when present.
        request_model = "unknown"  # Default fallback
        if hasattr(instance, "llm_config") and isinstance(instance.llm_config, dict):
            request_model = instance.llm_config.get("model", "unknown")

        if groupchat and hasattr(groupchat, "select_speaker_auto_llm_config"):
            llm_config = groupchat.select_speaker_auto_llm_config
            if isinstance(llm_config, dict):
                request_model = llm_config.get("model", request_model)
            elif hasattr(llm_config, "model"):
                request_model = llm_config.model

        # Sender arrives only as a keyword; messages may be positional.
        sender = kwargs.get("sender", None)
        messages = args[0] if args else kwargs.get("messages", [])

        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_FRAMEWORK} {group_name}"

        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
            start_time = time.time()
            response = await wrapped(*args, **kwargs)

            try:
                process_groupchat_operation(
                    group_name=group_name,
                    participants=participants,
                    messages=messages,
                    sender=sender,
                    max_turns=None,  # Not available in new API
                    request_model=request_model,
                    pricing_info=pricing_info,
                    server_port=server_port,
                    server_address=server_address,
                    environment=environment,
                    application_name=application_name,
                    metrics=metrics,
                    start_time=start_time,
                    span=span,
                    capture_message_content=capture_message_content,
                    disable_metrics=disable_metrics,
                    version=version,
                )

            except Exception as e:
                # Telemetry failures must never break the instrumented call.
                handle_exception(span, e)

            return response

    return wrapper
|
456
|
+
|
457
|
+
|
458
|
+
def async_groupchat_select_speaker(
    version,
    environment,
    application_name,
    tracer,
    pricing_info,
    capture_message_content,
    metrics,
    disable_metrics,
):
    """
    Generates a telemetry wrapper for AG2 GroupChat.select_speaker (async version).
    """

    async def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the AG2 GroupChat.select_speaker call (async version).
        """

        server_address, server_port = set_server_address_and_port(
            instance, "127.0.0.1", 80
        )

        # args[0] is treated as the last speaker, args[1] as the selector agent.
        last_speaker = args[0] if args else kwargs.get("last_speaker", None)
        selector = args[1] if len(args) > 1 else kwargs.get("selector", None)

        last_speaker_name = (
            getattr(last_speaker, "name", "Unknown") if last_speaker else "Unknown"
        )

        agents = getattr(instance, "agents", [])

        # Model resolution, in priority order: the GroupChat's own
        # speaker-selection config, then the selector's llm_config, then the
        # first agent that declares a model.
        request_model = "unknown"  # Default fallback
        if hasattr(instance, "select_speaker_auto_llm_config"):
            llm_config = instance.select_speaker_auto_llm_config
            if isinstance(llm_config, dict):
                request_model = llm_config.get("model", "unknown")
            elif hasattr(llm_config, "model"):
                request_model = llm_config.model

        if (
            selector
            and hasattr(selector, "llm_config")
            and isinstance(selector.llm_config, dict)
        ):
            request_model = selector.llm_config.get("model", request_model)

        if request_model == "unknown" and agents:
            for agent in agents:
                if hasattr(agent, "llm_config") and isinstance(agent.llm_config, dict):
                    model = agent.llm_config.get("model")
                    if model:
                        request_model = model
                        break

        span_name = (
            f"{SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT} speaker_selection"
        )

        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
            start_time = time.time()
            response = await wrapped(*args, **kwargs)

            try:
                # The selected speaker is the awaited return value.
                selected_speaker_name = (
                    getattr(response, "name", "Unknown") if response else "Unknown"
                )

                process_speaker_selection(
                    last_speaker=last_speaker_name,
                    selected_speaker=selected_speaker_name,
                    selector=selector,
                    agents=agents,
                    request_model=request_model,
                    pricing_info=pricing_info,
                    server_port=server_port,
                    server_address=server_address,
                    environment=environment,
                    application_name=application_name,
                    metrics=metrics,
                    start_time=start_time,
                    span=span,
                    capture_message_content=capture_message_content,
                    disable_metrics=disable_metrics,
                    version=version,
                )

            except Exception as e:
                # Telemetry failures must never break the instrumented call.
                handle_exception(span, e)

            return response

    return wrapper
|