openlit 1.34.31__py3-none-any.whl → 1.34.33__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,51 +1,471 @@
1
1
  """
2
- Pydantic AI OpenTelemetry instrumentation utility functions
2
+ Optimized Pydantic AI OpenTelemetry instrumentation utility functions
3
+ This version reduces code duplication and improves performance while maintaining all data.
3
4
  """
4
5
 
5
6
  import logging
7
+ import json
8
+ from typing import Dict, Any, Optional, List, Tuple
6
9
  from opentelemetry.sdk.resources import (
7
10
  SERVICE_NAME,
8
11
  TELEMETRY_SDK_NAME,
9
12
  DEPLOYMENT_ENVIRONMENT,
10
13
  )
11
14
  from opentelemetry.trace import Status, StatusCode, SpanKind
15
+ from opentelemetry import context as context_api
12
16
  from openlit.__helpers import handle_exception
13
17
  from openlit.semcov import SemanticConvention
14
18
 
19
+ # Try to import enhanced helpers for business intelligence
20
+ try:
21
+ from openlit.__helpers import get_chat_model_cost
22
+
23
+ ENHANCED_HELPERS_AVAILABLE = True
24
+ except ImportError:
25
+ ENHANCED_HELPERS_AVAILABLE = False
26
+
15
27
  # Initialize logger for logging potential issues and operations
16
28
  logger = logging.getLogger(__name__)
17
29
 
30
+ # Constants for common node names to avoid hardcoding
31
+ INTERNAL_NODE_NAMES = {"tool_calls_node", "model_request_node", "user_prompt_node"}
32
+
33
+
34
class PydanticAIInstrumentationContext:
    """
    Cached view over a single Pydantic AI agent invocation.

    Lazily extracts and memoizes the data several span-building helpers
    need (agent name, model name, server endpoint, messages, tools, model
    parameters), so each piece is computed at most once per call.
    """

    def __init__(self, instance, args, kwargs, version, environment, application_name):
        self.instance = instance
        self.args = args
        self.kwargs = kwargs
        self.version = version
        self.environment = environment
        self.application_name = application_name

        # Lazily-populated caches; None means "not computed yet".
        self._agent_name = None
        self._model_name = None
        self._server_info = None
        self._messages = None
        self._tools = None
        self._model_params = None

    @property
    def agent_name(self) -> str:
        """Agent name, falling back to a generic default (cached)."""
        if self._agent_name is None:
            configured = getattr(self.instance, "name", None)
            self._agent_name = configured or "pydantic_agent"
        return self._agent_name

    @property
    def model_name(self) -> str:
        """Model name read from the wrapped agent's model (cached)."""
        if self._model_name is None:
            model = getattr(self.instance, "model", None)
            if model is not None and hasattr(model, "model_name"):
                self._model_name = str(model.model_name)
            else:
                self._model_name = "unknown"
        return self._model_name

    @property
    def server_info(self) -> Tuple[str, int]:
        """(address, port) inferred from the model name (cached)."""
        if self._server_info is None:
            # Heuristic: OpenAI-hosted models go to the public API endpoint.
            if "openai" in self.model_name.lower():
                self._server_info = ("api.openai.com", 443)
            else:
                self._server_info = ("127.0.0.1", 80)
        return self._server_info

    @property
    def messages(self) -> List[Dict]:
        """Extracted conversation messages (cached)."""
        if self._messages is None:
            self._messages = self._extract_messages()
        return self._messages

    @property
    def tools(self) -> List:
        """Extracted tool definitions (cached)."""
        if self._tools is None:
            self._tools = self._extract_tools()
        return self._tools

    @property
    def model_params(self) -> Dict[str, Any]:
        """Extracted model sampling parameters (cached)."""
        if self._model_params is None:
            self._model_params = self._extract_model_parameters()
        return self._model_params

    def _extract_messages(self) -> List[Dict]:
        """Build the message list from args, system prompts, and history."""
        collected = []
        try:
            # The first positional argument is treated as the user prompt.
            if self.args:
                first_arg = self.args[0]
                if isinstance(first_arg, str):
                    collected.append({"role": "user", "content": first_arg})

            # Prepend the system prompt when the agent defines one.
            system_prompts = getattr(self.instance, "_system_prompts", None)
            if system_prompts:
                collected.insert(
                    0, {"role": "system", "content": str(system_prompts)}
                )

            # Append any prior conversation supplied via kwargs.
            history = self.kwargs.get("message_history")
            if isinstance(history, list):
                collected.extend(history)
        except Exception as exc:
            logger.debug("Failed to extract messages: %s", exc)

        return collected

    def _extract_tools(self) -> List:
        """Return the agent's registered tools, or an empty list."""
        try:
            registered = getattr(self.instance, "_tools", None)
            if registered:
                return registered
        except Exception as exc:
            logger.debug("Failed to extract tools: %s", exc)
        return []

    def _extract_model_parameters(self) -> Dict[str, Any]:
        """Collect well-known sampling parameters defined on the model."""
        extracted = {}
        try:
            model = getattr(self.instance, "model", None)
            if model is not None:
                for name in (
                    "temperature",
                    "top_p",
                    "max_tokens",
                    "frequency_penalty",
                    "presence_penalty",
                    "stop",
                    "seed",
                    "top_k",
                ):
                    # Only record parameters that are present and set.
                    value = getattr(model, name, None)
                    if value is not None:
                        extracted[name] = value
        except Exception as exc:
            logger.debug("Failed to extract model parameters: %s", exc)

        return extracted
171
+
18
172
 
19
173
def set_span_attributes(
    span,
    operation_name: str,
    ctx: PydanticAIInstrumentationContext,
    agent_name: Optional[str] = None,
    lifecycle_phase: Optional[str] = None,
    additional_attrs: Optional[Dict[str, Any]] = None,
):
    """
    Apply the common OpenTelemetry attributes for a Pydantic AI span.

    Args:
        span: OpenTelemetry span object.
        operation_name: Operation name recorded on the span.
        ctx: PydanticAIInstrumentationContext with cached data.
        agent_name: Overrides ctx.agent_name when provided.
        lifecycle_phase: Optional agent lifecycle phase.
        additional_attrs: Extra attribute key/value pairs to record.
    """

    # Identity attributes shared by every Pydantic AI span.
    span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
    span.set_attribute(SemanticConvention.GEN_AI_OPERATION, operation_name)
    span.set_attribute(
        SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_PYDANTIC_AI
    )

    # Record the agent name only when it is not an internal graph node.
    resolved_name = agent_name or ctx.agent_name
    if resolved_name and resolved_name not in INTERNAL_NODE_NAMES:
        span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, resolved_name)

    # Server endpoint inferred from the model.
    host, port = ctx.server_info
    span.set_attribute(SemanticConvention.SERVER_ADDRESS, host)
    span.set_attribute(SemanticConvention.SERVER_PORT, port)

    # Requested model.
    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, ctx.model_name)

    # Deployment / service metadata.
    span.set_attribute(DEPLOYMENT_ENVIRONMENT, ctx.environment)
    span.set_attribute(SERVICE_NAME, ctx.application_name)
    span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, ctx.version)

    # Optional lifecycle phase.
    if lifecycle_phase:
        span.set_attribute(
            SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE, lifecycle_phase
        )

    # Caller-supplied extras, recorded last.
    for key, value in (additional_attrs or {}).items():
        span.set_attribute(key, value)
+
229
+
230
def add_message_tracking(span, messages: List[Dict], message_type: str = "input"):
    """
    Record chat messages on the span as a JSON attribute plus metadata.

    Args:
        span: OpenTelemetry span object.
        messages: Message dicts carrying "role"/"content" (and optionally
            "tool_calls") keys.
        message_type: "input" records the prompt attribute; anything else
            records the output-messages attribute.
    """
    if not messages:
        return

    try:
        # Normalize every message to role/content (+ optional tool_calls).
        normalized = []
        for raw in messages:
            entry = {
                "role": raw.get("role", "user"),
                "content": raw.get("content", ""),
            }
            if "tool_calls" in raw:
                entry["tool_calls"] = raw["tool_calls"]
            normalized.append(entry)

        # Pick the attribute by direction and store the JSON payload.
        if message_type == "input":
            target_attr = SemanticConvention.GEN_AI_CONTENT_PROMPT
        else:
            target_attr = SemanticConvention.GEN_AI_OUTPUT_MESSAGES
        span.set_attribute(target_attr, json.dumps(normalized))

        # Summary metadata: leading role and combined content length.
        if normalized:
            span.set_attribute(
                SemanticConvention.GEN_AI_MESSAGE_ROLE,
                normalized[0].get("role", "user"),
            )
            combined_length = sum(
                len(str(item.get("content", ""))) for item in normalized
            )
            span.set_attribute("gen_ai.message.total_length", combined_length)

    except Exception as e:
        logger.debug("Failed to add message tracking: %s", e)
273
+
274
+
275
def add_tool_tracking(span, tools: List):
    """
    Record the agent's tool definitions on the span as a JSON attribute.

    Accepts tool objects (``.name`` / ``.description`` / ``.json_schema``)
    as well as plain dicts (``"name"`` / ``"description"`` / ``"schema"``).
    """
    if not tools:
        return

    try:
        serialized = []
        for tool in tools:
            if hasattr(tool, "name"):
                # Tool object: read attributes.
                entry = {
                    "name": tool.name,
                    "description": getattr(tool, "description", ""),
                }
                if hasattr(tool, "json_schema"):
                    entry["schema"] = tool.json_schema
            else:
                # Plain dict: read keys.
                entry = {
                    "name": tool.get("name", ""),
                    "description": tool.get("description", ""),
                }
                if "schema" in tool:
                    entry["schema"] = tool["schema"]
            serialized.append(entry)

        span.set_attribute(
            SemanticConvention.GEN_AI_AGENT_TOOLS, json.dumps(serialized)
        )

    except Exception as e:
        logger.debug("Failed to add tool tracking: %s", e)
307
+
308
+
309
def execute_with_error_handling(
    span, wrapped, args, kwargs, capture_completion: bool = False
):
    """
    Execute wrapped function with standardized error handling.

    Args:
        span: Active OpenTelemetry span to record status/errors on.
        wrapped: The original callable being instrumented.
        args: Positional arguments forwarded to ``wrapped``.
        kwargs: Keyword arguments forwarded to ``wrapped``.
        capture_completion: When True, record ``response.data`` (if the
            response exposes it) as the completion content attribute.

    Returns:
        Whatever ``wrapped`` returns.

    Raises:
        Exception: Re-raises anything ``wrapped`` raises, after recording
            it on the span via ``handle_exception`` and logging it.
    """
    try:
        response = wrapped(*args, **kwargs)

        # Add completion content if requested
        if capture_completion and hasattr(response, "data"):
            span.set_attribute(
                SemanticConvention.GEN_AI_CONTENT_COMPLETION, str(response.data)
            )

        span.set_status(Status(StatusCode.OK))
        return response

    except Exception as e:
        handle_exception(span, e)
        logger.error("Error in instrumentation: %s", e)
        raise
331
+
332
+
333
+ # Context extraction utilities for internal nodes
334
def extract_context_info(args, kwargs) -> Dict[str, Any]:
    """
    Extract common telemetry fields from an internal graph node's context.

    The first positional argument is expected to be the node's run context.
    Every lookup is best-effort: a missing attribute simply leaves the
    corresponding default in place, and any unexpected failure is logged
    at debug level.

    Returns:
        Dict with keys: ``model_info``, ``agent_name``, ``user_input``,
        ``tool_info``, ``tool_count``, ``message_count``.
    """
    info = {
        "model_info": "",
        "agent_name": "",
        "user_input": "",
        "tool_info": "",
        "tool_count": 0,
        "message_count": 0,
    }

    try:
        if not args:
            return info
        context = args[0]
        deps = getattr(context, "deps", None)

        # Model info from context.deps.model.model_name
        model = getattr(deps, "model", None)
        if model is not None and hasattr(model, "model_name"):
            info["model_info"] = str(model.model_name)

        # Agent name from deps.agent.name, else fall back to context.agent.name
        if deps is not None and hasattr(deps, "agent"):
            agent = deps.agent
            agent_name = getattr(agent, "name", None)
            if agent_name:
                info["agent_name"] = str(agent_name)
        elif hasattr(context, "agent") and hasattr(context.agent, "name"):
            info["agent_name"] = str(context.agent.name)

        # Truncated user input (used for span naming / preview)
        if hasattr(context, "user_input"):
            info["user_input"] = str(context.user_input)[:50]

        # Tool call count and first tool name. The "function" field of a
        # tool call may be an object or a dict depending on the provider,
        # so support both shapes instead of assuming dict (.get on an
        # object raised AttributeError and silently aborted extraction).
        tool_calls = getattr(context, "tool_calls", None)
        if tool_calls:
            info["tool_count"] = len(tool_calls)
            function = getattr(tool_calls[0], "function", None)
            if isinstance(function, dict):
                info["tool_info"] = function.get("name", "")
            elif function is not None:
                info["tool_info"] = str(getattr(function, "name", ""))

        # Message count
        if getattr(context, "messages", None):
            info["message_count"] = len(context.messages)

    except Exception as e:
        logger.debug("Failed to extract context info: %s", e)

    return info
386
+
387
+
388
def add_business_intelligence_attributes(
    span, model_name: str, response, pricing_info, capture_message_content: bool
):
    """
    Attach usage, cost, performance, and content attributes to a span.

    Args:
        span: Active OpenTelemetry span.
        model_name: Model identifier used for cost lookup.
        response: Agent run result; token counts are read from
            ``response.usage`` when present.
        pricing_info: Pricing table forwarded to ``get_chat_model_cost``.
        capture_message_content: When True, record ``response.output`` as
            the completion content attribute.

    All failures are swallowed and logged at debug level so telemetry
    can never break the instrumented call.
    """
    try:
        # Extract usage information
        usage_info = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}

        # NOTE(review): assumes ``usage`` is a plain attribute object, not
        # a callable — confirm against the installed pydantic-ai version.
        if hasattr(response, "usage"):
            usage_obj = response.usage
            # Providers name token fields differently; take the first
            # non-zero candidate in each chain.
            usage_info["input_tokens"] = (
                getattr(usage_obj, "input_tokens", 0)
                or getattr(usage_obj, "request_tokens", 0)
                or getattr(usage_obj, "prompt_tokens", 0)
                or 0
            )
            usage_info["output_tokens"] = (
                getattr(usage_obj, "output_tokens", 0)
                or getattr(usage_obj, "response_tokens", 0)
                or getattr(usage_obj, "completion_tokens", 0)
                or 0
            )
            usage_info["total_tokens"] = (
                usage_info["input_tokens"] + usage_info["output_tokens"]
            )

        # Set usage attributes (only when non-zero)
        if usage_info["input_tokens"] > 0:
            span.set_attribute(
                SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, usage_info["input_tokens"]
            )
        if usage_info["output_tokens"] > 0:
            span.set_attribute(
                SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS,
                usage_info["output_tokens"],
            )
        if usage_info["total_tokens"] > 0:
            span.set_attribute(
                SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS, usage_info["total_tokens"]
            )

        # Calculate cost (only when the optional helper import succeeded)
        if (
            ENHANCED_HELPERS_AVAILABLE
            and pricing_info
            and usage_info["input_tokens"] > 0
        ):
            try:
                cost = get_chat_model_cost(
                    model_name,
                    pricing_info,
                    usage_info["input_tokens"],
                    usage_info["output_tokens"],
                )
                if cost > 0:
                    span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
            except Exception as e:
                logger.debug("Failed to calculate cost: %s", e)

        # Add performance metrics (duration is truthy here, so the
        # tokens-per-second division cannot hit zero)
        if hasattr(response, "duration") and response.duration:
            span.set_attribute(
                SemanticConvention.GEN_AI_CLIENT_OPERATION_DURATION, response.duration
            )
            if usage_info["total_tokens"] > 0:
                tokens_per_second = usage_info["total_tokens"] / response.duration
                span.set_attribute(
                    SemanticConvention.GEN_AI_PERFORMANCE_TOKENS_PER_SECOND,
                    tokens_per_second,
                )

        # Enhanced content capture
        if capture_message_content and hasattr(response, "output") and response.output:
            span.set_attribute(
                SemanticConvention.GEN_AI_CONTENT_COMPLETION, str(response.output)
            )

    except Exception as e:
        logger.debug("Failed to add business intelligence attributes: %s", e)
49
469
 
50
470
 
51
471
  def common_agent_run(
@@ -58,54 +478,135 @@ def common_agent_run(
58
478
  environment,
59
479
  application_name,
60
480
  capture_message_content,
61
- response,
481
+ pricing_info=None,
62
482
  ):
63
483
  """
64
- Handle telemetry for Pydantic AI agent run operations.
484
+ Optimized agent run function using context caching and standardized patterns.
65
485
  """
486
+ # Suppression check
487
+ if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
488
+ return wrapped(*args, **kwargs)
66
489
 
67
- server_address, server_port = instance.model.base_url, 443
68
- agent_name = instance.name or "pydantic_agent"
69
- request_model = str(instance.model.model_name)
70
- span_name = (
71
- f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK} {agent_name}"
490
+ # Create cached context
491
+ ctx = PydanticAIInstrumentationContext(
492
+ instance, args, kwargs, version, environment, application_name
72
493
  )
73
494
 
495
+ # Determine span name
496
+ operation_type = SemanticConvention.GEN_AI_OPERATION_TYPE_AGENT
497
+ span_name = f"{operation_type} {ctx.agent_name}"
498
+
74
499
  with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
75
- try:
76
- set_span_attributes(
77
- span,
78
- version,
79
- SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK,
80
- environment,
81
- application_name,
82
- server_address,
83
- server_port,
84
- request_model,
85
- agent_name,
86
- )
87
- span.set_attribute(
88
- SemanticConvention.GEN_AI_AGENT_DESCRIPTION,
89
- str(instance._system_prompts),
90
- )
91
- span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, request_model)
92
-
93
- if capture_message_content:
94
- span.add_event(
95
- name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
96
- attributes={
97
- SemanticConvention.GEN_AI_CONTENT_COMPLETION: response.output,
98
- },
99
- )
500
+ # Set common attributes
501
+ set_span_attributes(
502
+ span=span,
503
+ operation_name=SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK,
504
+ ctx=ctx,
505
+ lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_EXECUTE,
506
+ additional_attrs={
507
+ SemanticConvention.GEN_AI_AGENT_DESCRIPTION: str(
508
+ getattr(instance, "_system_prompts", "")
509
+ ),
510
+ SemanticConvention.GEN_AI_RESPONSE_MODEL: ctx.model_name,
511
+ },
512
+ )
100
513
 
101
- span.set_status(Status(StatusCode.OK))
514
+ # Add message tracking if enabled
515
+ if capture_message_content and ctx.messages:
516
+ add_message_tracking(span, ctx.messages, "input")
102
517
 
103
- return response
518
+ # Add tool tracking if tools exist
519
+ if ctx.tools:
520
+ add_tool_tracking(span, ctx.tools)
104
521
 
105
- except Exception as e:
106
- handle_exception(span, e)
107
- logger.error("Error in trace creation: %s", e)
108
- return response
522
+ # Add model parameters if available
523
+ if ctx.model_params:
524
+ span.set_attribute(
525
+ SemanticConvention.GEN_AI_REQUEST_PARAMETERS,
526
+ json.dumps(ctx.model_params),
527
+ )
528
+
529
+ # Execute with error handling
530
+ response = execute_with_error_handling(
531
+ span, wrapped, args, kwargs, capture_completion=False
532
+ )
533
+
534
+ # Add business intelligence
535
+ add_business_intelligence_attributes(
536
+ span, ctx.model_name, response, pricing_info, capture_message_content
537
+ )
538
+
539
+ return response
540
+
541
+
542
async def common_agent_run_async(
    wrapped,
    instance,
    args,
    kwargs,
    tracer,
    version,
    environment,
    application_name,
    capture_message_content,
    pricing_info=None,
):
    """
    Handle telemetry for async Pydantic AI agent run operations.

    Mirrors the sync run path: builds a cached instrumentation context,
    records request attributes, awaits the wrapped coroutine, then attaches
    usage/cost attributes. Exceptions raised by the wrapped coroutine are
    recorded on the span (matching ``execute_with_error_handling``) and
    re-raised.
    """
    # Respect the instrumentation suppression flag.
    if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
        return await wrapped(*args, **kwargs)

    # Cache commonly-used data extracted from the agent instance.
    ctx = PydanticAIInstrumentationContext(
        instance, args, kwargs, version, environment, application_name
    )

    # Determine span name
    operation_type = SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK
    span_name = f"{operation_type} {ctx.agent_name}"

    with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
        # Set common attributes
        set_span_attributes(
            span=span,
            operation_name=SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK,
            ctx=ctx,
            lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_EXECUTE,
            additional_attrs={
                SemanticConvention.GEN_AI_AGENT_DESCRIPTION: str(
                    getattr(instance, "_system_prompts", "")
                ),
                SemanticConvention.GEN_AI_RESPONSE_MODEL: ctx.model_name,
            },
        )

        # Add message tracking if enabled
        if capture_message_content and ctx.messages:
            add_message_tracking(span, ctx.messages, "input")

        # Add tool tracking if tools exist
        if ctx.tools:
            add_tool_tracking(span, ctx.tools)

        # Add model parameters if available
        if ctx.model_params:
            span.set_attribute(
                SemanticConvention.GEN_AI_REQUEST_PARAMETERS,
                json.dumps(ctx.model_params),
            )

        # Await the wrapped coroutine. Record and re-raise failures so the
        # async path behaves like the sync execute_with_error_handling
        # (previously errors bypassed handle_exception entirely).
        try:
            response = await wrapped(*args, **kwargs)
        except Exception as e:
            handle_exception(span, e)
            logger.error("Error in instrumentation: %s", e)
            raise

        # Add business intelligence (usage, cost, performance, content)
        add_business_intelligence_attributes(
            span, ctx.model_name, response, pricing_info, capture_message_content
        )

        span.set_status(Status(StatusCode.OK))
        return response
109
610
 
110
611
 
111
612
  def common_agent_create(
@@ -118,43 +619,295 @@ def common_agent_create(
118
619
  environment,
119
620
  application_name,
120
621
  capture_message_content,
121
- response,
122
622
  ):
123
623
  """
124
- Handle telemetry for Pydantic AI agent creation operations.
624
+ Optimized agent creation function using context caching and standardized patterns.
125
625
  """
626
+ # Suppression check
627
+ if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
628
+ return wrapped(*args, **kwargs)
126
629
 
127
- server_address, server_port = "127.0.0.1", 80
630
+ # Create minimal context for agent creation
128
631
  agent_name = kwargs.get("name", "pydantic_agent")
129
- span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT} {agent_name}"
632
+ request_model = (
633
+ args[0] if args else kwargs.get("model", "google-gla:gemini-1.5-flash")
634
+ )
635
+
636
+ # Create a minimal context object for creation
637
+ class CreateContext:
638
+ """Minimal context for agent creation instrumentation."""
639
+
640
+ def __init__(self):
641
+ self.agent_name = agent_name
642
+ self.model_name = request_model
643
+ self.server_info = ("127.0.0.1", 80)
644
+ self.environment = environment
645
+ self.application_name = application_name
646
+ self.version = version
647
+ self.messages = []
648
+ self.tools = kwargs.get("tools", [])
649
+ self.model_params = {}
650
+
651
+ def get_context_info(self):
652
+ """Get context information for instrumentation."""
653
+ return {
654
+ "agent_name": self.agent_name,
655
+ "model_name": self.model_name,
656
+ "tools_count": len(self.tools),
657
+ }
658
+
659
+ def has_tools(self):
660
+ """Check if agent has tools configured."""
661
+ return len(self.tools) > 0
662
+
663
+ ctx = CreateContext()
664
+
665
+ with tracer.start_as_current_span(
666
+ f"create_agent {agent_name}", kind=SpanKind.CLIENT
667
+ ) as span:
668
+ # Set common attributes
669
+ set_span_attributes(
670
+ span=span,
671
+ operation_name=SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT,
672
+ ctx=ctx,
673
+ lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_CREATE,
674
+ additional_attrs={
675
+ SemanticConvention.GEN_AI_AGENT_DESCRIPTION: str(
676
+ kwargs.get("system_prompt", "")
677
+ ),
678
+ SemanticConvention.GEN_AI_RESPONSE_MODEL: request_model,
679
+ },
680
+ )
681
+
682
+ # Add tools if any are provided during creation
683
+ if ctx.tools:
684
+ add_tool_tracking(span, ctx.tools)
685
+
686
+ # Execute with error handling
687
+ return execute_with_error_handling(
688
+ span, wrapped, args, kwargs, capture_completion=False
689
+ )
690
+
691
+
692
def common_graph_execution(
    wrapped,
    instance,
    args,
    kwargs,
    tracer,
    version,
    environment,
    application_name,
    capture_message_content,
):
    """
    Handle telemetry for Pydantic AI graph execution operations.
    This wraps the Agent.iter() method to track graph execution.

    Args:
        wrapped: The original Agent.iter callable.
        instance: The agent instance being instrumented.
        args: Positional arguments forwarded to ``wrapped``.
        kwargs: Keyword arguments forwarded to ``wrapped``.
        tracer: OpenTelemetry tracer used to create the span.
        version: Instrumented SDK version string.
        environment: Deployment environment label.
        application_name: Service name label.
        capture_message_content: Accepted for signature parity; content
            capture is not performed in this wrapper.

    Returns:
        Whatever ``wrapped`` returns (errors are recorded and re-raised by
        ``execute_with_error_handling``).
    """

    # CRITICAL: Suppression check to prevent double instrumentation
    if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    # Create cached context for agent-based operations
    ctx = PydanticAIInstrumentationContext(
        instance, args, kwargs, version, environment, application_name
    )

    operation_type = SemanticConvention.GEN_AI_OPERATION_TYPE_GRAPH_EXECUTION
    span_name = f"{operation_type} {ctx.agent_name}"

    with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
        # Set common attributes
        set_span_attributes(
            span=span,
            operation_name=operation_type,
            ctx=ctx,
            lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_GRAPH_EXECUTION,
            additional_attrs={
                SemanticConvention.GEN_AI_AGENT_DESCRIPTION: str(
                    getattr(instance, "_system_prompts", "")
                ),
            },
        )

        # Add model parameters if available
        if ctx.model_params:
            span.set_attribute(
                SemanticConvention.GEN_AI_REQUEST_PARAMETERS,
                json.dumps(ctx.model_params),
            )

        # Execute with error handling
        return execute_with_error_handling(
            span, wrapped, args, kwargs, capture_completion=False
        )
745
+
746
+
747
def common_internal_node(
    wrapped,
    instance,
    args,
    kwargs,
    tracer,
    version,
    environment,
    application_name,
    capture_message_content,
    operation_type,
    lifecycle_phase,
    node_type="internal",
):
    """
    Optimized generic function for internal node instrumentation.
    This consolidates common logic for all internal node types
    (user prompt processing, model requests, tool calls).

    Args:
        wrapped: The original node-run callable.
        instance: The node instance (unused; kept for wrapper parity).
        args: Positional arguments; args[0] is expected to be the node's
            run context (see ``extract_context_info``).
        kwargs: Keyword arguments forwarded to ``wrapped``.
        tracer: OpenTelemetry tracer used to create the span.
        version: Instrumented SDK version string.
        environment: Deployment environment label (unused here).
        application_name: Service name label (unused here).
        capture_message_content: Accepted for parity; not used here.
        operation_type: Semantic-convention operation name for the span.
        lifecycle_phase: Agent lifecycle phase attribute value.
        node_type: Fallback span-name suffix when no context info exists.

    Returns:
        Whatever ``wrapped`` returns (errors recorded and re-raised by
        ``execute_with_error_handling``).
    """
    # Suppression check
    if context_api.get_value(context_api._SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    # Extract context info efficiently
    context_info = extract_context_info(args, kwargs)

    # Determine span name: prefer model, then agent, then tool, then the
    # generic node type.
    if context_info["model_info"]:
        span_name = f"{operation_type} {context_info['model_info']}"
    elif context_info["agent_name"]:
        span_name = f"{operation_type} {context_info['agent_name']}"
    elif context_info["tool_info"]:
        span_name = f"{operation_type} {context_info['tool_info']}"
    else:
        span_name = f"{operation_type} {node_type}"

    with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
        # Set basic attributes
        span.set_attribute(SemanticConvention.GEN_AI_OPERATION, operation_type)
        span.set_attribute(
            SemanticConvention.GEN_AI_SYSTEM,
            SemanticConvention.GEN_AI_SYSTEM_PYDANTIC_AI,
        )
        span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
        span.set_attribute(
            SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE, lifecycle_phase
        )

        # Set server info.
        # NOTE(review): the OpenAI endpoint is hardcoded for model-request
        # nodes — verify this for non-OpenAI providers.
        if operation_type == SemanticConvention.GEN_AI_OPERATION_TYPE_MODEL_REQUEST:
            span.set_attribute(SemanticConvention.SERVER_ADDRESS, "api.openai.com")
            span.set_attribute(SemanticConvention.SERVER_PORT, 443)
        else:
            span.set_attribute(SemanticConvention.SERVER_ADDRESS, "127.0.0.1")
            span.set_attribute(SemanticConvention.SERVER_PORT, 80)

        # Set extracted context attributes (each only when non-empty)
        if context_info["model_info"]:
            span.set_attribute(
                SemanticConvention.GEN_AI_REQUEST_MODEL, context_info["model_info"]
            )
        if context_info["agent_name"]:
            span.set_attribute(
                SemanticConvention.GEN_AI_AGENT_NAME, context_info["agent_name"]
            )
        if context_info["user_input"]:
            span.set_attribute(
                SemanticConvention.GEN_AI_CONTENT_PROMPT, context_info["user_input"]
            )
        if context_info["tool_info"]:
            span.set_attribute(
                SemanticConvention.GEN_AI_TOOL_NAME, context_info["tool_info"]
            )

        # Execute with error handling
        return execute_with_error_handling(
            span, wrapped, args, kwargs, capture_completion=False
        )
154
824
 
155
- return response
156
825
 
157
- except Exception as e:
158
- handle_exception(span, e)
159
- logger.error("Error in trace creation: %s", e)
160
- return response
826
def common_user_prompt_processing(
    wrapped,
    instance,
    args,
    kwargs,
    tracer,
    version,
    environment,
    application_name,
    capture_message_content,
):
    """
    Instrument user-prompt processing by delegating to the shared
    internal-node handler with prompt-specific operation metadata.
    """
    return common_internal_node(
        wrapped=wrapped,
        instance=instance,
        args=args,
        kwargs=kwargs,
        tracer=tracer,
        version=version,
        environment=environment,
        application_name=application_name,
        capture_message_content=capture_message_content,
        operation_type=SemanticConvention.GEN_AI_OPERATION_TYPE_USER_PROMPT_PROCESSING,
        lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_USER_PROMPT_PROCESSING,
        node_type="user_input",
    )
854
+
855
+
856
def common_model_request_processing(
    wrapped,
    instance,
    args,
    kwargs,
    tracer,
    version,
    environment,
    application_name,
    capture_message_content,
):
    """
    Instrument model-request nodes by delegating to the shared
    internal-node handler with model-request operation metadata.
    """
    return common_internal_node(
        wrapped=wrapped,
        instance=instance,
        args=args,
        kwargs=kwargs,
        tracer=tracer,
        version=version,
        environment=environment,
        application_name=application_name,
        capture_message_content=capture_message_content,
        operation_type=SemanticConvention.GEN_AI_OPERATION_TYPE_MODEL_REQUEST,
        lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_MODEL_REQUEST,
        node_type="llm",
    )
884
+
885
+
886
def common_tool_calls_processing(
    wrapped,
    instance,
    args,
    kwargs,
    tracer,
    version,
    environment,
    application_name,
    capture_message_content,
):
    """
    Instrument tool-call nodes by delegating to the shared internal-node
    handler with tool-execution operation metadata.
    """
    return common_internal_node(
        wrapped=wrapped,
        instance=instance,
        args=args,
        kwargs=kwargs,
        tracer=tracer,
        version=version,
        environment=environment,
        application_name=application_name,
        capture_message_content=capture_message_content,
        operation_type=SemanticConvention.GEN_AI_OPERATION_TYPE_TOOLS,
        lifecycle_phase=SemanticConvention.GEN_AI_AGENT_LIFECYCLE_PHASE_TOOL_EXECUTION,
        node_type="tools",
    )