veadk-python 0.2.16__py3-none-any.whl → 0.2.17__py3-none-any.whl

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (64)
  1. veadk/a2a/remote_ve_agent.py +56 -1
  2. veadk/agent.py +79 -26
  3. veadk/agents/loop_agent.py +22 -9
  4. veadk/agents/parallel_agent.py +21 -9
  5. veadk/agents/sequential_agent.py +18 -9
  6. veadk/auth/veauth/apmplus_veauth.py +32 -39
  7. veadk/auth/veauth/ark_veauth.py +3 -1
  8. veadk/auth/veauth/utils.py +12 -0
  9. veadk/auth/veauth/viking_mem0_veauth.py +91 -0
  10. veadk/cli/cli.py +5 -1
  11. veadk/cli/cli_create.py +62 -1
  12. veadk/cli/cli_deploy.py +36 -1
  13. veadk/cli/cli_eval.py +55 -0
  14. veadk/cli/cli_init.py +44 -3
  15. veadk/cli/cli_kb.py +36 -1
  16. veadk/cli/cli_pipeline.py +66 -1
  17. veadk/cli/cli_prompt.py +16 -1
  18. veadk/cli/cli_uploadevalset.py +15 -1
  19. veadk/cli/cli_web.py +35 -4
  20. veadk/cloud/cloud_agent_engine.py +142 -25
  21. veadk/cloud/cloud_app.py +219 -12
  22. veadk/configs/database_configs.py +4 -0
  23. veadk/configs/model_configs.py +5 -1
  24. veadk/configs/tracing_configs.py +2 -2
  25. veadk/evaluation/adk_evaluator/adk_evaluator.py +77 -17
  26. veadk/evaluation/base_evaluator.py +219 -3
  27. veadk/evaluation/deepeval_evaluator/deepeval_evaluator.py +116 -1
  28. veadk/evaluation/eval_set_file_loader.py +20 -0
  29. veadk/evaluation/eval_set_recorder.py +54 -0
  30. veadk/evaluation/types.py +32 -0
  31. veadk/evaluation/utils/prometheus.py +61 -0
  32. veadk/knowledgebase/backends/base_backend.py +14 -1
  33. veadk/knowledgebase/backends/in_memory_backend.py +10 -1
  34. veadk/knowledgebase/backends/opensearch_backend.py +26 -0
  35. veadk/knowledgebase/backends/redis_backend.py +29 -2
  36. veadk/knowledgebase/backends/vikingdb_knowledge_backend.py +43 -5
  37. veadk/knowledgebase/knowledgebase.py +173 -12
  38. veadk/memory/long_term_memory.py +148 -4
  39. veadk/memory/long_term_memory_backends/mem0_backend.py +11 -0
  40. veadk/memory/short_term_memory.py +119 -5
  41. veadk/runner.py +412 -1
  42. veadk/tools/builtin_tools/llm_shield.py +381 -0
  43. veadk/tools/builtin_tools/mcp_router.py +9 -2
  44. veadk/tools/builtin_tools/run_code.py +25 -5
  45. veadk/tools/builtin_tools/web_search.py +38 -154
  46. veadk/tracing/base_tracer.py +28 -1
  47. veadk/tracing/telemetry/attributes/extractors/common_attributes_extractors.py +105 -1
  48. veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py +260 -0
  49. veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py +69 -0
  50. veadk/tracing/telemetry/attributes/extractors/types.py +78 -0
  51. veadk/tracing/telemetry/exporters/apmplus_exporter.py +157 -0
  52. veadk/tracing/telemetry/exporters/base_exporter.py +8 -0
  53. veadk/tracing/telemetry/exporters/cozeloop_exporter.py +60 -1
  54. veadk/tracing/telemetry/exporters/inmemory_exporter.py +118 -1
  55. veadk/tracing/telemetry/exporters/tls_exporter.py +66 -0
  56. veadk/tracing/telemetry/opentelemetry_tracer.py +111 -1
  57. veadk/tracing/telemetry/telemetry.py +118 -2
  58. veadk/version.py +1 -1
  59. {veadk_python-0.2.16.dist-info → veadk_python-0.2.17.dist-info}/METADATA +1 -1
  60. {veadk_python-0.2.16.dist-info → veadk_python-0.2.17.dist-info}/RECORD +64 -62
  61. {veadk_python-0.2.16.dist-info → veadk_python-0.2.17.dist-info}/WHEEL +0 -0
  62. {veadk_python-0.2.16.dist-info → veadk_python-0.2.17.dist-info}/entry_points.txt +0 -0
  63. {veadk_python-0.2.16.dist-info → veadk_python-0.2.17.dist-info}/licenses/LICENSE +0 -0
  64. {veadk_python-0.2.16.dist-info → veadk_python-0.2.17.dist-info}/top_level.txt +0 -0
veadk/tracing/base_tracer.py
@@ -20,12 +20,39 @@ logger = get_logger(__name__)
 
 
 class BaseTracer(ABC):
+    """Abstract base class for implementing tracing functionality in VeADK agents.
+
+    BaseTracer provides the foundation for collecting, managing, and exporting
+    trace data from agent execution sessions. It defines the interface that all
+    concrete tracer implementations must follow, enabling pluggable tracing
+    backends for different observability platforms.
+
+    Attributes:
+        name: Unique identifier for this tracer instance
+        _trace_id: Internal trace identifier for current execution context
+        _trace_file_path: Path to the current trace data file
+    """
+
     def __init__(self, name: str):
+        """Initialize a new BaseTracer instance.
+
+        Args:
+            name: Unique identifier for this tracer instance.
+        """
         self.name = name
         self._trace_id = "<unknown_trace_id>"
         self._trace_file_path = "<unknown_trace_file_path>"
 
     @abstractmethod
     def dump(self, user_id: str, session_id: str, path: str = "/tmp") -> str:
-        """Dump the trace data to a local file."""
+        """Dump the collected trace data to a local file.
+
+        This method must be implemented by concrete tracer classes to export
+        trace data in a format suitable for analysis or storage.
+
+        Args:
+            user_id: User identifier for trace organization and file naming
+            session_id: Session identifier for filtering and organizing spans
+            path: Directory path for the output file. Defaults to system temp directory
+        """
         ...
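The docstring added above spells out the contract for concrete tracers: accept a name, keep _trace_id and _trace_file_path, and implement dump() to write the collected trace data under a directory and return the resulting file location. Below is a minimal sketch of a subclass satisfying that contract, assuming BaseTracer is importable from the module shown above; the JsonFileTracer name, the record() helper, and the JSON layout are illustrative only, not part of the package.

import json
import os

from veadk.tracing.base_tracer import BaseTracer  # assumed import path, mirrors the file location


class JsonFileTracer(BaseTracer):
    """Hypothetical tracer that writes collected span payloads to a JSON file."""

    def __init__(self, name: str):
        super().__init__(name)
        self._spans: list[dict] = []  # span payloads collected during a session

    def record(self, span: dict) -> None:
        # Illustrative helper, not part of the BaseTracer interface.
        self._spans.append(span)

    def dump(self, user_id: str, session_id: str, path: str = "/tmp") -> str:
        # Follow the documented contract: write trace data under `path`,
        # organized by user and session, and return the resulting file path.
        file_path = os.path.join(path, f"{user_id}_{session_id}_{self.name}.json")
        with open(file_path, "w") as f:
            json.dump(self._spans, f, ensure_ascii=False, indent=2)
        self._trace_file_path = file_path
        return file_path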
veadk/tracing/telemetry/attributes/extractors/common_attributes_extractors.py
@@ -16,44 +16,148 @@ from veadk.version import VERSION
 
 
 def common_gen_ai_system(**kwargs) -> str:
-    """This field will be parsed as `model_provider` in Volcengine CozeLoop platform."""
+    """Extract the generative AI system provider name.
+
+    This field identifies the model provider and will be parsed as `model_provider`
+    in Volcengine CozeLoop platform for system categorization and analysis.
+
+    Args:
+        **kwargs: Keyword arguments containing context information.
+            Expected to include 'model_provider' key.
+
+    Returns:
+        str: Model provider name or placeholder if not available
+    """
     model_provider = kwargs.get("model_provider")
     return model_provider or "<unknown_model_provider>"
 
 
 def common_gen_ai_system_version(**kwargs) -> str:
+    """Extract the VeADK system version.
+
+    Provides version information for the VeADK framework being used,
+    enabling version-specific analysis and compatibility tracking.
+
+    Args:
+        **kwargs: Keyword arguments (unused in this extractor)
+
+    Returns:
+        str: Current VeADK version string
+    """
     return VERSION
 
 
 def common_gen_ai_app_name(**kwargs) -> str:
+    """Extract the application name from context.
+
+    Provides application-level identification for organizing and
+    filtering telemetry data by application or service.
+
+    Args:
+        **kwargs: Keyword arguments containing context information.
+            Expected to include 'app_name' key.
+
+    Returns:
+        str: Application name or placeholder if not available
+    """
     app_name = kwargs.get("app_name")
     return app_name or "<unknown_app_name>"
 
 
 def common_gen_ai_agent_name(**kwargs) -> str:
+    """Extract the agent name from context.
+
+    Provides agent-level identification for organizing telemetry data
+    by specific agent instances within an application.
+
+    Args:
+        **kwargs: Keyword arguments containing context information.
+            Expected to include 'agent_name' key.
+
+    Returns:
+        str: Agent name or placeholder if not available
+    """
     agent_name = kwargs.get("agent_name")
     return agent_name or "<unknown_agent_name>"
 
 
 def common_gen_ai_user_id(**kwargs) -> str:
+    """Extract the user identifier from context.
+
+    Provides user-level identification for organizing telemetry data
+    by user sessions and enabling user-specific analytics.
+
+    Args:
+        **kwargs: Keyword arguments containing context information.
+            Expected to include 'user_id' key.
+
+    Returns:
+        str: User identifier or placeholder if not available
+    """
     user_id = kwargs.get("user_id")
     return user_id or "<unknown_user_id>"
 
 
 def common_gen_ai_session_id(**kwargs) -> str:
+    """Extract the session identifier from context.
+
+    Provides session-level identification for organizing telemetry data
+    by conversation sessions and enabling session-based analysis.
+
+    Args:
+        **kwargs: Keyword arguments containing context information.
+            Expected to include 'session_id' key.
+
+    Returns:
+        str: Session identifier or placeholder if not available
+    """
     session_id = kwargs.get("session_id")
     return session_id or "<unknown_session_id>"
 
 
 def common_cozeloop_report_source(**kwargs) -> str:
+    """Extract the CozeLoop report source identifier.
+
+    Provides a fixed identifier indicating that telemetry data originated
+    from the VeADK framework for CozeLoop platform integration.
+
+    Args:
+        **kwargs: Keyword arguments (unused in this extractor)
+
+    Returns:
+        str: Always returns "veadk" as the report source
+    """
     return "veadk"
 
 
 def common_cozeloop_call_type(**kwargs) -> str:
+    """Extract the CozeLoop call type from context.
+
+    Provides call type classification for CozeLoop platform analysis,
+    enabling categorization of different operation types.
+
+    Args:
+        **kwargs: Keyword arguments containing context information.
+            Expected to include 'call_type' key.
+
+    Returns:
+        str: Call type identifier or None if not available
+    """
     return kwargs.get("call_type")
 
 
 def llm_openinference_instrumentation_veadk(**kwargs) -> str:
+    """Extract the OpenInference instrumentation version for VeADK.
+
+    Provides instrumentation version information following OpenInference
+    standards for telemetry framework identification.
+
+    Args:
+        **kwargs: Keyword arguments (unused in this extractor)
+
+    Returns:
+        str: Current VeADK version as instrumentation identifier
+    """
     return VERSION
 
 
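All of these common extractors follow one pattern: read a single key from **kwargs and fall back to an <unknown_...> placeholder when the key is missing or empty. A quick illustration of that behavior, assuming the import path mirrors the file location listed in the files-changed table above:

# Illustrative usage of the fallback convention shown in the diff.
from veadk.tracing.telemetry.attributes.extractors.common_attributes_extractors import (  # assumed path
    common_gen_ai_app_name,
    common_gen_ai_user_id,
)

# Key present: the value is passed through unchanged.
assert common_gen_ai_app_name(app_name="weather-agent") == "weather-agent"

# Key missing or falsy: the extractor falls back to its sentinel placeholder.
assert common_gen_ai_app_name() == "<unknown_app_name>"
assert common_gen_ai_user_id(user_id=None) == "<unknown_user_id>"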
veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py
@@ -22,39 +22,143 @@ from veadk.utils.misc import safe_json_serialize
 
 
 def llm_gen_ai_request_model(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the requested language model name.
+
+    Provides the model identifier that was specified in the LLM request
+    for tracking model usage patterns and performance analysis.
+
+    Args:
+        params: LLM execution parameters containing request details
+
+    Returns:
+        ExtractorResponse: Response containing the model name or placeholder
+    """
     return ExtractorResponse(content=params.llm_request.model or "<unknown_model_name>")
 
 
 def llm_gen_ai_request_type(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the LLM request type.
+
+    Provides the type of LLM operation being performed, typically "chat"
+    for conversational interactions with language models.
+
+    Args:
+        params: LLM execution parameters (unused in this extractor)
+
+    Returns:
+        ExtractorResponse: Response containing "chat" as the request type
+    """
     return ExtractorResponse(content="chat" or "<unknown_type>")
 
 
 def llm_gen_ai_response_model(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the responding language model name.
+
+    Provides the actual model that generated the response, which should
+    match the requested model for verification and tracking purposes.
+
+    Args:
+        params: LLM execution parameters containing request details
+
+    Returns:
+        ExtractorResponse: Response containing the response model name or placeholder
+    """
     return ExtractorResponse(content=params.llm_request.model or "<unknown_model_name>")
 
 
 def llm_gen_ai_request_max_tokens(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the maximum output tokens configuration.
+
+    Provides the maximum number of tokens the model is allowed to generate
+    in its response, used for cost prediction and output length control.
+
+    Args:
+        params: LLM execution parameters containing request configuration
+
+    Returns:
+        ExtractorResponse: Response containing max output tokens value
+    """
     return ExtractorResponse(content=params.llm_request.config.max_output_tokens)
 
 
 def llm_gen_ai_request_temperature(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the temperature parameter for response randomness.
+
+    Provides the temperature setting that controls randomness in model
+    responses, affecting creativity and consistency of outputs.
+
+    Args:
+        params: LLM execution parameters containing request configuration
+
+    Returns:
+        ExtractorResponse: Response containing temperature value
+    """
     return ExtractorResponse(content=params.llm_request.config.temperature)
 
 
 def llm_gen_ai_request_top_p(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the top-p parameter for nucleus sampling.
+
+    Provides the top-p (nucleus sampling) setting that controls the
+    diversity of token sampling in model responses.
+
+    Args:
+        params: LLM execution parameters containing request configuration
+
+    Returns:
+        ExtractorResponse: Response containing top-p value
+    """
     return ExtractorResponse(content=params.llm_request.config.top_p)
 
 
 def llm_gen_ai_response_stop_reason(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the stop reason for response completion.
+
+    Provides information about why the model stopped generating tokens,
+    which helps identify truncation or completion patterns.
+
+    Args:
+        params: LLM execution parameters (currently not implemented)
+
+    Returns:
+        ExtractorResponse: Response containing placeholder stop reason
+    """
     return ExtractorResponse(content="<no_stop_reason_provided>")
 
 
 def llm_gen_ai_response_finish_reason(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the finish reason for response completion.
+
+    Provides information about how the model completed its response,
+    such as natural completion, token limit, or stop sequence.
+
+    Args:
+        params: LLM execution parameters (currently not implemented)
+
+    Returns:
+        ExtractorResponse: Response containing placeholder finish reason
+
+    Note:
+        - Currently returns placeholder value
+        - TODO: Update implementation for Google ADK v1.12.0
+        - Critical for understanding response quality and completeness
+    """
     # TODO: update to google-adk v1.12.0
     return ExtractorResponse(content="<no_finish_reason_provided>")
 
 
 def llm_gen_ai_usage_input_tokens(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the number of input tokens consumed.
+
+    Provides the count of tokens in the prompt and context that were
+    processed by the model, essential for cost tracking and analysis.
+
+    Args:
+        params: LLM execution parameters containing response metadata
+
+    Returns:
+        ExtractorResponse: Response containing input token count or None
+    """
     if params.llm_response.usage_metadata:
         return ExtractorResponse(
             content=params.llm_response.usage_metadata.prompt_token_count
@@ -63,6 +167,17 @@ def llm_gen_ai_usage_input_tokens(params: LLMAttributesParams) -> ExtractorRespo
 
 
 def llm_gen_ai_usage_output_tokens(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the number of output tokens generated.
+
+    Provides the count of tokens generated by the model in its response,
+    essential for cost tracking and response length analysis.
+
+    Args:
+        params: LLM execution parameters containing response metadata
+
+    Returns:
+        ExtractorResponse: Response containing output token count or None
+    """
     if params.llm_response.usage_metadata:
         return ExtractorResponse(
             content=params.llm_response.usage_metadata.candidates_token_count,
@@ -71,6 +186,17 @@ def llm_gen_ai_usage_output_tokens(params: LLMAttributesParams) -> ExtractorResp
 
 
 def llm_gen_ai_usage_total_tokens(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the total number of tokens consumed.
+
+    Provides the total count of tokens (input + output) consumed by
+    the model interaction, used for overall cost tracking.
+
+    Args:
+        params: LLM execution parameters containing response metadata
+
+    Returns:
+        ExtractorResponse: Response containing total token count or None
+    """
     if params.llm_response.usage_metadata:
         return ExtractorResponse(
             content=params.llm_response.usage_metadata.total_token_count,
@@ -82,6 +208,17 @@ def llm_gen_ai_usage_total_tokens(params: LLMAttributesParams) -> ExtractorRespo
 def llm_gen_ai_usage_cache_creation_input_tokens(
     params: LLMAttributesParams,
 ) -> ExtractorResponse:
+    """Extract the number of tokens used for cache creation.
+
+    Provides the count of tokens used for creating cached content,
+    which affects cost calculation in caching-enabled models.
+
+    Args:
+        params: LLM execution parameters containing response metadata
+
+    Returns:
+        ExtractorResponse: Response containing cache creation token count or None
+    """
     if params.llm_response.usage_metadata:
         return ExtractorResponse(
             content=params.llm_response.usage_metadata.cached_content_token_count,
@@ -93,6 +230,17 @@ def llm_gen_ai_usage_cache_creation_input_tokens(
 def llm_gen_ai_usage_cache_read_input_tokens(
     params: LLMAttributesParams,
 ) -> ExtractorResponse:
+    """Extract the number of tokens used for cache reading.
+
+    Provides the count of tokens read from cached content,
+    which affects cost calculation in caching-enabled models.
+
+    Args:
+        params: LLM execution parameters containing response metadata
+
+    Returns:
+        ExtractorResponse: Response containing cache read token count or None
+    """
     if params.llm_response.usage_metadata:
         return ExtractorResponse(
             content=params.llm_response.usage_metadata.cached_content_token_count,
@@ -101,6 +249,18 @@ def llm_gen_ai_usage_cache_read_input_tokens(
 
 
 def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract structured prompt data for span attributes.
+
+    Processes the complete conversation history from the LLM request
+    and structures it into indexed prompt messages with role, content,
+    and metadata information for analysis and debugging.
+
+    Args:
+        params: LLM execution parameters containing request content
+
+    Returns:
+        ExtractorResponse: Response containing list of structured prompt messages
+    """
     # a part is a message
     messages: list[dict] = []
     idx = 0
@@ -159,6 +319,18 @@ def llm_gen_ai_prompt(params: LLMAttributesParams) -> ExtractorResponse:
 
 
 def llm_gen_ai_completion(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract structured completion data for span attributes.
+
+    Processes the model's response content and structures it into
+    indexed completion messages with role, content, and tool call
+    information for analysis and evaluation.
+
+    Args:
+        params: LLM execution parameters containing response content
+
+    Returns:
+        ExtractorResponse: Response containing list of structured completion messages
+    """
     messages = []
 
     content = params.llm_response.content
@@ -193,6 +365,18 @@ def llm_gen_ai_completion(params: LLMAttributesParams) -> ExtractorResponse:
 
 
 def llm_gen_ai_messages(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract complete conversation messages as structured events.
+
+    Processes the entire conversation context including system instructions,
+    user messages, tool messages, and assistant responses into structured
+    events for comprehensive conversation flow analysis.
+
+    Args:
+        params: LLM execution parameters containing request content
+
+    Returns:
+        ExtractorResponse: Event list response containing structured conversation events
+    """
     events = []
 
     # system message
@@ -317,15 +501,45 @@ def llm_gen_ai_messages(params: LLMAttributesParams) -> ExtractorResponse:
 
 
 def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract streaming mode indicator.
+
+    Indicates whether the LLM request was processed in streaming mode
+    for performance analysis and debugging purposes.
+
+    Args:
+        params: LLM execution parameters (currently not implemented)
+
+    Returns:
+        ExtractorResponse: Response containing None (not implemented)
+    """
     # return params.llm_request.stream
     return ExtractorResponse(content=None)
 
 
 def llm_gen_ai_operation_name(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the operation name for LLM spans.
+
+    Provides a standardized operation name for LLM interactions,
+    enabling consistent categorization across all model calls.
+
+    Args:
+        params: LLM execution parameters (unused in this extractor)
+
+    Returns:
+        ExtractorResponse: Response containing "chat" as the operation name
+    """
     return ExtractorResponse(content="chat")
 
 
 def llm_gen_ai_span_kind(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract the span kind for LLM spans.
+
+    Provides span kind classification following OpenTelemetry semantic
+    conventions for generative AI LLM operations.
+
+    Returns:
+        ExtractorResponse: Response containing "llm" as the span kind
+    """
     return ExtractorResponse(content="llm")
 
 
@@ -452,6 +666,18 @@ def llm_gen_ai_span_kind(params: LLMAttributesParams) -> ExtractorResponse:
 
 
 def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract model choice data as span events.
+
+    Processes the model's response content and creates choice events
+    containing response metadata, content, and tool calls for
+    detailed response analysis.
+
+    Args:
+        params: LLM execution parameters containing response content
+
+    Returns:
+        ExtractorResponse: Event response containing structured choice data
+    """
     message = {}
 
     # parse content to build a message
@@ -510,18 +736,52 @@ def llm_gen_ai_choice(params: LLMAttributesParams) -> ExtractorResponse:
 
 
 def llm_input_value(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract complete LLM request data for debugging.
+
+    Provides the complete LLM request object in string format
+    for detailed debugging and analysis purposes.
+
+    Args:
+        params: LLM execution parameters containing request details
+
+    Returns:
+        ExtractorResponse: Response containing serialized request data
+    """
     return ExtractorResponse(
         content=str(params.llm_request.model_dump(exclude_none=True))
     )
 
 
 def llm_output_value(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract complete LLM response data for debugging.
+
+    Provides the complete LLM response object in string format
+    for detailed debugging and analysis purposes.
+
+    Args:
+        params: LLM execution parameters containing response details
+
+    Returns:
+        ExtractorResponse: Response containing serialized response data
+    """
     return ExtractorResponse(
         content=str(params.llm_response.model_dump(exclude_none=True))
     )
 
 
 def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorResponse:
+    """Extract available functions/tools from the LLM request.
+
+    Processes the tools dictionary from the LLM request and extracts
+    function metadata including names, descriptions, and parameters
+    for tool usage analysis and debugging.
+
+    Args:
+        params: LLM execution parameters containing request tools
+
+    Returns:
+        ExtractorResponse: Response containing list of function metadata
+    """
     functions = []
 
     for idx, (tool_name, tool_instance) in enumerate(
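The LLM extractors above read attributes from an LLMAttributesParams object (its llm_request and llm_response fields) and wrap the value in ExtractorResponse(content=...), guarding against a missing usage_metadata. A rough sketch of that token-usage logic using SimpleNamespace stand-ins rather than the real google-adk request/response types; the model name and token counts are made-up illustrative values:

from types import SimpleNamespace

# Stand-ins that only mimic the attribute paths the extractors read; the real
# params object wraps google-adk LlmRequest/LlmResponse instances.
params = SimpleNamespace(
    llm_request=SimpleNamespace(model="doubao-pro-32k", config=SimpleNamespace(temperature=0.2)),
    llm_response=SimpleNamespace(
        usage_metadata=SimpleNamespace(prompt_token_count=128, candidates_token_count=42)
    ),
)

# Mirrors llm_gen_ai_request_model: fall back to a placeholder when the model is unset.
model_name = params.llm_request.model or "<unknown_model_name>"

# Mirrors llm_gen_ai_usage_input_tokens: only read usage when metadata exists.
input_tokens = (
    params.llm_response.usage_metadata.prompt_token_count
    if params.llm_response.usage_metadata
    else None
)

print(model_name, input_tokens)  # doubao-pro-32k 128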
veadk/tracing/telemetry/attributes/extractors/tool_attributes_extractors.py
@@ -20,14 +20,50 @@ from veadk.utils.misc import safe_json_serialize
 
 
 def tool_gen_ai_operation_name(params: ToolAttributesParams) -> ExtractorResponse:
+    """Extract the operation name for tool execution spans.
+
+    Provides a standardized operation name identifier for tool execution
+    operations, enabling consistent categorization across all tool invocations.
+
+    Args:
+        params: Tool execution parameters (unused in this extractor)
+
+    Returns:
+        ExtractorResponse: Response containing "execute_tool" as the operation name
+    """
     return ExtractorResponse(content="execute_tool")
 
 
 def tool_gen_ai_span_kind(params: ToolAttributesParams) -> ExtractorResponse:
+    """Extract the span kind for tool execution spans.
+
+    Provides span kind classification following OpenTelemetry semantic
+    conventions for generative AI tool operations.
+
+    Args:
+        params: Tool execution parameters (unused in this extractor)
+
+    Returns:
+        ExtractorResponse: Response containing "tool" as the span kind
+    """
     return ExtractorResponse(content="tool")
 
 
 def tool_gen_ai_tool_message(params: ToolAttributesParams) -> ExtractorResponse:
+    """Extract tool message event data for span annotation.
+
+    Creates a structured tool message event containing tool metadata and
+    execution parameters in a format suitable for observability platforms
+    and debugging workflows.
+
+    Args:
+        params: Tool execution parameters containing tool instance and arguments
+
+    Returns:
+        ExtractorResponse: Event response with tool message data including:
+            - role: "tool" for message classification
+            - content: JSON serialized tool information
+    """
     tool_input = {
         "role": "tool",
         "content": safe_json_serialize(
@@ -42,6 +78,17 @@ def tool_gen_ai_tool_message(params: ToolAttributesParams) -> ExtractorResponse:
 
 
 def tool_gen_ai_tool_input(params: ToolAttributesParams) -> ExtractorResponse:
+    """Extract tool input data for span attributes.
+
+    Captures comprehensive tool input information including tool metadata
+    and execution parameters in JSON format for analysis and debugging.
+
+    Args:
+        params: Tool execution parameters containing tool instance and arguments
+
+    Returns:
+        ExtractorResponse: Response containing JSON serialized tool input data
+    """
     tool_input = {
         "name": params.tool.name,
         "description": params.tool.description,
@@ -53,10 +100,32 @@ def tool_gen_ai_tool_input(params: ToolAttributesParams) -> ExtractorResponse:
 
 
 def tool_gen_ai_tool_name(params: ToolAttributesParams) -> ExtractorResponse:
+    """Extract the tool name for span identification.
+
+    Provides the tool function name for identification and categorization
+    purposes in observability platforms and analysis workflows.
+
+    Args:
+        params: Tool execution parameters containing tool instance
+
+    Returns:
+        ExtractorResponse: Response containing the tool name or placeholder
+    """
     return ExtractorResponse(content=params.tool.name or "<unknown_tool_name>")
 
 
 def tool_gen_ai_tool_output(params: ToolAttributesParams) -> ExtractorResponse:
+    """Extract tool output data for span attributes.
+
+    Captures tool execution results including response data and metadata
+    in JSON format for analysis, debugging, and evaluation purposes.
+
+    Args:
+        params: Tool execution parameters containing function response event
+
+    Returns:
+        ExtractorResponse: Response containing JSON serialized tool output data
+    """
     function_response = params.function_response_event.get_function_responses()[
         0
     ].model_dump()
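The tool extractors follow the same shape: they read the tool instance and call arguments from ToolAttributesParams and serialize them to JSON for span attributes and events. A stand-in sketch of the payload tool_gen_ai_tool_input starts to assemble, limited to the name and description keys visible before the hunk is cut off; json.dumps stands in for safe_json_serialize, and the tool values are invented for illustration:

import json
from types import SimpleNamespace

# Stand-in for params.tool; real code receives a google-adk tool instance.
tool = SimpleNamespace(name="web_search", description="Search the web for a query")

# Mirrors the visible part of the tool_input dict built in tool_gen_ai_tool_input.
tool_input = {
    "name": tool.name,
    "description": tool.description,
}

print(json.dumps(tool_input))
# {"name": "web_search", "description": "Search the web for a query"}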