kagent-adk 0.6.10__tar.gz → 0.6.12__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of kagent-adk might be problematic.

Files changed (25)
  1. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/PKG-INFO +1 -1
  2. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/pyproject.toml +1 -1
  3. kagent_adk-0.6.12/src/kagent/adk/converters/error_mappings.py +60 -0
  4. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/converters/event_converter.py +6 -3
  5. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/models/_openai.py +50 -44
  6. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/types.py +11 -4
  7. kagent_adk-0.6.12/tests/unittests/converters/test_event_converter.py +119 -0
  8. kagent_adk-0.6.12/tests/unittests/models/__init__.py +0 -0
  9. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/.gitignore +0 -0
  10. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/.python-version +0 -0
  11. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/README.md +0 -0
  12. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/__init__.py +0 -0
  13. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/_a2a.py +0 -0
  14. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/_agent_executor.py +0 -0
  15. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/_session_service.py +0 -0
  16. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/_token.py +0 -0
  17. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/cli.py +0 -0
  18. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/converters/__init__.py +0 -0
  19. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/converters/part_converter.py +0 -0
  20. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/converters/request_converter.py +0 -0
  21. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/src/kagent/adk/models/__init__.py +0 -0
  22. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/tests/__init__.py +0 -0
  23. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/tests/unittests/__init__.py +0 -0
  24. {kagent_adk-0.6.10/tests/unittests/models → kagent_adk-0.6.12/tests/unittests/converters}/__init__.py +0 -0
  25. {kagent_adk-0.6.10 → kagent_adk-0.6.12}/tests/unittests/models/test_openai.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kagent-adk
- Version: 0.6.10
+ Version: 0.6.12
  Summary: kagent-adk is an sdk for integrating adk agents with kagent
  Requires-Python: >=3.12.11
  Requires-Dist: a2a-sdk>=0.3.1
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
  [project]
  name = "kagent-adk"
- version = "0.6.10"
+ version = "0.6.12"
  description = "kagent-adk is an sdk for integrating adk agents with kagent"
  readme = "README.md"
  requires-python = ">=3.12.11"
@@ -0,0 +1,60 @@
+ """Error code to user-friendly message mappings for ADK events.
+
+ This module provides mappings from Google GenAI finish reasons to user-friendly
+ error messages, excluding STOP which is a normal completion reason.
+ """
+
+ from typing import Dict, Optional
+
+ from google.genai import types as genai_types
+
+ # Error code to user-friendly message mappings
+ # Based on Google GenAI types.py FinishReason enum (excluding STOP)
+ ERROR_CODE_MESSAGES: Dict[str, str] = {
+     # Length and token limits
+     genai_types.FinishReason.MAX_TOKENS: "Response was truncated due to maximum token limit. Try asking a shorter question or breaking it into parts.",
+     # Safety and content filtering
+     genai_types.FinishReason.SAFETY: "Response was blocked due to safety concerns. Please rephrase your request to avoid potentially harmful content.",
+     genai_types.FinishReason.RECITATION: "Response was blocked due to unauthorized citations. Please rephrase your request.",
+     genai_types.FinishReason.BLOCKLIST: "Response was blocked due to restricted terminology. Please rephrase your request using different words.",
+     genai_types.FinishReason.PROHIBITED_CONTENT: "Response was blocked due to prohibited content. Please rephrase your request.",
+     genai_types.FinishReason.SPII: "Response was blocked due to sensitive personal information concerns. Please avoid including personal details.",
+     # Function calling errors
+     genai_types.FinishReason.MALFORMED_FUNCTION_CALL: "The agent generated an invalid function call. This may be due to complex input data. Try rephrasing your request or breaking it into simpler steps.",
+     # Generic fallback
+     genai_types.FinishReason.OTHER: "An unexpected error occurred during processing. Please try again or rephrase your request.",
+ }
+
+ # Normal completion reasons that should not be treated as errors
+ NORMAL_COMPLETION_REASONS = {
+     genai_types.FinishReason.STOP,  # Normal completion
+ }
+
+ # Default error message when no specific mapping exists
+ DEFAULT_ERROR_MESSAGE = "An error occurred during processing"
+
+
+ def _get_error_message(error_code: Optional[str]) -> str:
+     """Get a user-friendly error message for the given error code.
+
+     Args:
+         error_code: The error code from the ADK event (e.g., finish_reason)
+
+     Returns:
+         User-friendly error message string
+     """
+
+     # Return mapped message or default
+     return ERROR_CODE_MESSAGES.get(error_code, DEFAULT_ERROR_MESSAGE)
+
+
+ def _is_normal_completion(error_code: Optional[str]) -> bool:
+     """Check if the error code represents normal completion rather than an error.
+
+     Args:
+         error_code: The error code to check
+
+     Returns:
+         True if this is a normal completion reason, False otherwise
+     """
+     return error_code in NORMAL_COMPLETION_REASONS
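
The helpers above are internal to the converters package, but their behavior is easy to see in isolation. A minimal usage sketch, assuming the module path implied by the file list; the finish reasons passed in are just examples:

    from google.genai import types as genai_types
    from kagent.adk.converters.error_mappings import _get_error_message, _is_normal_completion

    # STOP is a normal completion, so it is filtered out rather than reported as an error
    assert _is_normal_completion(genai_types.FinishReason.STOP) is True

    # Mapped finish reasons resolve to a user-facing message;
    # anything unmapped falls back to DEFAULT_ERROR_MESSAGE
    print(_get_error_message(genai_types.FinishReason.MAX_TOKENS))
    print(_get_error_message("SOMETHING_UNEXPECTED"))
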
@@ -20,6 +20,7 @@ from kagent.core.a2a import (
      get_kagent_metadata_key,
  )
 
+ from .error_mappings import _get_error_message, _is_normal_completion
  from .part_converter import (
      convert_genai_part_to_a2a_part,
  )
@@ -27,7 +28,6 @@ from .part_converter import (
  # Constants
 
  ARTIFACT_ID_SEPARATOR = "-"
- DEFAULT_ERROR_MESSAGE = "An error occurred during processing"
 
  # Logger
  logger = logging.getLogger("kagent_adk." + __name__)
@@ -191,13 +191,16 @@ def _create_error_status_event(
      Returns:
          A TaskStatusUpdateEvent with FAILED state.
      """
-     error_message = getattr(event, "error_message", None) or DEFAULT_ERROR_MESSAGE
+     error_message = getattr(event, "error_message", None)
 
      # Get context metadata and add error code
      event_metadata = _get_context_metadata(event, invocation_context)
      if event.error_code:
          event_metadata[get_kagent_metadata_key("error_code")] = str(event.error_code)
 
+     if not error_message:
+         error_message = _get_error_message(event.error_code)
+
      return TaskStatusUpdateEvent(
          task_id=task_id,
          context_id=context_id,
@@ -298,7 +301,7 @@ def convert_event_to_a2a_events(
 
      try:
          # Handle error scenarios
-         if event.error_code:
+         if event.error_code and not _is_normal_completion(event.error_code):
              error_event = _create_error_status_event(event, invocation_context, task_id, context_id)
              a2a_events.append(error_event)
 
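
Taken together, the event_converter changes mean a STOP finish reason no longer produces a FAILED status update, and events that do fail fall back to the mapped message when they carry no error_message of their own. A rough paraphrase of the new flow, not the actual function body:

    from kagent.adk.converters.error_mappings import _get_error_message, _is_normal_completion

    def _error_text_or_none(event):
        # Paraphrase of the new gating: normal completions never become FAILED updates
        if event.error_code and not _is_normal_completion(event.error_code):
            # an explicit error_message wins; otherwise fall back to the mapped text
            return getattr(event, "error_message", None) or _get_error_message(event.error_code)
        return None  # no error status event is emitted
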
@@ -9,6 +9,7 @@ from typing import TYPE_CHECKING, Any, AsyncGenerator, Iterable, Literal, Option
  from google.adk.models import BaseLlm
  from google.adk.models.llm_response import LlmResponse
  from google.genai import types
+ from google.genai.types import FunctionCall, FunctionResponse
  from openai import AsyncAzureOpenAI, AsyncOpenAI
  from openai.types.chat import (
      ChatCompletion,
@@ -27,7 +28,7 @@ from openai.types.chat.chat_completion_message_tool_call_param import (
  from openai.types.chat.chat_completion_message_tool_call_param import (
      Function as ToolCallFunction,
  )
- from openai.types.shared_params import FunctionDefinition
+ from openai.types.shared_params import FunctionDefinition, FunctionParameters
  from pydantic import Field
 
  if TYPE_CHECKING:
@@ -56,7 +57,7 @@ def _convert_content_to_openai_messages(
          messages.append(system_message)
 
      # First pass: collect all function responses to match with tool calls
-     all_function_responses = {}
+     all_function_responses: dict[str, FunctionResponse] = {}
      for content in contents:
          for part in content.parts or []:
              if part.function_response:
@@ -67,18 +68,18 @@ def _convert_content_to_openai_messages(
          role = _convert_role_to_openai(content.role)
 
          # Separate different types of parts
-         text_parts = []
-         function_calls = []
-         function_responses = []
+         text_parts: list[str] = []
+         function_calls: list[FunctionCall] = []
+         function_responses: list[FunctionResponse] = []
          image_parts = []
 
          for part in content.parts or []:
              if part.text:
                  text_parts.append(part.text)
              elif part.function_call:
-                 function_calls.append(part)
+                 function_calls.append(part.function_call)
              elif part.function_response:
-                 function_responses.append(part)
+                 function_responses.append(part.function_response)
              elif part.inline_data and part.inline_data.mime_type and part.inline_data.mime_type.startswith("image"):
                  if part.inline_data.data:
                      image_data = base64.b64encode(part.inline_data.data).decode()
@@ -98,43 +99,43 @@ def _convert_content_to_openai_messages(
 
              for func_call in function_calls:
                  tool_call_function: ToolCallFunction = {
-                     "name": func_call.function_call.name or "",
-                     "arguments": str(func_call.function_call.args) if func_call.function_call.args else "{}",
-                 }
-                 tool_call_id = func_call.function_call.id or "call_1"
-                 tool_call: ChatCompletionMessageToolCallParam = {
-                     "id": tool_call_id,
-                     "type": "function",
-                     "function": tool_call_function,
+                     "name": func_call.name or "",
+                     "arguments": json.dumps(func_call.args) if func_call.args else "{}",
                  }
+                 tool_call_id = func_call.id or "call_1"
+                 tool_call = ChatCompletionMessageToolCallParam(
+                     id=tool_call_id,
+                     type="function",
+                     function=tool_call_function,
+                 )
                  tool_calls.append(tool_call)
 
                  # Check if we have a response for this tool call
                  if tool_call_id in all_function_responses:
                      func_response = all_function_responses[tool_call_id]
-                     tool_message: ChatCompletionToolMessageParam = {
-                         "role": "tool",
-                         "tool_call_id": tool_call_id,
-                         "content": str(func_response.response.get("result", "")) if func_response.response else "",
-                     }
+                     tool_message = ChatCompletionToolMessageParam(
+                         role="tool",
+                         tool_call_id=tool_call_id,
+                         content=str(func_response.response.get("result", "")) if func_response.response else "",
+                     )
                      tool_response_messages.append(tool_message)
                  else:
                      # If no response is available, create a placeholder response
                      # This prevents the OpenAI API error
-                     tool_message: ChatCompletionToolMessageParam = {
-                         "role": "tool",
-                         "tool_call_id": tool_call_id,
-                         "content": "No response available for this function call.",
-                     }
+                     tool_message = ChatCompletionToolMessageParam(
+                         role="tool",
+                         tool_call_id=tool_call_id,
+                         content="No response available for this function call.",
+                     )
                      tool_response_messages.append(tool_message)
 
              # Create assistant message with tool calls
              text_content = "\n".join(text_parts) if text_parts else None
-             assistant_message: ChatCompletionAssistantMessageParam = {
-                 "role": "assistant",
-                 "content": text_content,
-                 "tool_calls": tool_calls,
-             }
+             assistant_message = ChatCompletionAssistantMessageParam(
+                 role="assistant",
+                 content=text_content,
+                 tool_calls=tool_calls,
+             )
              messages.append(assistant_message)
 
              # Add all tool response messages immediately after the assistant message
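
The switch from str(func_call.args) to json.dumps(func_call.args) is the substantive fix here: OpenAI expects the tool-call arguments field to be a JSON string, and Python's repr of a dict is not valid JSON. A quick illustration with made-up arguments:

    import json

    args = {"location": "Berlin", "units": "metric"}  # illustrative function-call args
    print(str(args))         # {'location': 'Berlin', 'units': 'metric'}  -> single quotes, not JSON
    print(json.dumps(args))  # {"location": "Berlin", "units": "metric"}  -> what the API can parse
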
@@ -145,22 +146,22 @@ def _convert_content_to_openai_messages(
          if role == "user":
              if image_parts and text_parts:
                  # Multi-modal content
-                 text_part: ChatCompletionContentPartTextParam = {"type": "text", "text": "\n".join(text_parts)}
+                 text_part = ChatCompletionContentPartTextParam(type="text", text="\n".join(text_parts))
                  content_parts = [text_part] + image_parts
-                 user_message: ChatCompletionUserMessageParam = {"role": "user", "content": content_parts}
+                 user_message = ChatCompletionUserMessageParam(role="user", content=content_parts)
              elif image_parts:
                  # Image only
-                 user_message: ChatCompletionUserMessageParam = {"role": "user", "content": image_parts}
+                 user_message = ChatCompletionUserMessageParam(role="user", content=image_parts)
              else:
                  # Text only
-                 user_message: ChatCompletionUserMessageParam = {"role": "user", "content": "\n".join(text_parts)}
+                 user_message = ChatCompletionUserMessageParam(role="user", content="\n".join(text_parts))
              messages.append(user_message)
          elif role == "assistant":
              # Assistant messages with text (no tool calls)
-             assistant_message: ChatCompletionAssistantMessageParam = {
-                 "role": "assistant",
-                 "content": "\n".join(text_parts),
-             }
+             assistant_message = ChatCompletionAssistantMessageParam(
+                 role="assistant",
+                 content="\n".join(text_parts),
+             )
              messages.append(assistant_message)
 
      return messages
@@ -197,10 +198,10 @@ def _convert_tools_to_openai(tools: list[types.Tool]) -> list[ChatCompletionTool
      if tool.function_declarations:
          for func_decl in tool.function_declarations:
              # Build function definition
-             function_def: FunctionDefinition = {
-                 "name": func_decl.name or "",
-                 "description": func_decl.description or "",
-             }
+             function_def = FunctionDefinition(
+                 name=func_decl.name or "",
+                 description=func_decl.description or "",
+             )
 
              # Always include parameters field, even if empty
              properties = {}
@@ -219,7 +220,7 @@ def _convert_tools_to_openai(tools: list[types.Tool]) -> list[ChatCompletionTool
              function_def["parameters"] = {"type": "object", "properties": properties, "required": required}
 
              # Create the tool param
-             openai_tool: ChatCompletionToolParam = {"type": "function", "function": function_def}
+             openai_tool = ChatCompletionToolParam(type="function", function=function_def)
              openai_tools.append(openai_tool)
 
      return openai_tools
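
The constructor-style calls above (FunctionDefinition(...), ChatCompletionToolParam(...) and the message param types) are TypedDict types in the openai Python package, so calling them still produces plain dicts; only the construction style and type checking change. A small sketch with a made-up function declaration:

    from openai.types.chat import ChatCompletionToolParam
    from openai.types.shared_params import FunctionDefinition

    function_def = FunctionDefinition(
        name="get_weather",                     # illustrative tool name
        description="Look up current weather",  # illustrative description
    )
    function_def["parameters"] = {"type": "object", "properties": {}, "required": []}
    openai_tool = ChatCompletionToolParam(type="function", function=function_def)

    assert openai_tool == {"type": "function", "function": function_def}  # still an ordinary dict
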
@@ -389,6 +390,7 @@ class AzureOpenAI(BaseOpenAI):
      api_version: Optional[str] = None
      azure_endpoint: Optional[str] = None
      azure_deployment: Optional[str] = None
+     headers: Optional[dict[str, str]] = None
 
      @cached_property
      def _client(self) -> AsyncAzureOpenAI:
@@ -407,4 +409,8 @@ class AzureOpenAI(BaseOpenAI):
                  "API key must be provided either via api_key parameter or AZURE_OPENAI_API_KEY environment variable"
              )
 
-         return AsyncAzureOpenAI(api_version=api_version, azure_endpoint=azure_endpoint, api_key=api_key)
+         default_headers = self.headers or {}
+
+         return AsyncAzureOpenAI(
+             api_version=api_version, azure_endpoint=azure_endpoint, api_key=api_key, default_headers=default_headers
+         )
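
With the new headers field, the Azure client is constructed with default_headers so every request carries them. Roughly what the updated _client property ends up doing, with illustrative values:

    from openai import AsyncAzureOpenAI

    client = AsyncAzureOpenAI(
        api_version="2024-06-01",                           # illustrative
        azure_endpoint="https://example.openai.azure.com",  # illustrative
        api_key="...",                                      # normally from config or AZURE_OPENAI_API_KEY
        default_headers={"x-team": "platform"},             # from the new `headers` field
    )
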
@@ -36,6 +36,7 @@ class RemoteAgentConfig(BaseModel):
 
  class BaseLLM(BaseModel):
      model: str
+     headers: dict[str, str] | None = None
 
 
  class OpenAI(BaseLLM):
@@ -97,20 +98,26 @@ class AgentConfig(BaseModel):
                  agent_card=f"{remote_agent.url}/{AGENT_CARD_WELL_KNOWN_PATH}",
                  description=remote_agent.description,
              )
-             mcp_toolsets.append(AgentTool(agent=remote_agent, skip_summarization=True))
+             mcp_toolsets.append(
+                 AgentTool(agent=remote_agent, skip_summarization=True)
+             )  # Get headers from model config
+
+         extra_headers = self.model.headers or {}
 
          if self.model.type == "openai":
              model = OpenAINative(model=self.model.model, base_url=self.model.base_url, type="openai")
          elif self.model.type == "anthropic":
-             model = LiteLlm(model=f"anthropic/{self.model.model}", base_url=self.model.base_url)
+             model = LiteLlm(
+                 model=f"anthropic/{self.model.model}", base_url=self.model.base_url, extra_headers=extra_headers
+             )
          elif self.model.type == "gemini_vertex_ai":
              model = GeminiLLM(model=self.model.model)
          elif self.model.type == "gemini_anthropic":
              model = ClaudeLLM(model=self.model.model)
          elif self.model.type == "ollama":
-             model = LiteLlm(model=f"ollama_chat/{self.model.model}")
+             model = LiteLlm(model=f"ollama_chat/{self.model.model}", extra_headers=extra_headers)
          elif self.model.type == "azure_openai":
-             model = OpenAIAzure(model=self.model.model, type="azure_openai")
+             model = OpenAIAzure(model=self.model.model, type="azure_openai", headers=extra_headers)
          elif self.model.type == "gemini":
              model = self.model.model
          else:
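
The headers declared on the model config are what feed extra_headers for the LiteLLM-backed providers and the Azure default_headers above. A minimal sketch of the config side, assuming only the fields the diff shows on BaseLLM (concrete subclasses such as OpenAI add their own fields), with illustrative values:

    from kagent.adk.types import BaseLLM

    llm_cfg = BaseLLM(
        model="gpt-4o-mini",                     # illustrative model name
        headers={"x-request-source": "kagent"},  # forwarded to the provider client
    )
    print(llm_cfg.headers)
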
@@ -0,0 +1,119 @@
+ from unittest.mock import Mock
+
+ import pytest
+ from a2a.types import TaskState, TaskStatusUpdateEvent
+ from google.genai import types as genai_types
+
+ from kagent.adk.converters.event_converter import convert_event_to_a2a_events
+ from kagent.core.a2a import get_kagent_metadata_key
+
+
+ def _create_mock_invocation_context():
+     """Create a mock invocation context for testing."""
+     context = Mock()
+     context.app_name = "test_app"
+     context.user_id = "test_user"
+     context.session.id = "test_session"
+     return context
+
+
+ def _create_mock_event(error_code=None, content=None, invocation_id="test_invocation", author="test_author"):
+     """Create a mock event for testing."""
+     event = Mock()
+     event.error_code = error_code
+     event.content = content
+     event.invocation_id = invocation_id
+     event.author = author
+     event.branch = None
+     event.grounding_metadata = None
+     event.custom_metadata = None
+     event.usage_metadata = None
+     event.error_message = None
+     return event
+
+
+ class TestEventConverter:
+     """Test cases for event converter functions."""
+
+     def test_convert_event_to_a2a_events(self):
+         """Test that STOP error codes with empty content don't create any events, while actual error codes create error events."""
+
+         invocation_context = _create_mock_invocation_context()
+
+         # Test case 1: Empty content with STOP error code
+         event1 = _create_mock_event(
+             error_code=genai_types.FinishReason.STOP, content=None, invocation_id="test_invocation_1"
+         )
+         result1 = convert_event_to_a2a_events(
+             event1, invocation_context, task_id="test_task_1", context_id="test_context_1"
+         )
+         error_events1 = [
+             e for e in result1 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.failed
+         ]
+         working_events1 = [
+             e for e in result1 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.working
+         ]
+         assert len(error_events1) == 0, (
+             f"Expected no error events for STOP with empty content, got {len(error_events1)}"
+         )
+         assert len(working_events1) == 0, (
+             f"Expected no working events for STOP with empty content (no content to convert), got {len(working_events1)}"
+         )
+
+         # Test case 2: Empty parts with STOP error code
+         content_mock = Mock()
+         content_mock.parts = []
+         event2 = _create_mock_event(
+             error_code=genai_types.FinishReason.STOP, content=content_mock, invocation_id="test_invocation_2"
+         )
+         result2 = convert_event_to_a2a_events(
+             event2, invocation_context, task_id="test_task_2", context_id="test_context_2"
+         )
+         error_events2 = [
+             e for e in result2 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.failed
+         ]
+         working_events2 = [
+             e for e in result2 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.working
+         ]
+         assert len(error_events2) == 0, f"Expected no error events for STOP with empty parts, got {len(error_events2)}"
+         assert len(working_events2) == 0, (
+             f"Expected no working events for STOP with empty parts (no content to convert), got {len(working_events2)}"
+         )
+
+         # Test case 3: Missing content with STOP error code
+         event3 = _create_mock_event(
+             error_code=genai_types.FinishReason.STOP, content=None, invocation_id="test_invocation_3"
+         )
+         result3 = convert_event_to_a2a_events(
+             event3, invocation_context, task_id="test_task_3", context_id="test_context_3"
+         )
+         error_events3 = [
+             e for e in result3 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.failed
+         ]
+         working_events3 = [
+             e for e in result3 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.working
+         ]
+         assert len(error_events3) == 0, (
+             f"Expected no error events for STOP with missing content, got {len(error_events3)}"
+         )
+         assert len(working_events3) == 0, (
+             f"Expected no working events for STOP with missing content (no content to convert), got {len(working_events3)}"
+         )
+
+         # Test case 4: Actual error code should create error event
+         event4 = _create_mock_event(
+             error_code=genai_types.FinishReason.MALFORMED_FUNCTION_CALL, content=None, invocation_id="test_invocation_4"
+         )
+         result4 = convert_event_to_a2a_events(
+             event4, invocation_context, task_id="test_task_4", context_id="test_context_4"
+         )
+         error_events4 = [
+             e for e in result4 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.failed
+         ]
+         assert len(error_events4) == 1, f"Expected 1 error event for MALFORMED_FUNCTION_CALL, got {len(error_events4)}"
+
+         # Check that the error event has the correct error code in metadata
+         error_event = error_events4[0]
+         error_code_key = get_kagent_metadata_key("error_code")
+         assert error_code_key in error_event.metadata
+         assert error_event.metadata[error_code_key] == str(genai_types.FinishReason.MALFORMED_FUNCTION_CALL)