kagent-adk 0.6.11__tar.gz → 0.6.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kagent-adk might be problematic.
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/PKG-INFO +1 -1
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/pyproject.toml +1 -1
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/_session_service.py +0 -4
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/models/_openai.py +50 -44
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/types.py +33 -13
- kagent_adk-0.6.14/tests/unittests/converters/test_event_converter.py +119 -0
- kagent_adk-0.6.14/tests/unittests/models/__init__.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/.gitignore +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/.python-version +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/README.md +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/__init__.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/_a2a.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/_agent_executor.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/_token.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/cli.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/converters/__init__.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/converters/error_mappings.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/converters/event_converter.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/converters/part_converter.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/converters/request_converter.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/src/kagent/adk/models/__init__.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/tests/__init__.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/tests/unittests/__init__.py +0 -0
- {kagent_adk-0.6.11/tests/unittests/models → kagent_adk-0.6.14/tests/unittests/converters}/__init__.py +0 -0
- {kagent_adk-0.6.11 → kagent_adk-0.6.14}/tests/unittests/models/test_openai.py +0 -0
src/kagent/adk/_session_service.py

@@ -24,10 +24,6 @@ class KAgentSessionService(BaseSessionService):
         super().__init__()
         self.client = client
 
-    async def _get_user_id(self) -> str:
-        """Get the default user ID. Override this method to implement custom user ID logic."""
-        return "admin@kagent.dev"
-
     @override
     async def create_session(
         self,
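In 0.6.11 this hook pinned every session to `admin@kagent.dev` unless a deployment overrode it, roughly like so (hypothetical subclass, shown only to illustrate the old extension point):

    class TenantSessionService(KAgentSessionService):
        # Hypothetical override of the 0.6.11 hook; the base implementation
        # returned "admin@kagent.dev" unconditionally.
        async def _get_user_id(self) -> str:
            return "alice@example.com"

0.6.14 deletes the hook and its hardcoded default, so the implicit admin identity is gone.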
src/kagent/adk/models/_openai.py

@@ -9,6 +9,7 @@ from typing import TYPE_CHECKING, Any, AsyncGenerator, Iterable, Literal, Option
 from google.adk.models import BaseLlm
 from google.adk.models.llm_response import LlmResponse
 from google.genai import types
+from google.genai.types import FunctionCall, FunctionResponse
 from openai import AsyncAzureOpenAI, AsyncOpenAI
 from openai.types.chat import (
     ChatCompletion,
@@ -27,7 +28,7 @@ from openai.types.chat.chat_completion_message_tool_call_param import (
 from openai.types.chat.chat_completion_message_tool_call_param import (
     Function as ToolCallFunction,
 )
-from openai.types.shared_params import FunctionDefinition
+from openai.types.shared_params import FunctionDefinition, FunctionParameters
 from pydantic import Field
 
 if TYPE_CHECKING:
@@ -56,7 +57,7 @@ def _convert_content_to_openai_messages(
         messages.append(system_message)
 
     # First pass: collect all function responses to match with tool calls
-    all_function_responses = {}
+    all_function_responses: dict[str, FunctionResponse] = {}
    for content in contents:
         for part in content.parts or []:
             if part.function_response:
@@ -67,18 +68,18 @@ def _convert_content_to_openai_messages(
         role = _convert_role_to_openai(content.role)
 
         # Separate different types of parts
-        text_parts = []
-        function_calls = []
-        function_responses = []
+        text_parts: list[str] = []
+        function_calls: list[FunctionCall] = []
+        function_responses: list[FunctionResponse] = []
         image_parts = []
 
         for part in content.parts or []:
             if part.text:
                 text_parts.append(part.text)
             elif part.function_call:
-                function_calls.append(part)
+                function_calls.append(part.function_call)
             elif part.function_response:
-                function_responses.append(part)
+                function_responses.append(part.function_response)
             elif part.inline_data and part.inline_data.mime_type and part.inline_data.mime_type.startswith("image"):
                 if part.inline_data.data:
                     image_data = base64.b64encode(part.inline_data.data).decode()
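This is the substantive fix in the hunk above: the lists previously collected whole `Part` wrappers, while the new code collects the inner `FunctionCall`/`FunctionResponse` objects, which is what the later conversion steps actually read. A minimal sketch of the distinction using the google.genai types:

    from google.genai import types

    part = types.Part(function_call=types.FunctionCall(name="get_weather", args={"city": "Paris"}))

    # 0.6.11 appended `part` itself; 0.6.14 appends `part.function_call`,
    # so consumers can read .name, .args and .id without unwrapping.
    assert part.function_call.name == "get_weather"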
@@ -98,43 +99,43 @@ def _convert_content_to_openai_messages(
 
         for func_call in function_calls:
             tool_call_function: ToolCallFunction = {
-                "name": func_call.function_call.name or "",
-                "arguments": json.dumps(func_call.function_call.args) if func_call.function_call.args else "{}",
+                "name": func_call.name or "",
+                "arguments": json.dumps(func_call.args) if func_call.args else "{}",
             }
-            tool_call_id = func_call.function_call.id or "call_1"
-            tool_call: ChatCompletionMessageToolCallParam = {
-                "id": tool_call_id,
-                "type": "function",
-                "function": tool_call_function,
-            }
+            tool_call_id = func_call.id or "call_1"
+            tool_call = ChatCompletionMessageToolCallParam(
+                id=tool_call_id,
+                type="function",
+                function=tool_call_function,
+            )
             tool_calls.append(tool_call)
 
             # Check if we have a response for this tool call
             if tool_call_id in all_function_responses:
                 func_response = all_function_responses[tool_call_id]
-                tool_message: ChatCompletionToolMessageParam = {
-                    "role": "tool",
-                    "tool_call_id": tool_call_id,
-                    "content": str(func_response.response.get("result", "")) if func_response.response else "",
-                }
+                tool_message = ChatCompletionToolMessageParam(
+                    role="tool",
+                    tool_call_id=tool_call_id,
+                    content=str(func_response.response.get("result", "")) if func_response.response else "",
+                )
                 tool_response_messages.append(tool_message)
             else:
                 # If no response is available, create a placeholder response
                 # This prevents the OpenAI API error
-                tool_message: ChatCompletionToolMessageParam = {
-                    "role": "tool",
-                    "tool_call_id": tool_call_id,
-                    "content": "No response available for this function call.",
-                }
+                tool_message = ChatCompletionToolMessageParam(
+                    role="tool",
+                    tool_call_id=tool_call_id,
+                    content="No response available for this function call.",
+                )
                 tool_response_messages.append(tool_message)
 
         # Create assistant message with tool calls
         text_content = "\n".join(text_parts) if text_parts else None
-        assistant_message: ChatCompletionAssistantMessageParam = {
-            "role": "assistant",
-            "content": text_content,
-            "tool_calls": tool_calls,
-        }
+        assistant_message = ChatCompletionAssistantMessageParam(
+            role="assistant",
+            content=text_content,
+            tool_calls=tool_calls,
+        )
         messages.append(assistant_message)
 
         # Add all tool response messages immediately after the assistant message
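The dict-literal to constructor-call rewrite above is behavior preserving: the OpenAI `*Param` types are `TypedDict`s, and calling a `TypedDict` builds an ordinary dict while letting type checkers validate the keys at the call site. A minimal illustration with a stand-in type (not the real openai class):

    from typing import TypedDict

    class ToolCallParam(TypedDict):  # stand-in for ChatCompletionMessageToolCallParam
        id: str
        type: str

    # Both spellings produce the same plain dict at runtime.
    assert ToolCallParam(id="call_1", type="function") == {"id": "call_1", "type": "function"}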
@@ -145,22 +146,22 @@ def _convert_content_to_openai_messages(
         if role == "user":
             if image_parts and text_parts:
                 # Multi-modal content
-                text_part: ChatCompletionContentPartTextParam = {"type": "text", "text": "\n".join(text_parts)}
+                text_part = ChatCompletionContentPartTextParam(type="text", text="\n".join(text_parts))
                 content_parts = [text_part] + image_parts
-                user_message: ChatCompletionUserMessageParam = {"role": "user", "content": content_parts}
+                user_message = ChatCompletionUserMessageParam(role="user", content=content_parts)
             elif image_parts:
                 # Image only
-                user_message: ChatCompletionUserMessageParam = {"role": "user", "content": image_parts}
+                user_message = ChatCompletionUserMessageParam(role="user", content=image_parts)
             else:
                 # Text only
-                user_message: ChatCompletionUserMessageParam = {"role": "user", "content": "\n".join(text_parts)}
+                user_message = ChatCompletionUserMessageParam(role="user", content="\n".join(text_parts))
             messages.append(user_message)
         elif role == "assistant":
             # Assistant messages with text (no tool calls)
-            assistant_message: ChatCompletionAssistantMessageParam = {
-                "role": "assistant",
-                "content": "\n".join(text_parts),
-            }
+            assistant_message = ChatCompletionAssistantMessageParam(
+                role="assistant",
+                content="\n".join(text_parts),
+            )
             messages.append(assistant_message)
 
     return messages
@@ -197,10 +198,10 @@ def _convert_tools_to_openai(tools: list[types.Tool]) -> list[ChatCompletionTool
         if tool.function_declarations:
             for func_decl in tool.function_declarations:
                 # Build function definition
-                function_def: FunctionDefinition = {
-                    "name": func_decl.name or "",
-                    "description": func_decl.description or "",
-                }
+                function_def = FunctionDefinition(
+                    name=func_decl.name or "",
+                    description=func_decl.description or "",
+                )
 
                 # Always include parameters field, even if empty
                 properties = {}
@@ -219,7 +220,7 @@ def _convert_tools_to_openai(tools: list[types.Tool]) -> list[ChatCompletionTool
                 function_def["parameters"] = {"type": "object", "properties": properties, "required": required}
 
                 # Create the tool param
-                openai_tool: ChatCompletionToolParam = {"type": "function", "function": function_def}
+                openai_tool = ChatCompletionToolParam(type="function", function=function_def)
                 openai_tools.append(openai_tool)
 
     return openai_tools
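Taken together, these two hunks turn each function declaration into the standard OpenAI tool shape. Roughly what one converted entry looks like at runtime (illustrative name, description, and schema):

    converted_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",                     # from func_decl.name
            "description": "Look up current weather",  # from func_decl.description
            "parameters": {"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]},
        },
    }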
@@ -389,6 +390,7 @@ class AzureOpenAI(BaseOpenAI):
     api_version: Optional[str] = None
     azure_endpoint: Optional[str] = None
     azure_deployment: Optional[str] = None
+    headers: Optional[dict[str, str]] = None
 
     @cached_property
     def _client(self) -> AsyncAzureOpenAI:
@@ -407,4 +409,8 @@ class AzureOpenAI(BaseOpenAI):
                 "API key must be provided either via api_key parameter or AZURE_OPENAI_API_KEY environment variable"
             )
 
-        return AsyncAzureOpenAI(api_version=api_version, azure_endpoint=azure_endpoint, api_key=api_key)
+        default_headers = self.headers or {}
+
+        return AsyncAzureOpenAI(
+            api_version=api_version, azure_endpoint=azure_endpoint, api_key=api_key, default_headers=default_headers
+        )
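The new `headers` field is attached to every Azure request through the client's `default_headers`. A hypothetical construction, mirroring how types.py instantiates the class (import path and header value are assumptions):

    from kagent.adk.models import AzureOpenAI  # import path assumed

    model = AzureOpenAI(
        model="gpt-4o",                          # illustrative deployment name
        type="azure_openai",
        headers={"X-Gateway-Token": "example"},  # illustrative header
    )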
src/kagent/adk/types.py

@@ -1,10 +1,12 @@
+import httpx
 import logging
-from typing import Literal
+
+from typing import Literal, Any, Union
 
 from google.adk.agents import Agent
 from google.adk.agents.base_agent import BaseAgent
 from google.adk.agents.llm_agent import ToolUnion
-from google.adk.agents.remote_a2a_agent import AGENT_CARD_WELL_KNOWN_PATH, RemoteA2aAgent
+from google.adk.agents.remote_a2a_agent import AGENT_CARD_WELL_KNOWN_PATH, DEFAULT_TIMEOUT, RemoteA2aAgent
 from google.adk.models.anthropic_llm import Claude as ClaudeLLM
 from google.adk.models.google_llm import Gemini as GeminiLLM
 from google.adk.models.lite_llm import LiteLlm
@@ -31,11 +33,14 @@ class SseMcpServerConfig(BaseModel):
 class RemoteAgentConfig(BaseModel):
     name: str
     url: str
+    headers: dict[str, Any] | None = None
+    timeout: float = DEFAULT_TIMEOUT
     description: str = ""
 
 
 class BaseLLM(BaseModel):
     model: str
+    headers: dict[str, str] | None = None
 
 
 class OpenAI(BaseLLM):
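The new `RemoteAgentConfig` fields give each remote agent its own auth headers and timeout, with `DEFAULT_TIMEOUT` applying when `timeout` is omitted. An illustrative config (all values are placeholders):

    remote = RemoteAgentConfig(
        name="billing_agent",
        url="http://billing-agent.example:8080",
        headers={"Authorization": "Bearer example-token"},
        timeout=60.0,
        description="Remote billing agent",
    )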
@@ -76,41 +81,56 @@ class AgentConfig(BaseModel):
     )
     description: str
     instruction: str
-    http_tools: list[HttpMcpServerConfig] | None = None  #
-    sse_tools: list[SseMcpServerConfig] | None = None  #
+    http_tools: list[HttpMcpServerConfig] | None = None  # Streamable HTTP MCP tools
+    sse_tools: list[SseMcpServerConfig] | None = None  # SSE MCP tools
     remote_agents: list[RemoteAgentConfig] | None = None  # remote agents
 
     def to_agent(self, name: str) -> Agent:
         if name is None or not str(name).strip():
             raise ValueError("Agent name must be a non-empty string.")
-
+        tools: list[ToolUnion] = []
         if self.http_tools:
             for http_tool in self.http_tools:  # add http tools
-
+                tools.append(MCPToolset(connection_params=http_tool.params, tool_filter=http_tool.tools))
         if self.sse_tools:
             for sse_tool in self.sse_tools:  # add stdio tools
-
+                tools.append(MCPToolset(connection_params=sse_tool.params, tool_filter=sse_tool.tools))
         if self.remote_agents:
             for remote_agent in self.remote_agents:  # Add remote agents as tools
-
+                client = None
+
+                if remote_agent.headers:
+                    client = httpx.AsyncClient(
+                        headers=remote_agent.headers, timeout=httpx.Timeout(timeout=remote_agent.timeout)
+                    )
+
+                remote_a2a_agent = RemoteA2aAgent(
                     name=remote_agent.name,
                     agent_card=f"{remote_agent.url}/{AGENT_CARD_WELL_KNOWN_PATH}",
                     description=remote_agent.description,
+                    httpx_client=client,
                 )
-
+
+                tools.append(
+                    AgentTool(agent=remote_a2a_agent, skip_summarization=True)
+                )  # Get headers from model config
+
+        extra_headers = self.model.headers or {}
 
         if self.model.type == "openai":
             model = OpenAINative(model=self.model.model, base_url=self.model.base_url, type="openai")
         elif self.model.type == "anthropic":
-            model = LiteLlm(model=f"anthropic/{self.model.model}", base_url=self.model.base_url)
+            model = LiteLlm(
+                model=f"anthropic/{self.model.model}", base_url=self.model.base_url, extra_headers=extra_headers
+            )
         elif self.model.type == "gemini_vertex_ai":
             model = GeminiLLM(model=self.model.model)
         elif self.model.type == "gemini_anthropic":
             model = ClaudeLLM(model=self.model.model)
         elif self.model.type == "ollama":
-            model = LiteLlm(model=f"ollama_chat/{self.model.model}")
+            model = LiteLlm(model=f"ollama_chat/{self.model.model}", extra_headers=extra_headers)
         elif self.model.type == "azure_openai":
-            model = OpenAIAzure(model=self.model.model, type="azure_openai")
+            model = OpenAIAzure(model=self.model.model, type="azure_openai", headers=extra_headers)
         elif self.model.type == "gemini":
             model = self.model.model
         else:
@@ -120,5 +140,5 @@ class AgentConfig(BaseModel):
             model=model,
             description=self.description,
             instruction=self.instruction,
-            tools=
+            tools=tools,
         )
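`to_agent` now threads headers along two separate paths: per-remote-agent headers travel on a dedicated httpx client handed to `RemoteA2aAgent`, while model-level headers become LiteLLM `extra_headers` (anthropic, ollama) or the Azure model's new `headers` field. A condensed sketch of the two calls (values illustrative):

    import httpx
    from google.adk.models.lite_llm import LiteLlm

    # Path 1: a remote agent's headers and timeout ride on their own client.
    client = httpx.AsyncClient(headers={"Authorization": "Bearer example"}, timeout=httpx.Timeout(timeout=60.0))

    # Path 2: anthropic/ollama models pass headers through LiteLLM.
    model = LiteLlm(model="anthropic/claude-sonnet", extra_headers={"x-team": "example"})

Note that the plain `openai` branch still constructs `OpenAINative` without headers in this version, so model-level headers only take effect for the anthropic, ollama, and azure_openai model types.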
tests/unittests/converters/test_event_converter.py

@@ -0,0 +1,119 @@
+from unittest.mock import Mock
+
+import pytest
+from a2a.types import TaskState, TaskStatusUpdateEvent
+from google.genai import types as genai_types
+
+from kagent.adk.converters.event_converter import convert_event_to_a2a_events
+from kagent.core.a2a import get_kagent_metadata_key
+
+
+def _create_mock_invocation_context():
+    """Create a mock invocation context for testing."""
+    context = Mock()
+    context.app_name = "test_app"
+    context.user_id = "test_user"
+    context.session.id = "test_session"
+    return context
+
+
+def _create_mock_event(error_code=None, content=None, invocation_id="test_invocation", author="test_author"):
+    """Create a mock event for testing."""
+    event = Mock()
+    event.error_code = error_code
+    event.content = content
+    event.invocation_id = invocation_id
+    event.author = author
+    event.branch = None
+    event.grounding_metadata = None
+    event.custom_metadata = None
+    event.usage_metadata = None
+    event.error_message = None
+    return event
+
+
+class TestEventConverter:
+    """Test cases for event converter functions."""
+
+    def test_convert_event_to_a2a_events(self):
+        """Test that STOP error codes with empty content don't create any events, while actual error codes create error events."""
+
+        invocation_context = _create_mock_invocation_context()
+
+        # Test case 1: Empty content with STOP error code
+        event1 = _create_mock_event(
+            error_code=genai_types.FinishReason.STOP, content=None, invocation_id="test_invocation_1"
+        )
+        result1 = convert_event_to_a2a_events(
+            event1, invocation_context, task_id="test_task_1", context_id="test_context_1"
+        )
+        error_events1 = [
+            e for e in result1 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.failed
+        ]
+        working_events1 = [
+            e for e in result1 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.working
+        ]
+        assert len(error_events1) == 0, (
+            f"Expected no error events for STOP with empty content, got {len(error_events1)}"
+        )
+        assert len(working_events1) == 0, (
+            f"Expected no working events for STOP with empty content (no content to convert), got {len(working_events1)}"
+        )
+
+        # Test case 2: Empty parts with STOP error code
+        content_mock = Mock()
+        content_mock.parts = []
+        event2 = _create_mock_event(
+            error_code=genai_types.FinishReason.STOP, content=content_mock, invocation_id="test_invocation_2"
+        )
+        result2 = convert_event_to_a2a_events(
+            event2, invocation_context, task_id="test_task_2", context_id="test_context_2"
+        )
+        error_events2 = [
+            e for e in result2 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.failed
+        ]
+        working_events2 = [
+            e for e in result2 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.working
+        ]
+        assert len(error_events2) == 0, f"Expected no error events for STOP with empty parts, got {len(error_events2)}"
+        assert len(working_events2) == 0, (
+            f"Expected no working events for STOP with empty parts (no content to convert), got {len(working_events2)}"
+        )
+
+        # Test case 3: Missing content with STOP error code
+        event3 = _create_mock_event(
+            error_code=genai_types.FinishReason.STOP, content=None, invocation_id="test_invocation_3"
+        )
+        result3 = convert_event_to_a2a_events(
+            event3, invocation_context, task_id="test_task_3", context_id="test_context_3"
+        )
+        error_events3 = [
+            e for e in result3 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.failed
+        ]
+        working_events3 = [
+            e for e in result3 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.working
+        ]
+        assert len(error_events3) == 0, (
+            f"Expected no error events for STOP with missing content, got {len(error_events3)}"
+        )
+        assert len(working_events3) == 0, (
+            f"Expected no working events for STOP with missing content (no content to convert), got {len(working_events3)}"
+        )
+
+        # Test case 4: Actual error code should create error event
+        event4 = _create_mock_event(
+            error_code=genai_types.FinishReason.MALFORMED_FUNCTION_CALL, content=None, invocation_id="test_invocation_4"
+        )
+        result4 = convert_event_to_a2a_events(
+            event4, invocation_context, task_id="test_task_4", context_id="test_context_4"
+        )
+        error_events4 = [
+            e for e in result4 if isinstance(e, TaskStatusUpdateEvent) and e.status.state == TaskState.failed
+        ]
+        assert len(error_events4) == 1, f"Expected 1 error event for MALFORMED_FUNCTION_CALL, got {len(error_events4)}"
+
+        # Check that the error event has the correct error code in metadata
+        error_event = error_events4[0]
+        error_code_key = get_kagent_metadata_key("error_code")
+        assert error_code_key in error_event.metadata
+        assert error_event.metadata[error_code_key] == str(genai_types.FinishReason.MALFORMED_FUNCTION_CALL)
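The new module pins down the intended semantics: `FinishReason.STOP` with no usable content must produce no `TaskStatusUpdateEvent` at all, while a genuine failure such as `MALFORMED_FUNCTION_CALL` must yield exactly one failed status event carrying the error code under the kagent metadata key. Assuming a standard dev install of the package, the module runs in isolation with:

    pytest tests/unittests/converters/test_event_converter.py -v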