kagent-adk 0.6.15__tar.gz → 0.6.17__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kagent-adk might be problematic.
- kagent_adk-0.6.17/.python-version +1 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/PKG-INFO +1 -1
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/pyproject.toml +1 -1
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/_agent_executor.py +8 -17
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/converters/part_converter.py +3 -3
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/models/_openai.py +31 -24
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/types.py +28 -8
- kagent_adk-0.6.15/.python-version +0 -1
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/.gitignore +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/README.md +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/__init__.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/_a2a.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/_session_service.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/_token.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/cli.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/converters/__init__.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/converters/error_mappings.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/converters/event_converter.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/converters/request_converter.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/src/kagent/adk/models/__init__.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/tests/__init__.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/tests/unittests/__init__.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/tests/unittests/converters/__init__.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/tests/unittests/converters/test_event_converter.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/tests/unittests/models/__init__.py +0 -0
- {kagent_adk-0.6.15 → kagent_adk-0.6.17}/tests/unittests/models/test_openai.py +0 -0
--- /dev/null
+++ kagent_adk-0.6.17/.python-version
@@ -0,0 +1 @@
+3.13.7
--- kagent_adk-0.6.15/src/kagent/adk/_agent_executor.py
+++ kagent_adk-0.6.17/src/kagent/adk/_agent_executor.py
@@ -22,6 +22,7 @@ from a2a.types import (
     TextPart,
 )
 from google.adk.runners import Runner
+from google.adk.utils.context_utils import Aclosing
 from opentelemetry import trace
 from pydantic import BaseModel
 from typing_extensions import override
@@ -145,17 +146,6 @@ class A2aAgentExecutor(AgentExecutor):
             )
         except Exception as enqueue_error:
             logger.error("Failed to publish failure event: %s", enqueue_error, exc_info=True)
-        finally:
-            # Shield cleanup from external cancellation so toolsets (e.g., MCP) can
-            # gracefully close their sessions without being torn down mid-flight.
-            try:
-                await asyncio.wait_for(asyncio.shield(runner.close()), timeout=15.0)
-            except asyncio.CancelledError:
-                # Suppress cancellation during cleanup to avoid noisy tracebacks
-                # from libraries that assume non-cancelled close semantics.
-                logger.warning("Runner.close() was cancelled; suppressing during cleanup")
-            except Exception as close_error:
-                logger.error("Error during runner.close(): %s", close_error, exc_info=True)
 
     async def _handle_request(
         self,
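Note on the deleted block: it used the standard asyncio idiom of shielding a cleanup coroutine so that outer cancellation cannot tear it down mid-flight, while wait_for still bounds the wait. A minimal sketch of that idiom, with a stand-in close() coroutine rather than this package's API:

    import asyncio

    async def close() -> None:
        # stand-in for a cleanup coroutine such as runner.close()
        await asyncio.sleep(0.1)

    async def shutdown() -> None:
        try:
            # shield() keeps close() running even if shutdown() itself is
            # cancelled; wait_for() still enforces the 15-second upper bound.
            await asyncio.wait_for(asyncio.shield(close()), timeout=15.0)
        except asyncio.CancelledError:
            # suppress cancellation noise during teardown, as the old code did
            pass

    asyncio.run(shutdown())

The 0.6.17 release removes this block; the next hunk wraps the event stream in Aclosing instead.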
@@ -203,12 +193,13 @@ class A2aAgentExecutor(AgentExecutor):
         )
 
         task_result_aggregator = TaskResultAggregator()
-        async for adk_event in runner.run_async(**run_args):
-            for a2a_event in convert_event_to_a2a_events(
-                adk_event, invocation_context, context.task_id, context.context_id
-            ):
-                task_result_aggregator.process_event(a2a_event)
-                await event_queue.enqueue_event(a2a_event)
+        async with Aclosing(runner.run_async(**run_args)) as agen:
+            async for adk_event in agen:
+                for a2a_event in convert_event_to_a2a_events(
+                    adk_event, invocation_context, context.task_id, context.context_id
+                ):
+                    task_result_aggregator.process_event(a2a_event)
+                    await event_queue.enqueue_event(a2a_event)
 
         # publish the task result event - this is final
         if (
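Aclosing is the async counterpart of contextlib.closing: leaving the async with block guarantees the wrapped generator's aclose() runs, even on early exit, an error, or cancellation. A rough sketch of the behavior, using the standard library's contextlib.aclosing as a stand-in for the ADK helper:

    import asyncio
    from contextlib import aclosing  # stand-in for google.adk's Aclosing

    async def events():
        try:
            yield "event-1"
            yield "event-2"
        finally:
            # aclosing() guarantees this cleanup runs, even on early exit
            print("generator cleanup ran")

    async def main() -> None:
        async with aclosing(events()) as agen:
            async for event in agen:
                print(event)
                break  # exiting early still triggers the finally block above

    asyncio.run(main())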
--- kagent_adk-0.6.15/src/kagent/adk/converters/part_converter.py
+++ kagent_adk-0.6.17/src/kagent/adk/converters/part_converter.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 """
-module containing utilities for conversion
+module containing utilities for conversion between A2A Part and Google GenAI Part
 """
 
 from __future__ import annotations
@@ -68,7 +68,7 @@ def convert_a2a_part_to_genai_part(
         return None
 
     if isinstance(part, a2a_types.DataPart):
-        #
+        # Convert the Data Part to funcall and function response.
         # This is mainly for converting human in the loop and auth request and
         # response.
         # TODO once A2A defined how to suervice such information, migrate below
@@ -148,7 +148,7 @@ def convert_genai_part_to_a2a_part(
 
     return a2a_types.Part(root=a2a_part)
 
-    #
+    # Convert the funcall and function response to A2A DataPart.
     # This is mainly for converting human in the loop and auth request and
     # response.
     # TODO once A2A defined how to suervice such information, migrate below
--- kagent_adk-0.6.15/src/kagent/adk/models/_openai.py
+++ kagent_adk-0.6.17/src/kagent/adk/models/_openai.py
@@ -276,10 +276,18 @@ class BaseOpenAI(BaseLlm):
     """Base class for OpenAI-compatible models."""
 
     model: str
-    base_url: Optional[str] = None
     api_key: Optional[str] = Field(default=None, exclude=True)
+    base_url: Optional[str] = None
+    frequency_penalty: Optional[float] = None
+    default_headers: Optional[dict[str, str]] = None
     max_tokens: Optional[int] = None
+    n: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    reasoning_effort: Optional[str] = None
+    seed: Optional[int] = None
     temperature: Optional[float] = None
+    timeout: Optional[int] = None
+    top_p: Optional[float] = None
 
     @classmethod
     def supported_models(cls) -> list[str]:
@@ -289,13 +297,13 @@ class BaseOpenAI(BaseLlm):
     @cached_property
     def _client(self) -> AsyncOpenAI:
         """Get the OpenAI client."""
-        kwargs = {}
-        if self.base_url:
-            kwargs["base_url"] = self.base_url
-        if self.api_key:
-            kwargs["api_key"] = self.api_key
 
-        return AsyncOpenAI(**kwargs)
+        return AsyncOpenAI(
+            api_key=self.api_key,
+            base_url=self.base_url or None,
+            default_headers=self.default_headers,
+            timeout=self.timeout,
+        )
 
     async def generate_content_async(
         self, llm_request: LlmRequest, stream: bool = False
@@ -325,10 +333,22 @@ class BaseOpenAI(BaseLlm):
             "messages": messages,
         }
 
+        if self.frequency_penalty is not None:
+            kwargs["frequency_penalty"] = self.frequency_penalty
         if self.max_tokens:
             kwargs["max_tokens"] = self.max_tokens
+        if self.n is not None:
+            kwargs["n"] = self.n
+        if self.presence_penalty is not None:
+            kwargs["presence_penalty"] = self.presence_penalty
+        if self.reasoning_effort is not None:
+            kwargs["reasoning_effort"] = self.reasoning_effort
+        if self.seed is not None:
+            kwargs["seed"] = self.seed
         if self.temperature is not None:
             kwargs["temperature"] = self.temperature
+        if self.top_p is not None:
+            kwargs["top_p"] = self.top_p
 
         # Handle tools
         if llm_request.config and llm_request.config.tools:
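The new fields map one-to-one onto standard Chat Completions sampling parameters, and the "is not None" guards keep unset fields out of the request entirely. Roughly, a fully populated model now issues a call along these lines (a sketch with illustrative values; client and messages come from the surrounding method):

    async def sample(client, messages):
        # unset fields would simply be omitted from the kwargs dict above
        return await client.chat.completions.create(
            model="gpt-4o",         # self.model
            messages=messages,
            frequency_penalty=0.5,  # self.frequency_penalty
            seed=42,                # self.seed, for more reproducible sampling
            temperature=0.2,        # self.temperature
            top_p=0.9,              # self.top_p
        )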
@@ -369,19 +389,6 @@ class OpenAI(BaseOpenAI):
 
     type: Literal["openai"]
 
-    @cached_property
-    def _client(self) -> AsyncOpenAI:
-        """Get the OpenAI client."""
-        kwargs = {}
-        if self.base_url:
-            kwargs["base_url"] = self.base_url
-        if self.api_key:
-            kwargs["api_key"] = self.api_key
-        elif "OPENAI_API_KEY" in os.environ:
-            kwargs["api_key"] = os.environ["OPENAI_API_KEY"]
-
-        return AsyncOpenAI(**kwargs)
-
 
 class AzureOpenAI(BaseOpenAI):
     """Azure OpenAI model implementation."""
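Deleting this override is safe because the openai client already performs the same fallback: when api_key is None, AsyncOpenAI resolves OPENAI_API_KEY from the environment itself. A minimal sketch, assuming the openai Python package:

    import os
    from openai import AsyncOpenAI

    os.environ.setdefault("OPENAI_API_KEY", "sk-example")  # placeholder key
    # equivalent to the deleted branch that copied os.environ["OPENAI_API_KEY"]
    client = AsyncOpenAI(api_key=None)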
@@ -390,7 +397,6 @@ class AzureOpenAI(BaseOpenAI):
     api_version: Optional[str] = None
     azure_endpoint: Optional[str] = None
     azure_deployment: Optional[str] = None
-    headers: Optional[dict[str, str]] = None
 
     @cached_property
     def _client(self) -> AsyncAzureOpenAI:
@@ -409,8 +415,9 @@ class AzureOpenAI(BaseOpenAI):
                 "API key must be provided either via api_key parameter or AZURE_OPENAI_API_KEY environment variable"
             )
 
-        default_headers = self.headers or {}
-
         return AsyncAzureOpenAI(
-            api_key=api_key, api_version=api_version, azure_endpoint=azure_endpoint, default_headers=default_headers
+            api_key=api_key,
+            api_version=api_version,
+            azure_endpoint=azure_endpoint,
+            default_headers=self.default_headers,
         )
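Net effect for Azure users: the class-specific headers field gives way to the default_headers field inherited from BaseOpenAI, which is passed straight through to the client. A hypothetical before/after at the call site:

    # 0.6.15
    AzureOpenAI(type="azure_openai", model="gpt-4o", headers={"x-team": "ml"})
    # 0.6.17
    AzureOpenAI(type="azure_openai", model="gpt-4o", default_headers={"x-team": "ml"})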
--- kagent_adk-0.6.15/src/kagent/adk/types.py
+++ kagent_adk-0.6.17/src/kagent/adk/types.py
@@ -1,8 +1,7 @@
-import httpx
 import logging
+from typing import Any, Literal, Union
 
-
-
+import httpx
 from google.adk.agents import Agent
 from google.adk.agents.base_agent import BaseAgent
 from google.adk.agents.llm_agent import ToolUnion
@@ -45,6 +44,15 @@ class BaseLLM(BaseModel):
 
 class OpenAI(BaseLLM):
     base_url: str | None = None
+    frequency_penalty: float | None = None
+    max_tokens: int | None = None
+    n: int | None = None
+    presence_penalty: float | None = None
+    reasoning_effort: str | None = None
+    seed: int | None = None
+    temperature: float | None = None
+    timeout: int | None = None
+    top_p: float | None = None
 
     type: Literal["openai"]
 
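A hypothetical instantiation of the expanded config (field names as declared above; the model field is assumed to come from BaseLLM):

    llm = OpenAI(
        type="openai",
        model="gpt-4o-mini",  # assumed to be declared on BaseLLM
        temperature=0.2,
        top_p=0.9,
        seed=7,
        timeout=30,
    )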
@@ -111,14 +119,26 @@ class AgentConfig(BaseModel):
                 httpx_client=client,
             )
 
-            tools.append(
-                AgentTool(agent=remote_a2a_agent, skip_summarization=True)
-            )  # Get headers from model config
+            tools.append(AgentTool(agent=remote_a2a_agent, skip_summarization=True))
 
         extra_headers = self.model.headers or {}
 
         if self.model.type == "openai":
-            model = OpenAINative(
+            model = OpenAINative(
+                type="openai",
+                base_url=self.model.base_url,
+                default_headers=extra_headers,
+                frequency_penalty=self.model.frequency_penalty,
+                max_tokens=self.model.max_tokens,
+                model=self.model.model,
+                n=self.model.n,
+                presence_penalty=self.model.presence_penalty,
+                reasoning_effort=self.model.reasoning_effort,
+                seed=self.model.seed,
+                temperature=self.model.temperature,
+                timeout=self.model.timeout,
+                top_p=self.model.top_p,
+            )
         elif self.model.type == "anthropic":
             model = LiteLlm(
                 model=f"anthropic/{self.model.model}", base_url=self.model.base_url, extra_headers=extra_headers
@@ -130,7 +150,7 @@ class AgentConfig(BaseModel):
         elif self.model.type == "ollama":
             model = LiteLlm(model=f"ollama_chat/{self.model.model}", extra_headers=extra_headers)
         elif self.model.type == "azure_openai":
-            model = OpenAIAzure(model=self.model.model, type="azure_openai",
+            model = OpenAIAzure(model=self.model.model, type="azure_openai", default_headers=extra_headers)
         elif self.model.type == "gemini":
             model = self.model.model
         else:
--- kagent_adk-0.6.15/.python-version
+++ /dev/null
@@ -1 +0,0 @@
-3.13.5
|