agentic-layer-sdk-adk 0.7.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,207 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[codz]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py.cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+ #poetry.toml
110
+
111
+ # pdm
112
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
113
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
114
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
115
+ #pdm.lock
116
+ #pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # pixi
121
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
122
+ #pixi.lock
123
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
124
+ # in the .venv directory. It is recommended not to include this directory in version control.
125
+ .pixi
126
+
127
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
128
+ __pypackages__/
129
+
130
+ # Celery stuff
131
+ celerybeat-schedule
132
+ celerybeat.pid
133
+
134
+ # SageMath parsed files
135
+ *.sage.py
136
+
137
+ # Environments
138
+ .env
139
+ .envrc
140
+ .venv
141
+ env/
142
+ venv/
143
+ ENV/
144
+ env.bak/
145
+ venv.bak/
146
+
147
+ # Spyder project settings
148
+ .spyderproject
149
+ .spyproject
150
+
151
+ # Rope project settings
152
+ .ropeproject
153
+
154
+ # mkdocs documentation
155
+ /site
156
+
157
+ # mypy
158
+ .mypy_cache/
159
+ .dmypy.json
160
+ dmypy.json
161
+
162
+ # Pyre type checker
163
+ .pyre/
164
+
165
+ # pytype static type analyzer
166
+ .pytype/
167
+
168
+ # Cython debug symbols
169
+ cython_debug/
170
+
171
+ # PyCharm
172
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
173
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
174
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
175
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
176
+ .idea/
177
+
178
+ # Abstra
179
+ # Abstra is an AI-powered process automation framework.
180
+ # Ignore directories containing user credentials, local state, and settings.
181
+ # Learn more at https://abstra.io/docs
182
+ .abstra/
183
+
184
+ # Visual Studio Code
185
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
186
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
187
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
188
+ # you could uncomment the following to ignore the entire vscode folder
189
+ # .vscode/
190
+
191
+ # Ruff stuff:
192
+ .ruff_cache/
193
+
194
+ # PyPI configuration file
195
+ .pypirc
196
+
197
+ # Cursor
198
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
199
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
200
+ # refer to https://docs.cursor.com/context/ignore-files
201
+ .cursorignore
202
+ .cursorindexingignore
203
+
204
+ # Marimo
205
+ marimo/_static/
206
+ marimo/_lsp/
207
+ __marimo__/
@@ -0,0 +1,95 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentic-layer-sdk-adk
3
+ Version: 0.7.2
4
+ Requires-Python: >=3.12
5
+ Requires-Dist: google-adk[a2a]
6
+ Requires-Dist: litellm
7
+ Requires-Dist: openinference-instrumentation-google-adk
8
+ Requires-Dist: opentelemetry-exporter-otlp-proto-http
9
+ Requires-Dist: opentelemetry-instrumentation-httpx
10
+ Requires-Dist: opentelemetry-instrumentation-starlette
11
+ Description-Content-Type: text/markdown
12
+
13
+ # Agentic Layer Python SDK for Google ADK
14
+
15
+ SDK for Google ADK that helps to get agents configured in the Agentic Layer quickly.
16
+
17
+ ## Features
18
+
19
+ - Configures OTEL (Tracing, Metrics, Logging)
20
+ - Converts an ADK agent into an instrumented starlette app
21
+ - Configures A2A protocol for inter-agent communication
22
+ - Offers parsing methods for sub agents and tools
23
+ - Set log level via env var `LOGLEVEL` (default: `INFO`)
24
+
25
+ ## Usage
26
+
27
+ Dependencies can be installed via pip or the tool of your choice:
28
+
29
+ ```shell
30
+ pip install agentic-layer-sdk-adk
31
+ ```
32
+
33
+ Basic usage example:
34
+
35
+ ```python
36
+ from agenticlayer.agent_to_a2a import to_a2a
37
+ from agenticlayer.config import parse_sub_agents, parse_tools
38
+ from agenticlayer.otel import setup_otel
39
+ from google.adk.agents import LlmAgent
40
+
41
+ # Set up OpenTelemetry instrumentation, logging and metrics
42
+ setup_otel()
43
+
44
+ # Parse sub agents and tools from JSON configuration
45
+ sub_agent, agent_tools = parse_sub_agents("{}")
46
+ mcp_tools = parse_tools("{}")
47
+ tools = agent_tools + mcp_tools
48
+
49
+ # Declare your ADK root agent
50
+ root_agent = LlmAgent(
51
+ name="root-agent",
52
+ sub_agents=sub_agent,
53
+ tools=tools,
54
+ # [...]
55
+ )
56
+
57
+ # Define the URL where the agent will be available from outside
58
+ # This can not be determined automatically,
59
+ # because the port is only known at runtime,
60
+ # when the starlette app is started with Uvicorn.
61
+ rpc_url = "http://localhost:8000/"
62
+
63
+ # Create starlette app with A2A protocol
64
+ app = to_a2a(root_agent, rpc_url)
65
+ ```
66
+
67
+ ## Configuration
68
+
69
+ The JSON configuration for sub agents should follow this structure:
70
+ ```json5
71
+ {
72
+ "agent_name": {
73
+ "url": "http://agent-url/.well-known/agent-card.json",
74
+ // Optional: interaction type, defaults to "tool_call"
75
+ // "transfer" for full delegation, "tool_call" for tool-like usage
76
+ "interaction_type": "transfer|tool_call"
77
+ }
78
+ }
79
+ ```
80
+
81
+ The JSON configuration for `AGENT_TOOLS` should follow this structure:
82
+ ```json5
83
+ {
84
+ "tool_name": {
85
+ "url": "https://mcp-tool-endpoint:8000/mcp",
86
+ "timeout": 30 // Optional: connect timeout in seconds
87
+ }
88
+ }
89
+ ```
90
+
91
+ ## OpenTelemetry Configuration
92
+
93
+ The SDK automatically configures OpenTelemetry observability when running `setup_otel()`. You can customize the OTLP
94
+ exporters using standard OpenTelemetry environment variables:
95
+ https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/
@@ -0,0 +1,83 @@
1
+ # Agentic Layer Python SDK for Google ADK
2
+
3
+ SDK for Google ADK that helps to get agents configured in the Agentic Layer quickly.
4
+
5
+ ## Features
6
+
7
+ - Configures OTEL (Tracing, Metrics, Logging)
8
+ - Converts an ADK agent into an instrumented starlette app
9
+ - Configures A2A protocol for inter-agent communication
10
+ - Offers parsing methods for sub agents and tools
11
+ - Set log level via env var `LOGLEVEL` (default: `INFO`)
12
+
13
+ ## Usage
14
+
15
+ Dependencies can be installed via pip or the tool of your choice:
16
+
17
+ ```shell
18
+ pip install agentic-layer-sdk-adk
19
+ ```
20
+
21
+ Basic usage example:
22
+
23
+ ```python
24
+ from agenticlayer.agent_to_a2a import to_a2a
25
+ from agenticlayer.config import parse_sub_agents, parse_tools
26
+ from agenticlayer.otel import setup_otel
27
+ from google.adk.agents import LlmAgent
28
+
29
+ # Set up OpenTelemetry instrumentation, logging and metrics
30
+ setup_otel()
31
+
32
+ # Parse sub agents and tools from JSON configuration
33
+ sub_agent, agent_tools = parse_sub_agents("{}")
34
+ mcp_tools = parse_tools("{}")
35
+ tools = agent_tools + mcp_tools
36
+
37
+ # Declare your ADK root agent
38
+ root_agent = LlmAgent(
39
+ name="root-agent",
40
+ sub_agents=sub_agent,
41
+ tools=tools,
42
+ # [...]
43
+ )
44
+
45
+ # Define the URL where the agent will be available from outside
46
+ # This cannot be determined automatically,
47
+ # because the port is only known at runtime,
48
+ # when the starlette app is started with Uvicorn.
49
+ rpc_url = "http://localhost:8000/"
50
+
51
+ # Create starlette app with A2A protocol
52
+ app = to_a2a(root_agent, rpc_url)
53
+ ```
54
+
55
+ ## Configuration
56
+
57
+ The JSON configuration for sub agents should follow this structure:
58
+ ```json5
59
+ {
60
+ "agent_name": {
61
+ "url": "http://agent-url/.well-known/agent-card.json",
62
+ // Optional: interaction type, defaults to "tool_call"
63
+ // "transfer" for full delegation, "tool_call" for tool-like usage
64
+ "interaction_type": "transfer|tool_call"
65
+ }
66
+ }
67
+ ```
68
+
69
+ The JSON configuration for `AGENT_TOOLS` should follow this structure:
70
+ ```json5
71
+ {
72
+ "tool_name": {
73
+ "url": "https://mcp-tool-endpoint:8000/mcp",
74
+ "timeout": 30 // Optional: connect timeout in seconds
75
+ }
76
+ }
77
+ ```
78
+
79
+ ## OpenTelemetry Configuration
80
+
81
+ The SDK automatically configures OpenTelemetry observability when running `setup_otel()`. You can customize the OTLP
82
+ exporters using standard OpenTelemetry environment variables:
83
+ https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/
@@ -0,0 +1,4 @@
1
+ """
2
+ SDK for the Agentic Layer.
3
+ Provides some utilities and configurations for integrating a Google ADK-based agent into the Agentic Layer.
4
+ """
@@ -0,0 +1,111 @@
1
+ """
2
+ Convert an ADK agent to an A2A Starlette application.
3
+ This is an adaptation of google.adk.a2a.utils.agent_to_a2a.
4
+ """
5
+
6
+ import logging
7
+
8
+ from a2a.server.apps import A2AStarletteApplication
9
+ from a2a.server.request_handlers import DefaultRequestHandler
10
+ from a2a.server.tasks import InMemoryTaskStore
11
+ from a2a.types import AgentCapabilities, AgentCard
12
+ from a2a.utils.constants import AGENT_CARD_WELL_KNOWN_PATH
13
+ from google.adk.a2a.executor.a2a_agent_executor import A2aAgentExecutor
14
+ from google.adk.agents.base_agent import BaseAgent
15
+ from google.adk.apps.app import App
16
+ from google.adk.artifacts.in_memory_artifact_service import InMemoryArtifactService
17
+ from google.adk.auth.credential_service.in_memory_credential_service import InMemoryCredentialService
18
+ from google.adk.memory.in_memory_memory_service import InMemoryMemoryService
19
+ from google.adk.runners import Runner
20
+ from google.adk.sessions.in_memory_session_service import InMemorySessionService
21
+ from opentelemetry.instrumentation.starlette import StarletteInstrumentor
22
+ from starlette.applications import Starlette
23
+
24
+ from .callback_tracer_plugin import CallbackTracerPlugin
25
+
26
+ logger = logging.getLogger("agenticlayer")
27
+
28
+
29
class HealthCheckFilter(logging.Filter):
    """Logging filter that drops access-log records for agent-card requests.

    The well-known agent-card path is polled as a health check, so its
    access-log entries are pure noise.
    """

    def filter(self, record: logging.LogRecord) -> bool:
        # Keep the record only when the message does not mention the card path.
        return AGENT_CARD_WELL_KNOWN_PATH not in record.getMessage()
33
+
34
+
35
def to_a2a(agent: BaseAgent, rpc_url: str) -> Starlette:
    """Wrap an ADK agent in a Starlette application that speaks the A2A protocol.

    This is an adaptation of google.adk.a2a.utils.agent_to_a2a.

    Args:
        agent: The ADK agent to expose
        rpc_url: Externally reachable URL advertised in the agent card

    Returns:
        A Starlette application that can be run with uvicorn

    Example:
        agent = MyAgent()
        rpc_url = "http://localhost:8000/"
        app = to_a2a(root_agent, rpc_url)
        # Then run with: uvicorn module:app
    """
    # The agent-card endpoint doubles as a health check; keep those requests
    # out of the uvicorn access log.
    logging.getLogger("uvicorn.access").addFilter(HealthCheckFilter())

    async def create_runner() -> Runner:
        """Build a Runner backed entirely by in-memory services."""
        adk_app = App(
            name=agent.name or "adk_agent",
            root_agent=agent,
            plugins=[CallbackTracerPlugin()],
        )
        return Runner(
            app=adk_app,
            artifact_service=InMemoryArtifactService(),
            session_service=InMemorySessionService(),  # type: ignore
            memory_service=InMemoryMemoryService(),  # type: ignore
            credential_service=InMemoryCredentialService(),  # type: ignore
        )

    # Wire up the A2A request-handling pipeline.
    executor = A2aAgentExecutor(
        runner=create_runner,
    )
    handler = DefaultRequestHandler(agent_executor=executor, task_store=InMemoryTaskStore())

    # Describe this agent so peers can discover it.
    card = AgentCard(
        name=agent.name,
        description=agent.description,
        url=rpc_url,
        version="0.1.0",
        capabilities=AgentCapabilities(),
        skills=[],
        default_input_modes=["text/plain"],
        default_output_modes=["text/plain"],
        supports_authenticated_extended_card=False,
    )
    logger.info("Built agent card: %s", card.model_dump_json())

    # Mount the A2A routes onto a fresh Starlette app and instrument it with
    # OpenTelemetry before handing it back to the caller.
    starlette_app = Starlette()
    a2a_application = A2AStarletteApplication(
        agent_card=card,
        http_handler=handler,
    )
    a2a_application.add_routes_to_app(
        starlette_app,
    )
    StarletteInstrumentor().instrument_app(starlette_app)

    return starlette_app
@@ -0,0 +1,170 @@
1
+ """
2
+ A custom plugin that traces agent, model, and tool callbacks using OpenTelemetry.
3
+ This is an early draft, created in conjunction with the Observability Dashboard:
4
+ https://github.com/agentic-layer/observability-dashboard
5
+ """
6
+
7
+ import re
8
+ from typing import Any, Dict, Optional
9
+
10
+ from google.adk.agents import BaseAgent
11
+ from google.adk.agents.callback_context import CallbackContext
12
+ from google.adk.models.llm_request import LlmRequest
13
+ from google.adk.models.llm_response import LlmResponse
14
+ from google.adk.plugins.base_plugin import BasePlugin
15
+ from google.adk.tools.base_tool import BaseTool
16
+ from google.adk.tools.tool_context import ToolContext
17
+ from google.genai import types
18
+ from opentelemetry import trace
19
+
20
+ # Pattern to match any key containing 'structuredcontent' or 'structured_content', case-insensitive
21
+ STRUCTURED_CONTENT_PATTERN = re.compile(r"\.structured_?content", re.IGNORECASE)
22
+
23
+
24
+ def _span_attribute_item(key: str, data: Any) -> tuple[str, Any]:
25
+ """Convert data to a span attribute-compatible type."""
26
+ if isinstance(data, (str, bool, int, float)): # only these types are supported by span attributes
27
+ return key, data
28
+ else:
29
+ return key, str(data)
30
+
31
+
32
+ def _flatten_dict(
33
+ data: Any, parent_key: str = "", sep: str = ".", parent_key_lower: Optional[str] = None
34
+ ) -> Dict[str, Any]:
35
+ if parent_key_lower is None:
36
+ parent_key_lower = parent_key.lower()
37
+
38
+ if STRUCTURED_CONTENT_PATTERN.search(parent_key_lower):
39
+ return {} # skip structured content as it can add too many attributes
40
+
41
+ items: list[tuple[str, Any]] = []
42
+ if isinstance(data, dict):
43
+ for k, v in data.items():
44
+ new_key = f"{parent_key}{sep}{k}" if parent_key else k
45
+ new_key_lower = new_key.lower()
46
+ items.extend(_flatten_dict(v, new_key, sep=sep, parent_key_lower=new_key_lower).items())
47
+ elif isinstance(data, list):
48
+ for i, v in enumerate(data):
49
+ new_key = f"{parent_key}{sep}{i}"
50
+ new_key_lower = new_key.lower()
51
+ items.extend(_flatten_dict(v, new_key, sep=sep, parent_key_lower=new_key_lower).items())
52
+ elif data is not None:
53
+ items.append(_span_attribute_item(parent_key, data))
54
+ return dict(items)
55
+
56
+
57
def _set_span_attributes_from_callback_context(span: Any, callback_context: CallbackContext) -> None:
    """Record common agent/session identifiers and state entries on *span*."""
    state = callback_context.state.to_dict()
    # Prefer an explicit conversation_id from state; fall back to the session id.
    # NOTE(review): reaches into the private _invocation_context — confirm no
    # public accessor for the session id exists.
    conversation_id = state.get("conversation_id") or callback_context._invocation_context.session.id

    span.set_attribute("agent_name", callback_context.agent_name)
    span.set_attribute("conversation_id", conversation_id)
    span.set_attribute("invocation_id", callback_context.invocation_id)
    span.set_attributes(state)

    user_content = callback_context.user_content
    if user_content:
        span.set_attributes(_flatten_dict(user_content.model_dump(), parent_key="user_content"))
68
+
69
+
70
def _set_span_attributes_for_tool(span: Any, tool: BaseTool, args: Dict[str, Any], tool_context: ToolContext) -> None:
    """Record tool-call details (context, tool name, actions, arguments) on *span*."""
    _set_span_attributes_from_callback_context(span, tool_context)
    span.set_attribute("tool_name", tool.name)
    actions = tool_context.actions.model_dump()
    span.set_attributes(_flatten_dict(actions, parent_key="tool_context.actions"))
    span.set_attributes(_flatten_dict(args, parent_key="args"))
75
+
76
+
77
class CallbackTracerPlugin(BasePlugin):
    """A custom ADK plugin that traces agent, model, and tool callbacks using OpenTelemetry.

    Every callback opens a short-lived span named after the callback and
    attaches flattened context/state attributes to it. All callbacks return
    None so the normal ADK flow is never altered.
    """

    def __init__(self) -> None:
        super().__init__("CallbackTracerPlugin")

    @staticmethod
    def _span(name: str) -> Any:
        """Start a new current span for a callback (usable as a context manager)."""
        return trace.get_tracer(__name__).start_as_current_span(name)

    @staticmethod
    def _record_llm_request(span: Any, llm_request: LlmRequest) -> None:
        """Attach the model name and the most recent request content to *span*."""
        span.set_attribute("model", llm_request.model or "unknown")
        if llm_request.contents:
            # Only the last content part (the latest user input) is recorded.
            span.set_attributes(
                _flatten_dict(llm_request.contents[-1].model_dump(), parent_key="llm_request.content")
            )

    async def before_agent_callback(
        self, *, agent: BaseAgent, callback_context: CallbackContext
    ) -> Optional[types.Content]:
        with self._span("before_agent_callback") as span:
            _set_span_attributes_from_callback_context(span, callback_context)
        return None

    async def after_agent_callback(
        self, *, agent: BaseAgent, callback_context: CallbackContext
    ) -> Optional[types.Content]:
        with self._span("after_agent_callback") as span:
            _set_span_attributes_from_callback_context(span, callback_context)
        return None

    async def before_model_callback(
        self, *, callback_context: CallbackContext, llm_request: LlmRequest
    ) -> Optional[LlmResponse]:
        with self._span("before_model_callback") as span:
            _set_span_attributes_from_callback_context(span, callback_context)
            self._record_llm_request(span, llm_request)
        return None

    async def after_model_callback(
        self, *, callback_context: CallbackContext, llm_response: LlmResponse
    ) -> Optional[LlmResponse]:
        with self._span("after_model_callback") as span:
            _set_span_attributes_from_callback_context(span, callback_context)
            span.set_attributes(_flatten_dict(llm_response.model_dump(), parent_key="llm_response"))
        return None

    async def before_tool_callback(
        self,
        *,
        tool: BaseTool,
        tool_args: Dict[str, Any],
        tool_context: ToolContext,
    ) -> Optional[Dict[str, Any]]:
        with self._span("before_tool_callback") as span:
            _set_span_attributes_for_tool(span, tool, tool_args, tool_context)
        return None

    async def after_tool_callback(
        self,
        *,
        tool: BaseTool,
        tool_args: Dict[str, Any],
        tool_context: ToolContext,
        result: Dict[str, Any],
    ) -> Optional[Dict[str, Any]]:
        with self._span("after_tool_callback") as span:
            _set_span_attributes_for_tool(span, tool, tool_args, tool_context)
            # Only structured results can be flattened into attributes.
            if isinstance(result, (dict, list)):
                span.set_attributes(_flatten_dict(result, parent_key="tool_response"))
        return None

    async def on_model_error_callback(
        self,
        *,
        callback_context: CallbackContext,
        llm_request: LlmRequest,
        error: Exception,
    ) -> Optional[LlmResponse]:
        with self._span("on_model_error_callback") as span:
            _set_span_attributes_from_callback_context(span, callback_context)
            self._record_llm_request(span, llm_request)
            span.set_attribute("error", str(error))
        return None

    async def on_tool_error_callback(
        self,
        *,
        tool: BaseTool,
        tool_args: Dict[str, Any],
        tool_context: ToolContext,
        error: Exception,
    ) -> Optional[Dict[str, Any]]:
        with self._span("on_tool_error_callback") as span:
            _set_span_attributes_for_tool(span, tool, tool_args, tool_context)
            span.set_attribute("error", str(error))
        return None
@@ -0,0 +1,78 @@
1
+ """
2
+ Configuration parsing for sub-agents and tools.
3
+ Parses JSON configurations to create RemoteA2aAgents, AgentTools and McpToolsets.
4
+ """
5
+
6
+ import json
7
+ import logging
8
+
9
+ from google.adk.agents import BaseAgent
10
+ from google.adk.agents.llm_agent import ToolUnion
11
+ from google.adk.agents.remote_a2a_agent import RemoteA2aAgent
12
+ from google.adk.tools.agent_tool import AgentTool
13
+ from google.adk.tools.mcp_tool import StreamableHTTPConnectionParams
14
+ from google.adk.tools.mcp_tool.mcp_toolset import McpToolset
15
+
16
+
17
def parse_sub_agents(sub_agents_config: str) -> tuple[list[BaseAgent], list[ToolUnion]]:
    """
    Get sub agents from JSON string.
    Format: {"agent_name": {"url": "http://agent_url", "interaction_type": "transfer|tool_call"}, ...}

    :param sub_agents_config: JSON object mapping agent names to their configuration.
    :raises ValueError: If the JSON is invalid or an entry is missing its 'url'.
    :return: A tuple of:
        - list of sub agents for transfer interaction type
        - list of agent tools for tool_call interaction type (the default)
    """

    try:
        agents_map = json.loads(sub_agents_config)
    except json.JSONDecodeError as e:
        # Chain the decode error so position details from the parser are kept.
        raise ValueError("Invalid JSON in SUB_AGENTS environment variable: " + sub_agents_config) from e

    sub_agents: list[BaseAgent] = []
    tools: list[ToolUnion] = []
    for agent_name, config in agents_map.items():
        if "url" not in config:
            raise ValueError(f"Missing 'url' for agent '{agent_name}': " + str(config))

        # "tool_call" (the default) wraps the agent as a tool; any other value
        # registers it as a sub-agent for full delegation (transfer).
        interaction_type = config.get("interaction_type", "tool_call")

        logging.info("Adding sub-agent: %s (%s) with URL: %s", agent_name, interaction_type, config["url"])
        agent = RemoteA2aAgent(name=agent_name, agent_card=config["url"])
        if interaction_type == "tool_call":
            tools.append(AgentTool(agent=agent))
        else:
            sub_agents.append(agent)

    return sub_agents, tools
48
+
49
+
50
def parse_tools(tools_config: str) -> list[ToolUnion]:
    """
    Get tools from JSON string.
    Format: {"tool_name": {"url": "http://tool_url", "timeout": 30}, ...}

    :param tools_config: JSON object mapping tool names to their configuration.
    :raises ValueError: If the JSON is invalid or an entry is missing its 'url'.
    :return: A list of McpToolset tools
    """

    try:
        tools_map = json.loads(tools_config)
    except json.JSONDecodeError as e:
        # Chain the decode error so position details from the parser are kept.
        raise ValueError("Invalid JSON in AGENT_TOOLS environment variable: " + tools_config) from e

    tools: list[ToolUnion] = []
    for name, config in tools_map.items():
        if "url" not in config:
            raise ValueError(f"Missing 'url' for tool '{name}': " + str(config))

        logging.info("Adding tool: %s with URL: %s", name, config["url"])
        tools.append(
            McpToolset(
                connection_params=StreamableHTTPConnectionParams(
                    url=config["url"],
                    timeout=config.get("timeout", 30),  # connect timeout in seconds
                ),
            )
        )

    return tools
@@ -0,0 +1,51 @@
1
+ """OpenTelemetry setup for a Google ADK Agent App."""
2
+
3
+ import logging
4
+
5
+ from openinference.instrumentation.google_adk import GoogleADKInstrumentor
6
+ from opentelemetry import metrics, trace
7
+ from opentelemetry._logs import set_logger_provider
8
+ from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
9
+ from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
10
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
11
+ from opentelemetry.instrumentation.httpx import HTTPXClientInstrumentor
12
+ from opentelemetry.sdk import trace as trace_sdk
13
+ from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
14
+ from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
15
+ from opentelemetry.sdk.metrics import MeterProvider
16
+ from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
17
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
18
+
19
+
20
def setup_otel() -> None:
    """Set up OpenTelemetry tracing, logging and metrics with OTLP/HTTP exporters."""

    # urllib3 is used to ship telemetry; its own logs would be re-exported and
    # create noise, so raise its level.
    logging.getLogger("urllib3").setLevel(logging.WARNING)

    # --- Traces ---
    tracer_provider = trace_sdk.TracerProvider()
    # NOTE(review): SimpleSpanProcessor exports synchronously per span; a
    # BatchSpanProcessor may be preferable under load — confirm intent.
    tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter()))
    trace.set_tracer_provider(tracer_provider)  # global default

    # Auto-instrument Google ADK (openinference) and HTTPX clients; the HTTPX
    # instrumentation also propagates the trace context on outgoing requests.
    GoogleADKInstrumentor().instrument()
    HTTPXClientInstrumentor().instrument()

    # --- Logs ---
    logger_provider = LoggerProvider()
    logger_provider.add_log_record_processor(BatchLogRecordProcessor(OTLPLogExporter()))
    set_logger_provider(logger_provider)  # global default

    # Forward every stdlib log record to the OTLP log exporter.
    otlp_handler = LoggingHandler(level=logging.NOTSET, logger_provider=logger_provider)
    logging.getLogger().addHandler(otlp_handler)

    # --- Metrics ---
    metric_reader = PeriodicExportingMetricReader(OTLPMetricExporter())
    metrics.set_meter_provider(MeterProvider(metric_readers=[metric_reader]))  # global default
File without changes
@@ -0,0 +1,20 @@
1
+ [project]
2
+ name = "agentic-layer-sdk-adk"
3
+ version = "0.7.2"
4
+ readme = "README.md"
5
+ requires-python = ">=3.12"
6
+ dependencies = [
7
+ "google-adk[a2a]",
8
+ "litellm",
9
+ "opentelemetry-exporter-otlp-proto-http",
10
+ "opentelemetry-instrumentation-starlette",
11
+ "openinference-instrumentation-google-adk",
12
+ "opentelemetry-instrumentation-httpx",
13
+ ]
14
+
15
+ [build-system]
16
+ requires = ["hatchling"]
17
+ build-backend = "hatchling.build"
18
+
19
+ [tool.hatch.build.targets.wheel]
20
+ packages = ["agenticlayer"]
@@ -0,0 +1,159 @@
1
+ import uuid
2
+ from typing import Any
3
+
4
+ import pytest
5
+ from agenticlayer.agent_to_a2a import to_a2a
6
+ from agenticlayer.config import parse_sub_agents, parse_tools
7
+ from google.adk.agents.llm_agent import LlmAgent
8
+ from google.adk.models.lite_llm import LiteLlm
9
+ from starlette.testclient import TestClient
10
+
11
+
12
def create_mock_agent_card(
    agent_name: str,
    base_url: str,
    skills: list[dict[str, Any]] | None = None,
) -> dict[str, Any]:
    """Build a minimal, valid agent-card payload for mocking remote agents."""
    return dict(
        name=agent_name,
        description=f"Mock agent {agent_name}",
        url=base_url,
        version="1.0.0",
        capabilities={},
        skills=skills or [],
        default_input_modes=["text/plain"],
        default_output_modes=["text/plain"],
        supports_authenticated_extended_card=False,
    )
29
+
30
+
31
def create_send_message_request(
    message_text: str = "Hello, agent!",
) -> dict[str, Any]:
    """Build a JSON-RPC A2A message/send request with fresh message/context ids."""
    message = {
        "role": "user",
        "parts": [{"kind": "text", "text": message_text}],
        "messageId": str(uuid.uuid4()),
        "contextId": str(uuid.uuid4()),
    }
    return {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "message/send",
        "params": {
            "message": message,
            "metadata": {},
        },
    }
51
+
52
+
53
def create_agent(
    name: str = "test_agent",
    sub_agents_config: str = "{}",
    tools_config: str = "{}",
) -> LlmAgent:
    """Build an LlmAgent wired up from JSON sub-agent and tool configurations."""
    sub_agents, agent_tools = parse_sub_agents(sub_agents_config)
    # Agent-as-tool entries come first, followed by the MCP toolsets.
    combined_tools = agent_tools + parse_tools(tools_config)
    return LlmAgent(
        name=name,
        model=LiteLlm(model="gemini/gemini-2.5-flash"),
        description="Test agent",
        instruction="You are a test agent.",
        sub_agents=sub_agents,
        tools=combined_tools,
    )
69
+
70
+
71
# URL advertised in the agent card built by to_a2a() in the tests below.
rpc_url = "http://localhost:80/"
72
+
73
+
74
class TestA2AStarlette:
    """Integration tests for the A2A Starlette app produced by to_a2a():
    agent-card exposure, JSON-RPC message handling, and config parsing."""

    @pytest.mark.asyncio
    async def test_agent_card(self) -> None:
        """Test that the agent card is available at /.well-known/agent-card.json"""

        # Given:
        agent = create_agent()
        app = to_a2a(agent, rpc_url)
        client = TestClient(app)

        # When: Requesting the agent card endpoint
        response = client.get("/.well-known/agent-card.json")

        # Then: Agent card is returned
        assert response.status_code == 200
        data = response.json()
        assert isinstance(data, dict), "Agent card should return a JSON object"
        assert data.get("name") == agent.name
        assert data.get("description") == agent.description

    @pytest.mark.asyncio
    async def test_agent_rpc_send_message(self) -> None:
        """Test that the RPC url is working for send message."""

        # Given:
        agent = create_agent()
        app = to_a2a(agent, rpc_url)
        client = TestClient(app)

        # When: Sending an A2A RPC request
        # (the JSON-RPC endpoint is mounted at the app root, hence the "" path)
        rpc_response = client.post("", json=create_send_message_request())

        # Then: RPC response is returned
        assert rpc_response.status_code == 200
        rpc_data = rpc_response.json()
        assert rpc_data.get("jsonrpc") == "2.0"
        assert rpc_data.get("id") == 1

    @pytest.mark.asyncio
    async def test_sub_agents(self) -> None:
        """Test that sub-agents are parsed and integrated correctly."""

        # When: Creating an agent with sub-agents
        # (one "transfer" sub-agent and one "tool_call" sub-agent)
        sub_agents_config = """{
            "sub_agent_1": {
                "url": "http://sub-agent-1.local/.well-known/agent-card.json",
                "interaction_type": "transfer"
            },
            "sub_agent_2": {
                "url": "http://sub-agent-2.local/.well-known/agent-card.json",
                "interaction_type": "tool_call"
            }
        }"""
        agent = create_agent(sub_agents_config=sub_agents_config)

        # Then: Verify sub-agents and tools are parsed correctly
        assert len(agent.sub_agents) == 1, "There should be 1 sub-agent for transfer interaction type"
        assert len(agent.tools) == 1, "There should be 1 agent tool for tool_call interaction type"

        # When: Requesting the agent card endpoint
        app = to_a2a(agent, rpc_url)
        client = TestClient(app)
        response = client.get("/.well-known/agent-card.json")

        # Then: Agent card is returned
        assert response.status_code == 200

    @pytest.mark.asyncio
    async def test_tools(self) -> None:
        """Test that tools are parsed and integrated correctly."""

        # When: Creating an agent with tools
        tools_config = """{
            "tool_1": {
                "url": "http://tool-1.local/mcp"
            },
            "tool_2": {
                "url": "http://tool-2.local/mcp"
            }
        }"""
        tools = parse_tools(tools_config)

        # Then: Verify McpToolsets are created correctly
        assert len(tools) == 2, "There should be 2 McpToolset tools"

        # Note: Further integration tests would require mocking MCP tool behavior
+ # Note: Further integration tests would require mocking MCP tool behavior