agent-starter-pack 0.5.3-py3-none-any.whl → 0.6.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {agent_starter_pack-0.5.3.dist-info → agent_starter_pack-0.6.1.dist-info}/METADATA +1 -1
  2. {agent_starter_pack-0.5.3.dist-info → agent_starter_pack-0.6.1.dist-info}/RECORD +59 -33
  3. agents/adk_base/notebooks/adk_app_testing.ipynb +1 -1
  4. agents/adk_gemini_fullstack/README.md +148 -0
  5. agents/adk_gemini_fullstack/app/agent.py +363 -0
  6. src/frontends/streamlit_adk/frontend/style/app_markdown.py → agents/adk_gemini_fullstack/app/config.py +19 -23
  7. agents/adk_gemini_fullstack/notebooks/adk_app_testing.ipynb +353 -0
  8. agents/adk_gemini_fullstack/notebooks/evaluating_adk_agent.ipynb +1528 -0
  9. agents/adk_gemini_fullstack/template/.templateconfig.yaml +37 -0
  10. agents/adk_gemini_fullstack/tests/integration/test_agent.py +58 -0
  11. agents/agentic_rag/notebooks/adk_app_testing.ipynb +1 -1
  12. src/base_template/Makefile +21 -2
  13. src/base_template/README.md +8 -3
  14. src/base_template/pyproject.toml +1 -4
  15. src/cli/commands/create.py +17 -10
  16. src/cli/utils/template.py +13 -10
  17. src/deployment_targets/cloud_run/app/server.py +7 -1
  18. src/deployment_targets/cloud_run/tests/integration/test_server_e2e.py +1 -1
  19. src/deployment_targets/cloud_run/tests/load_test/.results/.placeholder +321 -0
  20. src/frontends/adk_gemini_fullstack/frontend/components.json +21 -0
  21. src/frontends/adk_gemini_fullstack/frontend/eslint.config.js +28 -0
  22. src/frontends/adk_gemini_fullstack/frontend/index.html +12 -0
  23. src/frontends/adk_gemini_fullstack/frontend/package-lock.json +5829 -0
  24. src/frontends/adk_gemini_fullstack/frontend/package.json +46 -0
  25. src/frontends/adk_gemini_fullstack/frontend/public/vite.svg +1 -0
  26. src/frontends/adk_gemini_fullstack/frontend/src/App.tsx +565 -0
  27. src/frontends/adk_gemini_fullstack/frontend/src/components/ActivityTimeline.tsx +244 -0
  28. src/frontends/adk_gemini_fullstack/frontend/src/components/ChatMessagesView.tsx +419 -0
  29. src/frontends/adk_gemini_fullstack/frontend/src/components/InputForm.tsx +60 -0
  30. src/frontends/adk_gemini_fullstack/frontend/src/components/WelcomeScreen.tsx +56 -0
  31. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/badge.tsx +46 -0
  32. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/button.tsx +59 -0
  33. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/card.tsx +92 -0
  34. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/input.tsx +21 -0
  35. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/scroll-area.tsx +56 -0
  36. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/select.tsx +183 -0
  37. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/tabs.tsx +64 -0
  38. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/textarea.tsx +18 -0
  39. src/frontends/adk_gemini_fullstack/frontend/src/global.css +154 -0
  40. src/frontends/adk_gemini_fullstack/frontend/src/main.tsx +13 -0
  41. src/frontends/adk_gemini_fullstack/frontend/src/utils.ts +7 -0
  42. src/frontends/adk_gemini_fullstack/frontend/src/vite-env.d.ts +1 -0
  43. src/frontends/adk_gemini_fullstack/frontend/tsconfig.json +28 -0
  44. src/frontends/adk_gemini_fullstack/frontend/tsconfig.node.json +24 -0
  45. src/frontends/adk_gemini_fullstack/frontend/vite.config.ts +37 -0
  46. src/resources/locks/uv-adk_base-agent_engine.lock +24 -24
  47. src/resources/locks/uv-adk_base-cloud_run.lock +24 -24
  48. src/resources/locks/uv-adk_gemini_fullstack-agent_engine.lock +3217 -0
  49. src/resources/locks/uv-adk_gemini_fullstack-cloud_run.lock +3513 -0
  50. src/resources/locks/uv-agentic_rag-agent_engine.lock +88 -85
  51. src/resources/locks/uv-agentic_rag-cloud_run.lock +124 -119
  52. src/resources/locks/uv-crewai_coding_crew-agent_engine.lock +94 -91
  53. src/resources/locks/uv-crewai_coding_crew-cloud_run.lock +130 -125
  54. src/resources/locks/uv-langgraph_base_react-agent_engine.lock +91 -88
  55. src/resources/locks/uv-langgraph_base_react-cloud_run.lock +130 -125
  56. src/resources/locks/uv-live_api-cloud_run.lock +121 -116
  57. src/frontends/streamlit_adk/frontend/side_bar.py +0 -214
  58. src/frontends/streamlit_adk/frontend/streamlit_app.py +0 -314
  59. src/frontends/streamlit_adk/frontend/utils/chat_utils.py +0 -84
  60. src/frontends/streamlit_adk/frontend/utils/local_chat_history.py +0 -110
  61. src/frontends/streamlit_adk/frontend/utils/message_editing.py +0 -61
  62. src/frontends/streamlit_adk/frontend/utils/multimodal_utils.py +0 -223
  63. src/frontends/streamlit_adk/frontend/utils/stream_handler.py +0 -311
  64. src/frontends/streamlit_adk/frontend/utils/title_summary.py +0 -129
  65. {agent_starter_pack-0.5.3.dist-info → agent_starter_pack-0.6.1.dist-info}/WHEEL +0 -0
  66. {agent_starter_pack-0.5.3.dist-info → agent_starter_pack-0.6.1.dist-info}/entry_points.txt +0 -0
  67. {agent_starter_pack-0.5.3.dist-info → agent_starter_pack-0.6.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,37 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ description: "A production-ready fullstack research agent that uses Gemini to strategize, research, and synthesize comprehensive reports with human-in-the-loop collaboration"
+ example_question: "A report on the latest Google I/O event"
+ settings:
+   requires_data_ingestion: false
+   deployment_targets: ["agent_engine", "cloud_run"]
+   extra_dependencies: ["google-adk~=1.3.0"]
+   tags: ["adk"]
+   frontend_type: "adk_gemini_fullstack"
+   commands:
+     override:
+       install: "npm --prefix frontend install"
+     extra:
+       dev:
+         command: 'make dev-backend & make dev-frontend'
+         description: "Start the ADK API server and React frontend development server simultaneously"
+       dev-backend:
+         command:
+           agent_engine: 'uv run adk api_server app --allow_origins="*"'
+           cloud_run: 'ALLOW_ORIGINS="*" uv run uvicorn app.server:app --host 0.0.0.0 --port 8000 --reload'
+         description: "Start the ADK API server"
+       dev-frontend:
+         command: 'npm --prefix frontend run dev'
+         description: "Start the React frontend development server"
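Note on the new `commands` block above: the Makefile and `create.py` changes later in this diff consume it. `override.install` is appended to the generated `install` target, each `commands.extra` entry becomes its own Makefile target (with per-deployment-target variants), and `create.py` suggests `make dev` instead of `make playground` when an `extra.dev` command exists. Below is a minimal Python sketch of that resolution logic mirroring the Jinja template further down; the helper names `resolve_command` and `pick_start_command` are illustrative only, not part of the package.

    # Hypothetical sketch: resolving per-deployment-target commands from the
    # .templateconfig.yaml settings shown above. Not part of agent-starter-pack.
    from typing import Any


    def resolve_command(cmd_value: Any, deployment_target: str) -> str:
        """Mirror the Makefile template: a command is either a plain string or
        a mapping whose `command` key may itself be keyed by deployment target."""
        if isinstance(cmd_value, dict):
            command = cmd_value.get("command", "")
            if isinstance(command, dict):
                return command.get(deployment_target, "")
            return command if isinstance(command, str) else ""
        return str(cmd_value)


    def pick_start_command(settings: dict) -> str:
        """Mirror create.py: suggest `make dev` when the agent defines one."""
        extra = settings.get("commands", {}).get("extra", {})
        return "make install && make dev" if "dev" in extra else "make install && make playground"


    settings = {
        "commands": {
            "override": {"install": "npm --prefix frontend install"},
            "extra": {
                "dev": {"command": "make dev-backend & make dev-frontend"},
                "dev-backend": {
                    "command": {
                        "agent_engine": 'uv run adk api_server app --allow_origins="*"',
                        "cloud_run": 'ALLOW_ORIGINS="*" uv run uvicorn app.server:app --host 0.0.0.0 --port 8000 --reload',
                    }
                },
            },
        }
    }
    print(resolve_command(settings["commands"]["extra"]["dev-backend"], "cloud_run"))
    print(pick_start_command(settings))

For the config above and a `cloud_run` target, this prints the uvicorn dev-backend command and suggests `make dev`, matching what the generated Makefile and CLI hint do.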
@@ -0,0 +1,58 @@
+ # Copyright 2025 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # mypy: disable-error-code="union-attr"
+ from google.adk.agents.run_config import RunConfig, StreamingMode
+ from google.adk.runners import Runner
+ from google.adk.sessions import InMemorySessionService
+ from google.genai import types
+
+ from app.agent import root_agent
+
+
+ def test_agent_stream() -> None:
+     """
+     Integration test for the agent stream functionality.
+     Tests that the agent returns valid streaming responses.
+     """
+
+     session_service = InMemorySessionService()
+
+     session = session_service.create_session_sync(user_id="test_user", app_name="test")
+     runner = Runner(agent=root_agent, session_service=session_service, app_name="test")
+
+     message = types.Content(
+         role="user", parts=[types.Part.from_text(text="Why is the sky blue?")]
+     )
+
+     events = list(
+         runner.run(
+             new_message=message,
+             user_id="test_user",
+             session_id=session.id,
+             run_config=RunConfig(streaming_mode=StreamingMode.SSE),
+         )
+     )
+     assert len(events) > 0, "Expected at least one message"
+
+     has_text_content = False
+     for event in events:
+         if (
+             event.content
+             and event.content.parts
+             and any(part.text for part in event.content.parts)
+         ):
+             has_text_content = True
+             break
+     assert has_text_content, "Expected at least one message with text content"
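The integration test above drives the agent through the ADK `Runner` with an in-memory session. Under the same assumptions (valid Google Cloud credentials and the generated project's dependencies installed), a similar snippet can serve as a quick manual smoke test that prints the streamed text instead of asserting on it; this is a sketch, not part of the package.

    # Sketch: run the fullstack research agent once and print streamed text.
    # Uses only the APIs already exercised by the integration test above.
    from google.adk.agents.run_config import RunConfig, StreamingMode
    from google.adk.runners import Runner
    from google.adk.sessions import InMemorySessionService
    from google.genai import types

    from app.agent import root_agent

    session_service = InMemorySessionService()
    session = session_service.create_session_sync(user_id="demo_user", app_name="demo")
    runner = Runner(agent=root_agent, session_service=session_service, app_name="demo")

    message = types.Content(
        role="user",
        parts=[types.Part.from_text(text="A report on the latest Google I/O event")],
    )

    for event in runner.run(
        new_message=message,
        user_id="demo_user",
        session_id=session.id,
        run_config=RunConfig(streaming_mode=StreamingMode.SSE),
    ):
        # Each streamed event may or may not carry text parts.
        if event.content and event.content.parts:
            for part in event.content.parts:
                if part.text:
                    print(part.text, end="", flush=True)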
@@ -261,7 +261,7 @@
  "source": [
  "### Local Testing\n",
  "\n",
- "> You can run the application locally via the `make backend` command."
+ "> You can run the application locally via the `make local-backend` command."
  ]
  },
  {
@@ -1,11 +1,26 @@
  install:
  @command -v uv >/dev/null 2>&1 || { echo "uv is not installed. Installing uv..."; curl -LsSf https://astral.sh/uv/0.6.12/install.sh | sh; source ~/.bashrc; }
  uv sync --dev{% if cookiecutter.agent_name != 'live_api' and "adk" not in cookiecutter.tags %} --extra streamlit{%- endif %} --extra jupyter --frozen{% if cookiecutter.agent_name == 'live_api' %} && npm --prefix frontend install{%- endif %}
+ {%- if cookiecutter.settings.get("commands", {}).get("override", {}).get("install") %} && {{cookiecutter.settings.get("commands", {}).get("override", {}).get("install")}}{%- endif %}
+ {%- if cookiecutter.settings.get("commands", {}).get("extra", {}) %}
+ {%- for cmd_name, cmd_value in cookiecutter.settings.get("commands", {}).get("extra", {}).items() %}

- test:
- uv run pytest tests/unit && uv run pytest tests/integration
+ {{ cmd_name }}:
+ {%- if cmd_value is mapping %}
+ {%- if cmd_value.command is mapping and cookiecutter.deployment_target in cmd_value.command %}
+ {{ cmd_value.command[cookiecutter.deployment_target] }}
+ {%- else %}
+ {{ cmd_value.command if cmd_value.command is string else "" }}
+ {%- endif %}
+ {%- else %}
+ {{ cmd_value }}
+ {%- endif %}
+ {%- endfor %}{%- endif %}

  playground:
+ {%- if cookiecutter.settings.get("commands", {}).get("override", {}).get("playground") %}
+ {{cookiecutter.settings.get("commands", {}).get("override", {}).get("playground")}}
+ {%- else %}
  @echo "==============================================================================="
  @echo "| 🚀 Starting your agent playground... |"
  @echo "| |"
@@ -27,6 +42,7 @@ playground:
  {% if cookiecutter.deployment_target == 'agent_engine' %}PYTHONPATH=. {% endif %}uv run streamlit run frontend/streamlit_app.py --browser.serverAddress=localhost --server.enableCORS=false --server.enableXsrfProtection=false
  {%- endif %}
  {%- endif %}
+ {%- endif %}

  backend:
  {%- if cookiecutter.deployment_target == 'cloud_run' %}
@@ -82,6 +98,9 @@ data-ingestion:
  --pipeline-name="data-ingestion-pipeline")
  {%- endif %}

+ test:
+ uv run pytest tests/unit && uv run pytest tests/integration
+
  lint:
  uv run codespell
  uv run ruff check . --diff
@@ -46,6 +46,11 @@ make install && make playground
  | Command | Description |
  | -------------------- | ------------------------------------------------------------------------------------------- |
  | `make install` | Install all required dependencies using uv |
+ {%- if cookiecutter.settings.get("commands", {}).get("extra", {}) %}
+ {%- for cmd_name, cmd_value in cookiecutter.settings.get("commands", {}).get("extra", {}).items() %}
+ | `make {{ cmd_name }}` | {% if cmd_value is mapping %}{% if cmd_value.description %}{{ cmd_value.description }}{% else %}{% if cookiecutter.deployment_target in cmd_value %}{{ cmd_value[cookiecutter.deployment_target] }}{% else %}{{ cmd_value.command if cmd_value.command is string else "" }}{% endif %}{% endif %}{% else %}{{ cmd_value }}{% endif %} |
+ {%- endfor %}
+ {%- endif %}
  {%- if cookiecutter.deployment_target == 'cloud_run' %}
  | `make playground` | Launch local development environment with backend and frontend{%- if "adk" in cookiecutter.tags %} - leveraging `adk web` command. {%- endif %}|
  | `make backend` | Deploy agent to Cloud Run |
@@ -84,7 +89,7 @@ Here’s the recommended workflow for local development:
  2. **Start the Backend Server:**
  Open a terminal and run:
  ```bash
- make backend
+ make local-backend
  ```
  The backend is ready when you see `INFO: Application startup complete.` Wait for this message before starting the frontend.

@@ -96,7 +101,7 @@ Here’s the recommended workflow for local development:
  ```bash
  export VERTEXAI=false
  export GOOGLE_API_KEY="your-google-api-key" # Replace with your actual key
- make backend
+ make local-backend
  ```
  Ensure `GOOGLE_API_KEY` is set correctly in your environment.
  </details>
@@ -130,7 +135,7 @@ To run the agent using Google Cloud Shell:
  2. **Start the Backend:**
  Open a *new* Cloud Shell tab. Set your project: `gcloud config set project [PROJECT_ID]`. Then run:
  ```bash
- make backend
+ make local-backend
  ```

  3. **Configure Backend Web Preview:**
@@ -99,11 +99,8 @@ exclude = [".venv"]

  [tool.codespell]
  ignore-words-list = "rouge"
- {% if cookiecutter.agent_name == 'live_api' %}
  skip = "./locust_env/*,uv.lock,.venv,./frontend,**/*.ipynb"
- {% else %}
- skip = "./locust_env/*,uv.lock,.venv,**/*.ipynb"
- {%- endif %}
+

  [build-system]
  requires = ["hatchling"]
@@ -191,6 +191,13 @@ def create(
  if debug:
      logging.debug(f"Selected agent: {agent}")

+ template_path = (
+     pathlib.Path(__file__).parent.parent.parent.parent
+     / "agents"
+     / final_agent
+     / "template"
+ )
+ config = load_template_config(template_path)
  # Data ingestion and datastore selection
  if include_data_ingestion or datastore:
      # If datastore is specified but include_data_ingestion is not, set it to True
@@ -206,13 +213,6 @@ def create(
      logging.debug(f"Selected datastore type: {datastore}")
  else:
      # Check if the agent requires data ingestion
-     template_path = (
-         pathlib.Path(__file__).parent.parent.parent.parent
-         / "agents"
-         / final_agent
-         / "template"
-     )
-     config = load_template_config(template_path)
      if config and config.get("settings", {}).get("requires_data_ingestion"):
          include_data_ingestion = True
          datastore = prompt_datastore_selection(final_agent)
@@ -322,9 +322,16 @@ def create(
      )
      # Determine the correct path to display based on whether output_dir was specified
      console.print("\n🚀 To get started, run the following command:")
-     console.print(
-         f" [bold bright_green]cd {cd_path} && make install && make playground[/]"
-     )
+
+     # Check if the agent has a 'dev' command in its settings
+     if config["settings"].get("commands", {}).get("extra", {}).get("dev"):
+         console.print(
+             f" [bold bright_green]cd {cd_path} && make install && make dev[/]"
+         )
+     else:
+         console.print(
+             f" [bold bright_green]cd {cd_path} && make install && make playground[/]"
+         )
  except Exception:
      if debug:
          logging.exception(
src/cli/utils/template.py CHANGED
@@ -80,7 +80,12 @@ def get_available_agents(deployment_target: str | None = None) -> dict:
      deployment_target: Optional deployment target to filter agents
  """
  # Define priority agents that should appear first
- PRIORITY_AGENTS = ["adk_base", "agentic_rag", "langgraph_base_react"]
+ PRIORITY_AGENTS = [
+     "adk_base",
+     "adk_gemini_fullstack",
+     "agentic_rag",
+     "langgraph_base_react",
+ ]

  agents_list = []
  priority_agents_dict = dict.fromkeys(PRIORITY_AGENTS) # Track priority agents
@@ -499,15 +504,12 @@ def process_template(
  copy_data_ingestion_files(project_template, datastore)

  # Create cookiecutter.json in the template root
- # Process extra dependencies
- extra_deps = template_config.get("settings", {}).get(
-     "extra_dependencies", []
- )
- # Get frontend type from template config
- frontend_type = template_config.get("settings", {}).get(
-     "frontend_type", DEFAULT_FRONTEND
- )
- tags = template_config.get("settings", {}).get("tags", ["None"])
+ # Get settings from template config
+ settings = template_config.get("settings", {})
+ extra_deps = settings.get("extra_dependencies", [])
+ frontend_type = settings.get("frontend_type", DEFAULT_FRONTEND)
+ tags = settings.get("tags", ["None"])
+
  cookiecutter_config = {
      "project_name": "my-project",
      "agent_name": agent_name,
@@ -516,6 +518,7 @@ def process_template(
  "example_question": template_config.get("example_question", "").ljust(
      61
  ),
+ "settings": settings,
  "tags": tags,
  "deployment_target": deployment_target or "",
  "frontend_type": frontend_type,
@@ -28,6 +28,9 @@ from app.utils.typing import Feedback
  _, project_id = google.auth.default()
  logging_client = google_cloud_logging.Client()
  logger = logging_client.logger(__name__)
+ allow_origins = (
+     os.getenv("ALLOW_ORIGINS", "").split(",") if os.getenv("ALLOW_ORIGINS") else None
+ )

  bucket_name = f"gs://{project_id}-{{cookiecutter.project_name}}-logs-data"
  create_bucket_if_not_exists(
@@ -41,7 +44,10 @@ trace.set_tracer_provider(provider)

  AGENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
  app: FastAPI = get_fast_api_app(
-     agents_dir=AGENT_DIR, web=True, artifact_service_uri=bucket_name
+     agents_dir=AGENT_DIR,
+     web=True,
+     artifact_service_uri=bucket_name,
+     allow_origins=allow_origins,
  )
  app.title = "{{cookiecutter.project_name}}"
  app.description = "API for interacting with the Agent {{cookiecutter.project_name}}"
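For context on the `allow_origins` change above: the value comes from the `ALLOW_ORIGINS` environment variable that the new `dev-backend` command sets (e.g. `ALLOW_ORIGINS="*"` for the Cloud Run target), split on commas, or `None` when unset. A small sketch of that parsing follows; the origin URLs are made up for illustration.

    # Sketch of the ALLOW_ORIGINS parsing added to server.py above.
    import os

    os.environ["ALLOW_ORIGINS"] = "http://localhost:5173,https://example.com"
    allow_origins = (
        os.getenv("ALLOW_ORIGINS", "").split(",") if os.getenv("ALLOW_ORIGINS") else None
    )
    print(allow_origins)  # ['http://localhost:5173', 'https://example.com']

    # When the variable is unset, no CORS origins are passed to get_fast_api_app.
    del os.environ["ALLOW_ORIGINS"]
    allow_origins = (
        os.getenv("ALLOW_ORIGINS", "").split(",") if os.getenv("ALLOW_ORIGINS") else None
    )
    print(allow_origins)  # None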
@@ -157,7 +157,7 @@ def test_chat_stream(server_fixture: subprocess.Popen[str]) -> None:
  }
  {% endif %}
  response = requests.post(
-     STREAM_URL, headers=HEADERS, json=data, stream=True, timeout=10
+     STREAM_URL, headers=HEADERS, json=data, stream=True, timeout=60
  )
  assert response.status_code == 200

@@ -0,0 +1,321 @@
+ import datetime
+ import logging
+ import os
+ import re
+ from collections.abc import AsyncGenerator
+ from dataclasses import dataclass
+ from typing import Literal
+
+ import google.auth
+ from google.adk.agents import LlmAgent, LoopAgent, SequentialAgent
+ from google.adk.agents.callback_context import CallbackContext
+ from google.adk.planners import BuiltInPlanner
+ from google.adk.tools import google_search
+ from google.genai import types as genai_types
+ from pydantic import BaseModel, Field
+
+ from google.adk.tools.agent_tool import AgentTool
+ from google.adk.agents import BaseAgent
+ from google.adk.agents.invocation_context import InvocationContext
+ from google.adk.events import Event, EventActions
+
+
+ _, project_id = google.auth.default()
+ os.environ.setdefault("GOOGLE_CLOUD_PROJECT", project_id)
+ os.environ.setdefault("GOOGLE_CLOUD_LOCATION", "global")
+ os.environ.setdefault("GOOGLE_GENAI_USE_VERTEXAI", "True")
+
+
+ # --- Centralized Configuration ---
+ @dataclass
+ class ResearchConfiguration:
+     critic_model: str = "gemini-2.5-pro"
+     worker_model: str = "gemini-2.5-flash"
+     max_search_iterations: int = 5
+
+
+ config = ResearchConfiguration()
+
+
+ # --- Structured Output Models ---
+ class SearchQuery(BaseModel):
+     search_query: str = Field(
+         description="A highly specific and targeted query for web search."
+     )
+
+
+ class Feedback(BaseModel):
+     grade: Literal["pass", "fail"] = Field(
+         description="Evaluation result. 'pass' if the research is sufficient, 'fail' if it needs revision."
+     )
+     comment: str = Field(
+         description="Detailed explanation of the evaluation, highlighting strengths and/or weaknesses of the research."
+     )
+     follow_up_queries: list[SearchQuery] | None = Field(
+         default=None,
+         description="A list of specific, targeted follow-up search queries needed to fix research gaps. This should be null or empty if the grade is 'pass'.",
+     )
+
+ # --- Callbacks ---
+ def collect_research_sources_callback(callback_context: CallbackContext) -> None:
+     session = callback_context._invocation_context.session
+     url_to_short_id = callback_context.state.get("url_to_short_id", {})
+     sources = callback_context.state.get("sources", {})
+     id_counter = len(url_to_short_id) + 1
+     for event in session.events:
+         if not (event.grounding_metadata and event.grounding_metadata.grounding_chunks):
+             continue
+         chunks_info = {}
+         for idx, chunk in enumerate(event.grounding_metadata.grounding_chunks):
+             if not chunk.web:
+                 continue
+             url = chunk.web.uri
+             title = (
+                 chunk.web.title
+                 if chunk.web.title != chunk.web.domain
+                 else chunk.web.domain
+             )
+             if url not in url_to_short_id:
+                 short_id = f"src-{id_counter}"
+                 url_to_short_id[url] = short_id
+                 sources[short_id] = {
+                     "short_id": short_id,
+                     "title": title,
+                     "url": url,
+                     "domain": chunk.web.domain,
+                     "supported_claims": [],
+                 }
+                 id_counter += 1
+             chunks_info[idx] = url_to_short_id[url]
+         if event.grounding_metadata.grounding_supports:
+             for support in event.grounding_metadata.grounding_supports:
+                 confidence_scores = support.confidence_scores or []
+                 chunk_indices = support.grounding_chunk_indices or []
+                 for i, chunk_idx in enumerate(chunk_indices):
+                     if chunk_idx in chunks_info:
+                         short_id = chunks_info[chunk_idx]
+                         confidence = (
+                             confidence_scores[i] if i < len(confidence_scores) else 0.5
+                         )
+                         text_segment = support.segment.text if support.segment else ""
+                         sources[short_id]["supported_claims"].append(
+                             {
+                                 "text_segment": text_segment,
+                                 "confidence": confidence,
+                             }
+                         )
+     callback_context.state["url_to_short_id"] = url_to_short_id
+     callback_context.state["sources"] = sources
+
+
+ def citation_replacement_callback(callback_context: CallbackContext) -> genai_types.Content:
+     final_report = callback_context.state.get("final_cited_report", "")
+     sources = callback_context.state.get("sources", {})
+     def tag_replacer(match: re.Match) -> str:
+         short_id = match.group(1)
+         if not (source_info := sources.get(short_id)):
+             logging.warning(f"Invalid citation tag found and removed: {match.group(0)}")
+             return ""
+         display_text = source_info.get("title", source_info.get("domain", short_id))
+         return f" [{display_text}]({source_info['url']})"
+     processed_report = re.sub(
+         r'<cite\s+source\s*=\s*["\']?\s*(src-\d+)\s*["\']?\s*/>', tag_replacer, final_report
+     )
+     processed_report = re.sub(r'\s+([.,;:])', r'\1', processed_report)
+     callback_context.state["final_report_with_citations"] = processed_report
+     return genai_types.Content(parts=[genai_types.Part(text=processed_report)])
+
+
+ # --- Custom Agent for Loop Control ---
+ class EscalationChecker(BaseAgent):
+     """Checks research evaluation and escalates to stop the loop if grade is 'pass'."""
+     def __init__(self, name: str):
+         super().__init__(name=name)
+     async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
+         evaluation_result = ctx.session.state.get("research_evaluation")
+         if evaluation_result and evaluation_result.get("grade") == "pass":
+             logging.info(f"[{self.name}] Research evaluation passed. Escalating to stop loop.")
+             yield Event(author=self.name, actions=EventActions(escalate=True))
+         else:
+             logging.info(f"[{self.name}] Research evaluation failed or not found. Loop will continue.")
+             # Yielding an event without content or actions just lets the flow continue.
+             yield Event(author=self.name)
+
+
+ # --- AGENT DEFINITIONS ---
+
+ plan_generator = LlmAgent(
+     model=config.worker_model,
+     name="plan_generator",
+     description="Generates a 4-5 line action-oriented research plan, using minimal search only for topic clarification.",
+     instruction=f"""
+     You are a research strategist. Your job is to create a high-level RESEARCH PLAN, not a summary.
+     **RULE: Your output MUST be a bulleted list of 4-5 action-oriented research goals or key questions.**
+     - A good goal starts with a verb like "Analyze," "Identify," "Investigate."
+     - A bad output is a statement of fact like "The event was in April 2024."
+     **TOOL USE IS STRICTLY LIMITED:**
+     Your goal is to create a generic, high-quality plan *without searching*.
+     Only use `google_search` if a topic is ambiguous or time-sensitive and you absolutely cannot create a plan without a key piece of identifying information.
+     You are explicitly forbidden from researching the *content* or *themes* of the topic. That is the next agent's job. Your search is only to identify the subject, not to investigate it.
+     Current date: {datetime.datetime.now().strftime("%Y-%m-%d")}
+     """,
+     tools=[google_search],
+ )
+
+
+ section_planner = LlmAgent(
+     model=config.worker_model,
+     name="section_planner",
+     description="Breaks down the research plan into a structured markdown outline of report sections.",
+     instruction="""
+     You are an expert report architect. Using the research topic and the plan from the 'research_plan' state key, design a logical structure for the final report.
+     Your task is to create a markdown outline with 4-6 distinct sections that cover the topic comprehensively without overlap.
+     You can use any markdown format you prefer, but here's a suggested structure:
+     # Section Name
+     A brief overview of what this section covers
+     Feel free to add subsections or bullet points if needed to better organize the content.
+     Make sure your outline is clear and easy to follow.
+     Do not include a "References" or "Sources" section in your outline. Citations will be handled in-line.
+     """,
+     output_key="report_sections",
+ )
+
+
+ section_researcher = LlmAgent(
+     model=config.worker_model,
+     name="section_researcher",
+     description="Performs the crucial first pass of web research.",
+     planner=BuiltInPlanner(
+         thinking_config=genai_types.ThinkingConfig(include_thoughts=True)
+     ),
+     instruction="""
+     You are a diligent and exhaustive researcher. Your task is to perform the initial, broad information gathering for a report.
+     You will be provided with a list of sections in the 'report_sections' state key.
+     For each section where 'research' is marked as 'true', generate a comprehensive list of 4-5 targeted search queries to cover the topic from multiple angles.
+     Execute all of these queries using the 'google_search' tool and synthesize the results into a detailed summary for that section.
+     """,
+     tools=[google_search],
+     output_key="section_research_findings",
+     after_agent_callback=collect_research_sources_callback,
+ )
+
+ research_evaluator = LlmAgent(
+     model=config.critic_model,
+     name="research_evaluator",
+     description="Critically evaluates research and generates follow-up queries.",
+     instruction=f"""
+     You are a meticulous quality assurance analyst evaluating the research findings in 'section_research_findings'.
+
+     **CRITICAL RULES:**
+     1. Assume the given research topic is correct. Do not question or try to verify the subject itself.
+     2. Your ONLY job is to assess the quality, depth, and completeness of the research provided *for that topic*.
+     3. Focus on evaluating: Comprehensiveness of coverage, logical flow and organization, use of credible sources, depth of analysis, and clarity of explanations.
+     4. Do NOT fact-check or question the fundamental premise or timeline of the topic.
+     5. If suggesting follow-up queries, they should dive deeper into the existing topic, not question its validity.
+
+     Be very critical about the QUALITY of research. If you find significant gaps in depth or coverage, assign a grade of "fail",
+     write a detailed comment about what's missing, and generate 5-7 specific follow-up queries to fill those gaps.
+     If the research thoroughly covers the topic, grade "pass".
+
+     Current date: {datetime.datetime.now().strftime("%Y-%m-%d")}
+     Your response must be a single, raw JSON object validating against the 'Feedback' schema.
+     """,
+     output_schema=Feedback,
+     output_key="research_evaluation",
+ )
+
+ enhanced_search_executor = LlmAgent(
+     model=config.worker_model,
+     name="enhanced_search_executor",
+     description="Executes follow-up searches and integrates new findings.",
+     planner=BuiltInPlanner(
+         thinking_config=genai_types.ThinkingConfig(include_thoughts=True)
+     ),
+     instruction="""
+     You are a specialist researcher executing a refinement pass.
+     You have been activated because the previous research was graded as 'fail'.
+
+     1. Review the 'research_evaluation' state key to understand the feedback and required fixes.
+     2. Execute EVERY query listed in 'follow_up_queries' using the 'google_search' tool.
+     3. Synthesize the new findings and COMBINE them with the existing information in 'section_research_findings'.
+     4. Your output MUST be the new, complete, and improved set of research findings.
+     """,
+     tools=[google_search],
+     output_key="section_research_findings",
+     after_agent_callback=collect_research_sources_callback,
+ )
+
+ report_composer = LlmAgent(
+     model=config.critic_model,
+     name="report_composer_with_citations",
+     include_contents="none",
+     description="Transforms research data and a markdown outline into a final, cited report.",
+     instruction="""
+     Transform the provided data into a polished, professional, and meticulously cited research report.
+
+     ---
+     ### INPUT DATA
+     * Research Plan: `{research_plan}`
+     * Research Findings: `{section_research_findings}`
+     * Citation Sources: `{sources}`
+     * Report Structure: `{report_sections}`
+
+     ---
+     ### CRITICAL: Citation System
+     To cite a source, you MUST insert a special citation tag directly after the claim it supports.
+
+     **The only correct format is:** `<cite source="src-ID_NUMBER" />`
+
+     ---
+     ### Final Instructions
+     Generate a comprehensive report using ONLY the `<cite source="src-ID_NUMBER" />` tag system for all citations.
+     The final report must strictly follow the structure provided in the **Report Structure** markdown outline.
+     Do not include a "References" or "Sources" section; all citations must be in-line.
+     """,
+     output_key="final_cited_report",
+     after_agent_callback=citation_replacement_callback,
+ )
+
+ research_pipeline = SequentialAgent(
+     name="research_pipeline",
+     description="Executes a pre-approved research plan. It performs iterative research, evaluation, and composes a final, cited report.",
+     sub_agents=[
+         section_planner,
+         section_researcher,
+         LoopAgent(
+             name="iterative_refinement_loop",
+             max_iterations=config.max_search_iterations,
+             sub_agents=[
+                 research_evaluator,
+                 EscalationChecker(name="escalation_checker"),
+                 enhanced_search_executor,
+             ],
+         ),
+         report_composer,
+     ],
+ )
+
+ interactive_planner_agent = LlmAgent(
+     name="interactive_planner_agent",
+     model=config.worker_model,
+     description="The primary research assistant. It collaborates with the user to create a research plan, and then executes it upon approval.",
+     instruction=f"""
+     You are a research planning assistant. Your primary function is to convert ANY user request into a research plan.
+
+     **CRITICAL RULE: Never answer a question directly or refuse a request.** Your one and only first step is to use the `plan_generator` tool to propose a research plan for the user's topic.
+     If the user asks a question, you MUST immediately call `plan_generator` to create a plan to answer the question.
+
+     Your workflow is:
+     1. **Plan:** Use `plan_generator` to create a draft plan and present it to the user.
+     2. **Refine:** Incorporate user feedback until the plan is approved.
+     3. **Execute:** Once the user gives EXPLICIT approval (e.g., "looks good, run it"), you MUST delegate the task to the `research_pipeline` agent, passing the approved plan.
+
+     Current date: {datetime.datetime.now().strftime("%Y-%m-%d")}
+     Do not perform any research yourself. Your job is to Plan, Refine, and Delegate.
+     """,
+     sub_agents=[research_pipeline],
+     tools=[AgentTool(plan_generator)],
+     output_key="research_plan",
+ )
+
+ root_agent = interactive_planner_agent
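To make the citation flow above concrete: `report_composer` emits `<cite source="src-N" />` tags, and `citation_replacement_callback` rewrites them into markdown links using the `sources` collected by `collect_research_sources_callback`. Below is a small self-contained sketch of that substitution, using the same regular expressions as the callback; the report string and source entry are made up for illustration.

    # Sketch of the tag rewriting done by citation_replacement_callback above.
    import re

    sources = {
        "src-1": {
            "title": "Google I/O 2025 keynote",
            "url": "https://example.com/io",
            "domain": "example.com",
        },
    }
    final_report = 'Gemini updates were the headline<cite source="src-1" /> .'

    def tag_replacer(match: re.Match) -> str:
        short_id = match.group(1)
        if not (source_info := sources.get(short_id)):
            return ""  # unknown tags are dropped, as in the callback
        display_text = source_info.get("title", source_info.get("domain", short_id))
        return f" [{display_text}]({source_info['url']})"

    processed = re.sub(
        r'<cite\s+source\s*=\s*["\']?\s*(src-\d+)\s*["\']?\s*/>', tag_replacer, final_report
    )
    # Collapse whitespace that ends up in front of punctuation after replacement.
    processed = re.sub(r"\s+([.,;:])", r"\1", processed)
    print(processed)
    # Gemini updates were the headline [Google I/O 2025 keynote](https://example.com/io).

The rewritten text is stored under the `final_report_with_citations` state key, which is what the React frontend added in this release ultimately renders.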