agent-starter-pack 0.13.1__py3-none-any.whl → 0.14.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agent_starter_pack-0.13.1.dist-info → agent_starter_pack-0.14.1.dist-info}/METADATA +13 -4
- {agent_starter_pack-0.13.1.dist-info → agent_starter_pack-0.14.1.dist-info}/RECORD +23 -25
- agents/adk_base/notebooks/evaluating_adk_agent.ipynb +78 -71
- agents/agentic_rag/notebooks/evaluating_adk_agent.ipynb +78 -71
- llm.txt +87 -39
- src/base_template/Makefile +16 -1
- src/base_template/README.md +1 -1
- src/cli/commands/create.py +27 -5
- src/cli/commands/enhance.py +132 -6
- src/cli/commands/setup_cicd.py +91 -69
- src/cli/utils/cicd.py +105 -0
- src/cli/utils/gcp.py +19 -13
- src/cli/utils/logging.py +13 -1
- src/cli/utils/template.py +3 -0
- src/frontends/live_api_react/frontend/package-lock.json +9 -9
- src/frontends/live_api_react/frontend/src/App.tsx +12 -153
- src/frontends/live_api_react/frontend/src/components/side-panel/SidePanel.tsx +352 -3
- src/frontends/live_api_react/frontend/src/components/side-panel/side-panel.scss +249 -2
- src/frontends/live_api_react/frontend/src/utils/multimodal-live-client.ts +4 -1
- src/resources/docs/adk-cheatsheet.md +285 -38
- src/frontends/live_api_react/frontend/src/components/control-tray/ControlTray.tsx +0 -217
- src/frontends/live_api_react/frontend/src/components/control-tray/control-tray.scss +0 -201
- {agent_starter_pack-0.13.1.dist-info → agent_starter_pack-0.14.1.dist-info}/WHEEL +0 -0
- {agent_starter_pack-0.13.1.dist-info → agent_starter_pack-0.14.1.dist-info}/entry_points.txt +0 -0
- {agent_starter_pack-0.13.1.dist-info → agent_starter_pack-0.14.1.dist-info}/licenses/LICENSE +0 -0
@@ -234,6 +234,7 @@
 "outputs": [],
 "source": [
 "import json\n",
+"import asyncio\n",
 "\n",
 "# General\n",
 "import random\n",
@@ -285,7 +286,7 @@
 " return \"\".join(random.choices(string.ascii_lowercase + string.digits, k=length))\n",
 "\n",
 "\n",
-"def parse_adk_output_to_dictionary(events: list[Event]
+"def parse_adk_output_to_dictionary(events: list[Event], *, as_json: bool = False):\n",
 " \"\"\"\n",
 " Parse ADK event output into a structured dictionary format,\n",
 " with the predicted trajectory dumped as a JSON string.\n",
@@ -293,63 +294,54 @@
 " \"\"\"\n",
 "\n",
 " final_response = \"\"\n",
-"
+" trajectory = []\n",
 "\n",
 " for event in events:\n",
-"
-" if not event.content or not event.content.parts:\n",
+" if not getattr(event, \"content\", None) or not getattr(event.content, \"parts\", None):\n",
 " continue\n",
-"\n",
-" # Iterate through ALL parts in the event's content\n",
 " for part in event.content.parts:\n",
-" if part
-"
+" if getattr(part, \"function_call\", None):\n",
+" info = {\n",
 " \"tool_name\": part.function_call.name,\n",
 " \"tool_input\": dict(part.function_call.args),\n",
 " }\n",
-"
-"
-"
-"\n",
-" # The final text response is usually in the last event from the model\n",
-" if event.content.role == \"model\" and part.text:\n",
-" # Overwrite response; the last text response found is likely the final one\n",
+" if info not in trajectory:\n",
+" trajectory.append(info)\n",
+" if event.content.role == \"model\" and getattr(part, \"text\", None):\n",
 " final_response = part.text.strip()\n",
 "\n",
-"
-"
-"
-" \
-" }\n",
+" if as_json:\n",
+" trajectory_out = json.dumps(trajectory)\n",
+" else:\n",
+" trajectory_out = trajectory\n",
 "\n",
-" return
+" return {\"response\": final_response, \"predicted_trajectory\": trajectory_out}\n",
 "\n",
 "\n",
 "def format_output_as_markdown(output: dict) -> str:\n",
 " \"\"\"Convert the output dictionary to a formatted markdown string.\"\"\"\n",
-" markdown = \"### AI Response\\n\"\n",
-" markdown += f\"{output['response']}\\n\\n\"\n",
-"\n",
+" markdown = \"### AI Response\\n\" + output[\"response\"] + \"\\n\\n\"\n",
 " if output[\"predicted_trajectory\"]:\n",
-" output[\"predicted_trajectory\"] = json.loads(output[\"predicted_trajectory\"])\n",
 " markdown += \"### Function Calls\\n\"\n",
 " for call in output[\"predicted_trajectory\"]:\n",
 " markdown += f\"- **Function**: `{call['tool_name']}`\\n\"\n",
-" markdown += \" - **Arguments
+" markdown += \" - **Arguments**\\n\"\n",
 " for key, value in call[\"tool_input\"].items():\n",
 " markdown += f\" - `{key}`: `{value}`\\n\"\n",
-"\n",
 " return markdown\n",
 "\n",
 "\n",
 "def display_eval_report(eval_result: pd.DataFrame) -> None:\n",
 " \"\"\"Display the evaluation results.\"\"\"\n",
-" metrics_df = pd.DataFrame.from_dict(eval_result.summary_metrics, orient=\"index\").T\n",
 " display(Markdown(\"### Summary Metrics\"))\n",
-" display(
-"\n",
-"
-"
+" display(\n",
+" pd.DataFrame(\n",
+" eval_result.summary_metrics.items(), columns=[\"metric\", \"value\"]\n",
+" )\n",
+" )\n",
+" if getattr(eval_result, \"metrics_table\", None) is not None:\n",
+" display(Markdown(\"### Row‑wise Metrics\"))\n",
+" display(eval_result.metrics_table.head())\n",
 "\n",
 "\n",
 "def display_drilldown(row: pd.Series) -> None:\n",
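
The refactored helper above now returns the trajectory as a Python list by default and only serializes it when `as_json=True`. A minimal sketch of the resulting shape, using hypothetical values rather than anything taken from the package:

```python
import json

# Hypothetical return value of parse_adk_output_to_dictionary (illustrative only).
parsed = {
    "response": "The price of shoes is $100.",
    "predicted_trajectory": [
        {"tool_name": "get_product_price", "tool_input": {"product_name": "shoes"}},
    ],
}

# With as_json=True the trajectory would instead arrive pre-serialized, matching
# what the evaluation dataset's string columns expect.
trajectory_json = json.dumps(parsed["predicted_trajectory"])
```
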
@@ -586,37 +578,49 @@
 },
 "outputs": [],
 "source": [
-"def agent_parsed_outcome(query):\n",
-"\n",
-… (the remaining lines of the old synchronous implementation are truncated in the source diff)
+"async def agent_parsed_outcome(query):\n",
+" app_name = \"product_research_app\"\n",
+" user_id = \"user_one\"\n",
+" session_id = \"session_one\"\n",
+" \n",
+" product_research_agent = Agent(\n",
+" name=\"ProductResearchAgent\",\n",
+" model=model,\n",
+" description=\"An agent that performs product research.\",\n",
+" instruction=f\"\"\"\n",
+" Analyze this user request: '{query}'.\n",
+" If the request is about price, use get_product_price tool.\n",
+" Otherwise, use get_product_details tool to get product information.\n",
+" \"\"\",\n",
+" tools=[get_product_details, get_product_price],\n",
+" )\n",
+"\n",
+" session_service = InMemorySessionService()\n",
+" await session_service.create_session(\n",
+" app_name=app_name, user_id=user_id, session_id=session_id\n",
+" )\n",
+"\n",
+" runner = Runner(\n",
+" agent=product_research_agent, app_name=app_name, session_service=session_service\n",
+" )\n",
+"\n",
+" content = types.Content(role=\"user\", parts=[types.Part(text=query)])\n",
+" events = [event async for event in runner.run_async(user_id=user_id, session_id=session_id, new_message=content)]\n",
+" \n",
+" return parse_adk_output_to_dictionary(events)\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# --- Sync wrapper for Vertex‑AI evaluation\n",
+"def agent_parsed_outcome_sync(prompt: str):\n",
+" result = asyncio.run(agent_parsed_outcome(prompt))\n",
+" result[\"predicted_trajectory\"] = json.dumps(result[\"predicted_trajectory\"])\n",
+" return result"
 ]
 },
 {
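
The second new cell exists because the evaluation harness calls its runnable synchronously, once per dataset row, while the agent is now async; the coroutine is therefore driven with `asyncio.run` and the trajectory serialized before being handed back. A minimal usage sketch, assuming the Vertex AI GenAI evaluation SDK (`vertexai.preview.evaluation.EvalTask`); the dataset contents, experiment name, and metric choice are illustrative, and `agent_parsed_outcome_sync` refers to the wrapper defined in the cell above:

```python
import json

import pandas as pd
from vertexai.preview.evaluation import EvalTask

# Illustrative single-row dataset; trajectory metrics expect a reference_trajectory column.
eval_dataset = pd.DataFrame({
    "prompt": ["Get product price for shoes"],
    "reference_trajectory": [json.dumps([
        {"tool_name": "get_product_price", "tool_input": {"product_name": "shoes"}}
    ])],
})

eval_task = EvalTask(
    dataset=eval_dataset,
    metrics=["trajectory_exact_match"],
    experiment="evaluate-adk-agent",  # placeholder experiment name
)

# The runnable must be a plain callable, which is why the notebook passes the sync
# wrapper rather than the async agent_parsed_outcome coroutine (see the later hunks
# that set runnable=agent_parsed_outcome_sync).
eval_result = eval_task.evaluate(runnable=agent_parsed_outcome_sync)
```
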
@@ -638,7 +642,7 @@
 },
 "outputs": [],
 "source": [
-"response = agent_parsed_outcome(query=\"Get product details for shoes\")\n",
+"response = await agent_parsed_outcome(query=\"Get product details for shoes\")\n",
 "display(Markdown(format_output_as_markdown(response)))"
 ]
 },
@@ -650,7 +654,7 @@
 },
 "outputs": [],
 "source": [
-"response = agent_parsed_outcome(query=\"Get product price for shoes\")\n",
+"response = await agent_parsed_outcome(query=\"Get product price for shoes\")\n",
 "display(Markdown(format_output_as_markdown(response)))"
 ]
 },
@@ -714,7 +718,7 @@
 " \"Get product details and price for shoes\",\n",
 " \"Get product details for speaker?\",\n",
 " ],\n",
-" \"
+" \"predicted_trajectory\": [\n",
 " [\n",
 " {\n",
 " \"tool_name\": \"get_product_price\",\n",
@@ -840,7 +844,7 @@
 ")\n",
 "\n",
 "single_tool_call_eval_result = single_tool_call_eval_task.evaluate(\n",
-" runnable=
+" runnable=agent_parsed_outcome_sync, experiment_run_name=EXPERIMENT_RUN\n",
 ")\n",
 "\n",
 "display_eval_report(single_tool_call_eval_result)"
@@ -948,7 +952,7 @@
 ")\n",
 "\n",
 "trajectory_eval_result = trajectory_eval_task.evaluate(\n",
-" runnable=
+" runnable=agent_parsed_outcome_sync, experiment_run_name=EXPERIMENT_RUN\n",
 ")\n",
 "\n",
 "display_eval_report(trajectory_eval_result)"
@@ -1055,7 +1059,7 @@
 ")\n",
 "\n",
 "response_eval_result = response_eval_task.evaluate(\n",
-" runnable=
+" runnable=agent_parsed_outcome_sync, experiment_run_name=EXPERIMENT_RUN\n",
 ")\n",
 "\n",
 "display_eval_report(response_eval_result)"
@@ -1240,7 +1244,9 @@
 ")\n",
 "\n",
 "response_eval_tool_result = response_eval_tool_task.evaluate(\n",
-"
+" # Uncomment the line below if you are providing the agent with an unparsed dataset\n",
+" #runnable=agent_parsed_outcome_sync, \n",
+" experiment_run_name=EXPERIMENT_RUN\n",
 ")\n",
 "\n",
 "display_eval_report(response_eval_tool_result)"
@@ -1410,7 +1416,8 @@
 "].apply(json.dumps)\n",
 "byod_eval_sample_dataset[\"reference_trajectory\"] = byod_eval_sample_dataset[\n",
 " \"reference_trajectory\"\n",
-"].apply(json.dumps)"
+"].apply(json.dumps)\n",
+"byod_eval_sample_dataset[\"response\"] = byod_eval_sample_dataset[\"response\"].apply(json.dumps)"
 ]
 },
 {

llm.txt
CHANGED
@@ -31,9 +31,9 @@ Provides MLOps and infrastructure templates so developers focus on agent logic.
 Establishes production patterns from day one, saving setup time.
 
 ---
-### Section 2: Creating
+### Section 2: Creating & Enhancing Agent Projects
 
-Start by creating a new agent project from a predefined template.
+Start by creating a new agent project from a predefined template, or enhance an existing project with agent capabilities. Both processes support interactive and fully automated setup.
 
 **Prerequisites:**
 Before you begin, ensure you have `uv`/`uvx`, `gcloud` CLI, `terraform`, `git`, and `gh` CLI (for automated CI/CD setup) installed and authenticated.
@@ -62,22 +62,30 @@ agent-starter-pack create PROJECT_NAME [OPTIONS]
 ```
 
 **Arguments:**
-* `PROJECT_NAME`: Name for your new project directory and base for GCP resource naming.
+* `PROJECT_NAME`: Name for your new project directory and base for GCP resource naming (max 26 chars, converted to lowercase).
 
-**
-* `-a, --agent`: Agent template (e.g., `adk_base`, `agentic_rag`).
-… (the remaining removed option lines are truncated in the source diff)
+**Template Selection:**
+* `-a, --agent`: Agent template - built-in agents (e.g., `adk_base`, `agentic_rag`), remote templates (`adk@gemini-fullstack`, `github.com/user/repo@branch`), or local projects (`local@./path`).
+
+**Deployment Options:**
+* `-d, --deployment-target`: Target environment (`cloud_run` or `agent_engine`).
+* `--cicd-runner`: CI/CD runner (`google_cloud_build` or `github_actions`).
+* `--region`: GCP region (default: `us-central1`).
+
+**Data & Storage:**
+* `-i, --include-data-ingestion`: Include data ingestion pipeline.
+* `-ds, --datastore`: Datastore type (`vertex_ai_search`, `vertex_ai_vector_search`, `alloydb`).
+* `--session-type`: Session storage (`in_memory`, `alloydb`, `agent_engine`).
+
+**Project Creation:**
+* `-o, --output-dir`: Output directory (default: current directory).
+* `--agent-directory, -dir`: Agent code directory name (default: `app`).
+* `--in-folder`: Create files in current directory instead of new subdirectory.
+
+**Automation:**
+* `--auto-approve`: **Skip all interactive prompts (crucial for automation).**
+* `--skip-checks`: Skip GCP/Vertex AI verification checks.
+* `--debug`: Enable debug logging.
 
 **Automated Creation Example:**
 ```bash
@@ -90,6 +98,35 @@ uvx agent-starter-pack create my-automated-agent \
 
 ---
 
+### `agent-starter-pack enhance` Command
+
+Enhance your existing project with AI agent capabilities by adding agent-starter-pack features in-place. This command supports all the same options as `create` but templates directly into the current directory instead of creating a new project directory.
+
+**Usage:**
+```bash
+agent-starter-pack enhance [TEMPLATE_PATH] [OPTIONS]
+```
+
+**Key Differences from `create`:**
+* Templates into current directory (equivalent to `create --in-folder`)
+* `TEMPLATE_PATH` defaults to current directory (`.`)
+* Project name defaults to current directory name
+* Additional `--base-template` option to override template inheritance
+
+**Enhanced Project Example:**
+```bash
+# Enhance current directory with agent capabilities
+uvx agent-starter-pack enhance . \
+--base-template adk_base \
+-d cloud_run \
+--region us-central1 \
+--auto-approve
+```
+
+**Project Structure:** Expects agent code in `app/` directory (configurable via `--agent-directory`).
+
+---
+
 ### Available Agent Templates
 
 Templates for the `create` command (via `-a` or `--agent`):
@@ -208,42 +245,53 @@ For stateful agents, the starter pack supports persistent sessions.
 ---
 
 ### `agent-starter-pack setup-cicd`
-Automates the
+Automates the complete CI/CD infrastructure setup for GitHub-based deployments. Intelligently detects your CI/CD runner (Google Cloud Build or GitHub Actions) and configures everything automatically.
 
 **Usage:**
 ```bash
 uvx agent-starter-pack setup-cicd [OPTIONS]
 ```
-… (eight removed lines here are truncated in the source diff)
+
+**Prerequisites:**
+- Run from the project root (directory with `pyproject.toml`)
+- Required tools: `gh` CLI (authenticated), `gcloud` CLI (authenticated), `terraform`
+- `Owner` role on GCP projects
+- GitHub token with `repo` and `workflow` scopes
+
+**Key Options:**
+* `--staging-project`, `--prod-project`: GCP project IDs (will prompt if omitted).
+* `--repository-name`, `--repository-owner`: GitHub repo details (will prompt if omitted).
+* `--cicd-project`: CI/CD resources project (defaults to prod project).
 * `--dev-project`: Development project ID (optional).
-* `--
-* `--
-* `--
-* `--github-pat`: GitHub Personal Access Token for programmatic auth.
-* `--github-app-installation-id`: GitHub App Installation ID for programmatic auth.
-* `--local-state`: Use local Terraform state instead of remote GCS backend.
+* `--region`: GCP region (default: `us-central1`).
+* `--auto-approve`: Skip all interactive prompts.
+* `--local-state`: Use local Terraform state instead of GCS backend.
 * `--debug`: Enable debug logging.
-* `--create-repository`: Flag indicating whether to create a new repository.
 
+**What it does:**
+1. Creates/connects GitHub repository
+2. Sets up Terraform infrastructure with remote state
+3. Configures CI/CD runner connection (Cloud Build or GitHub Actions with WIF)
+4. Provisions staging/prod environments
+5. Sets up local Git repository with origin remote
 
 **Automated Example:**
 ```bash
 uvx agent-starter-pack setup-cicd \
---staging-project
---prod-project
---repository-name
---repository-owner
+--staging-project your-staging-project \
+--prod-project your-prod-project \
+--repository-name your-repo-name \
+--repository-owner your-username \
 --auto-approve
 ```
-
-
+
+**After setup, push to trigger pipeline:**
+```bash
+git add . && git commit -m "Initial commit" && git push -u origin main
+```
+
+* Note: For coding agents - ask user for required project IDs and repo details before running with `--auto-approve`.
+* Note: If user prefers different git provider, refer to `deployment/README.md` for manual deployment.
 ---
 ### Section 6: Operational Guidelines for Coding Agents
 
src/base_template/Makefile
CHANGED
@@ -76,7 +76,7 @@ backend:
 --region "us-central1" \
 --no-allow-unauthenticated \
 --no-cpu-throttling \
---labels "created-by=adk" \
+--labels "created-by=adk{%- if cookiecutter.agent_garden %},deployed-with=agent-garden{%- endif %}" \
 --set-env-vars \
 "COMMIT_SHA=$(shell git rev-parse HEAD){%- if cookiecutter.data_ingestion %}{%- if cookiecutter.datastore_type == "vertex_ai_search" %},DATA_STORE_ID={{cookiecutter.project_name}}-datastore,DATA_STORE_REGION=us{%- elif cookiecutter.datastore_type == "vertex_ai_vector_search" %},VECTOR_SEARCH_INDEX={{cookiecutter.project_name}}-vector-search,VECTOR_SEARCH_INDEX_ENDPOINT={{cookiecutter.project_name}}-vector-search-endpoint,VECTOR_SEARCH_BUCKET=$$PROJECT_ID-{{cookiecutter.project_name}}-vs{%- endif %}{%- endif %}" \
 $(if $(IAP),--iap) \
@@ -98,6 +98,21 @@ local-backend:
 # Start the frontend UI separately for development (requires backend running separately)
 ui:
 (cd frontend && PORT=8501 npm start)
+
+# Launch dev playground with both frontend and backend hot-reload
+playground-dev:
+@echo "==============================================================================="
+@echo "| 🚀 Starting your agent playground in DEV MODE... |"
+@echo "| |"
+@echo "| 🌐 Frontend: http://localhost:8501 |"
+@echo "| 🌐 Backend: http://localhost:8000 |"
+@echo "| 💡 Try asking: What's the weather in San Francisco? |"
+@echo "| 🔄 Both frontend and backend will auto-reload on changes |"
+@echo "==============================================================================="
+@echo "Starting backend server..."
+$(MAKE) local-backend &
+@echo "Starting frontend dev server..."
+$(MAKE) ui
 {%- endif %}
 {%- endif %}
 
src/base_template/README.md
CHANGED
@@ -33,7 +33,7 @@ This project is organized as follows:
 ## Requirements
 
 Before you begin, ensure you have:
-- **uv**: Python package manager - [Install](https://docs.astral.sh/uv/getting-started/installation/)
+- **uv**: Python package manager (used for all dependency management in this project) - [Install](https://docs.astral.sh/uv/getting-started/installation/) ([add packages](https://docs.astral.sh/uv/concepts/dependencies/) with `uv add <package>`)
 - **Google Cloud SDK**: For GCP services - [Install](https://cloud.google.com/sdk/docs/install)
 - **Terraform**: For infrastructure deployment - [Install](https://developer.hashicorp.com/terraform/downloads)
 - **make**: Build automation tool - [Install](https://www.gnu.org/software/make/) (pre-installed on most Unix-based systems)

src/cli/commands/create.py
CHANGED
@@ -57,6 +57,13 @@ __all__ = ["create", "shared_template_options"]
 def shared_template_options(f: Callable) -> Callable:
 """Decorator to add shared options for template-based commands."""
 # Apply options in reverse order since decorators are applied bottom-up
+f = click.option(
+"-ag",
+"--agent-garden",
+is_flag=True,
+help="Deployed from Agent Garden - customizes welcome messages",
+default=False,
+)(f)
 f = click.option(
 "--skip-checks",
 is_flag=True,
@@ -237,6 +244,7 @@ def create(
 skip_checks: bool,
 in_folder: bool,
 agent_directory: str | None,
+agent_garden: bool = False,
 base_template: str | None = None,
 skip_welcome: bool = False,
 cli_overrides: dict | None = None,
@@ -247,7 +255,7 @@ def create(
 
 # Display welcome banner (unless skipped)
 if not skip_welcome:
-display_welcome_banner(agent)
+display_welcome_banner(agent, agent_garden=agent_garden)
 # Validate project name
 if len(project_name) > 26:
 console.print(
@@ -625,6 +633,7 @@ def create(
 skip_checks=skip_checks,
 region=region,
 debug=debug,
+agent_garden=agent_garden,
 )
 except Exception as e:
 if debug:
@@ -680,6 +689,7 @@ def create(
 remote_config=config,
 in_folder=in_folder,
 cli_overrides=final_cli_overrides,
+agent_garden=agent_garden,
 )
 
 # Replace region in all files if a different region was specified
@@ -902,7 +912,11 @@ def set_gcp_project(project_id: str, set_quota_project: bool = True) -> None:
 
 
 def setup_gcp_environment(
-auto_approve: bool,
+auto_approve: bool,
+skip_checks: bool,
+region: str,
+debug: bool,
+agent_garden: bool = False,
 ) -> dict:
 """Set up the GCP environment with proper credentials and project.
 
@@ -911,6 +925,7 @@ def setup_gcp_environment(
 skip_checks: Whether to skip verification checks
 region: GCP region for deployment
 debug: Whether debug logging is enabled
+agent_garden: Whether this deployment is from Agent Garden
 
 Returns:
 Dictionary with credential information
@@ -934,12 +949,16 @@ def setup_gcp_environment(
 console.print("> Skipping Vertex AI connection test", style="yellow")
 else:
 # Test Vertex AI connection
-_test_vertex_ai_connection(
+_test_vertex_ai_connection(
+creds_info["project"], region, agent_garden=agent_garden
+)
 else:
 # Even with auto_approve, we should still set the GCP project
 set_gcp_project(creds_info["project"], set_quota_project=True)
 # Test Vertex AI connection
-_test_vertex_ai_connection(
+_test_vertex_ai_connection(
+creds_info["project"], region, agent_garden=agent_garden
+)
 
 return creds_info
 
@@ -1027,7 +1046,7 @@ def _handle_credential_verification(creds_info: dict) -> dict:
 
 
 def _test_vertex_ai_connection(
-project_id: str, region: str, auto_approve: bool = False
+project_id: str, region: str, auto_approve: bool = False, agent_garden: bool = False
 ) -> None:
 """Test connection to Vertex AI.
 
@@ -1035,13 +1054,16 @@ def _test_vertex_ai_connection(
 project_id: GCP project ID
 region: GCP region for deployment
 auto_approve: Whether to auto-approve API enablement
+agent_garden: Whether this deployment is from Agent Garden
 """
 console.print("> Testing GCP and Vertex AI Connection...")
 try:
+context = "agent-garden" if agent_garden else None
 verify_vertex_connection(
 project_id=project_id,
 location=region,
 auto_approve=auto_approve,
+context=context,
 )
 console.print(
 f"> ✓ Successfully verified connection to Vertex AI in project {project_id}"