agent-starter-pack 0.1.7__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agent_starter_pack-0.1.7.dist-info → agent_starter_pack-0.2.0.dist-info}/METADATA +6 -6
- {agent_starter_pack-0.1.7.dist-info → agent_starter_pack-0.2.0.dist-info}/RECORD +77 -77
- agents/{agentic_rag_vertexai_search → agentic_rag}/README.md +3 -3
- agents/{agentic_rag_vertexai_search → agentic_rag}/app/agent.py +22 -6
- agents/agentic_rag/app/retrievers.py +132 -0
- agents/{agentic_rag_vertexai_search → agentic_rag}/notebooks/evaluating_langgraph_agent.ipynb +3 -3
- agents/{agentic_rag_vertexai_search → agentic_rag}/template/.templateconfig.yaml +3 -5
- agents/crewai_coding_crew/notebooks/evaluating_crewai_agent.ipynb +4 -4
- agents/crewai_coding_crew/notebooks/evaluating_langgraph_agent.ipynb +3 -3
- agents/langgraph_base_react/notebooks/evaluating_langgraph_agent.ipynb +3 -3
- agents/{multimodal_live_api → live_api}/README.md +7 -0
- agents/{multimodal_live_api → live_api}/app/agent.py +3 -11
- agents/{multimodal_live_api → live_api}/app/server.py +3 -2
- agents/{multimodal_live_api → live_api}/template/.templateconfig.yaml +2 -2
- src/base_template/Makefile +12 -7
- src/base_template/README.md +71 -71
- src/base_template/app/utils/tracing.py +3 -1
- src/base_template/app/utils/typing.py +1 -0
- src/base_template/deployment/cd/deploy-to-prod.yaml +10 -4
- src/base_template/deployment/cd/staging.yaml +11 -10
- src/base_template/deployment/ci/pr_checks.yaml +1 -1
- src/base_template/deployment/terraform/apis.tf +6 -0
- src/base_template/deployment/terraform/build_triggers.tf +34 -21
- src/base_template/deployment/terraform/dev/iam.tf +13 -6
- src/base_template/deployment/terraform/dev/log_sinks.tf +25 -28
- src/base_template/deployment/terraform/dev/providers.tf +1 -0
- src/base_template/deployment/terraform/dev/storage.tf +69 -11
- src/base_template/deployment/terraform/dev/variables.tf +50 -53
- src/base_template/deployment/terraform/dev/vars/env.tfvars +13 -11
- src/base_template/deployment/terraform/iam.tf +3 -3
- src/base_template/deployment/terraform/log_sinks.tf +24 -26
- src/base_template/deployment/terraform/providers.tf +2 -0
- src/base_template/deployment/terraform/service_accounts.tf +7 -7
- src/base_template/deployment/terraform/storage.tf +123 -11
- src/base_template/deployment/terraform/variables.tf +49 -70
- src/base_template/deployment/terraform/vars/env.tfvars +12 -17
- src/base_template/pyproject.toml +4 -3
- src/cli/commands/create.py +79 -19
- src/cli/commands/setup_cicd.py +91 -22
- src/cli/main.py +3 -1
- src/cli/utils/__init__.py +9 -2
- src/cli/utils/cicd.py +12 -0
- src/cli/utils/datastores.py +32 -0
- src/cli/utils/gcp.py +4 -6
- src/cli/utils/template.py +127 -45
- src/cli/utils/version.py +87 -0
- src/data_ingestion/README.md +24 -19
- src/data_ingestion/data_ingestion_pipeline/components/ingest_data.py +135 -2
- src/data_ingestion/data_ingestion_pipeline/components/process_data.py +276 -2
- src/data_ingestion/data_ingestion_pipeline/pipeline.py +28 -5
- src/data_ingestion/data_ingestion_pipeline/submit_pipeline.py +49 -14
- src/data_ingestion/pyproject.toml +1 -0
- src/deployment_targets/agent_engine/app/agent_engine_app.py +3 -1
- src/deployment_targets/cloud_run/tests/unit/test_server.py +15 -33
- src/frontends/live_api_react/frontend/package-lock.json +208 -168
- src/frontends/live_api_react/frontend/package.json +1 -1
- src/resources/containers/data_processing/Dockerfile +3 -1
- src/resources/locks/{uv-agentic_rag_vertexai_search-agent_engine.lock → uv-agentic_rag-agent_engine.lock} +747 -694
- src/resources/locks/{uv-agentic_rag_vertexai_search-cloud_run.lock → uv-agentic_rag-cloud_run.lock} +944 -806
- src/resources/locks/uv-crewai_coding_crew-agent_engine.lock +651 -694
- src/resources/locks/uv-crewai_coding_crew-cloud_run.lock +813 -789
- src/resources/locks/uv-langgraph_base_react-agent_engine.lock +666 -686
- src/resources/locks/uv-langgraph_base_react-cloud_run.lock +848 -798
- src/resources/locks/{uv-multimodal_live_api-cloud_run.lock → uv-live_api-cloud_run.lock} +856 -791
- src/resources/setup_cicd/cicd_variables.tf +5 -0
- src/resources/setup_cicd/github.tf +4 -2
- src/utils/watch_and_rebuild.py +14 -0
- agents/agentic_rag_vertexai_search/app/retrievers.py +0 -79
- src/deployment_targets/cloud_run/deployment/terraform/artifact_registry.tf +0 -22
- src/deployment_targets/cloud_run/deployment/terraform/dev/service_accounts.tf +0 -20
- {agent_starter_pack-0.1.7.dist-info → agent_starter_pack-0.2.0.dist-info}/WHEEL +0 -0
- {agent_starter_pack-0.1.7.dist-info → agent_starter_pack-0.2.0.dist-info}/entry_points.txt +0 -0
- {agent_starter_pack-0.1.7.dist-info → agent_starter_pack-0.2.0.dist-info}/licenses/LICENSE +0 -0
- /agents/{agentic_rag_vertexai_search → agentic_rag}/app/templates.py +0 -0
- /agents/{agentic_rag_vertexai_search → agentic_rag}/tests/integration/test_agent.py +0 -0
- /agents/{multimodal_live_api → live_api}/app/templates.py +0 -0
- /agents/{multimodal_live_api → live_api}/app/vector_store.py +0 -0
- /agents/{multimodal_live_api → live_api}/tests/integration/test_server_e2e.py +0 -0
- /agents/{multimodal_live_api → live_api}/tests/load_test/load_test.py +0 -0
- /agents/{multimodal_live_api → live_api}/tests/unit/test_server.py +0 -0
agents/{multimodal_live_api → live_api}/README.md
CHANGED

@@ -28,3 +28,10 @@ Explore these resources to learn more about the Multimodal Live API and see exam
 - [Google Cloud Multimodal Live API demos and samples](https://github.com/GoogleCloudPlatform/generative-ai/tree/main/gemini/multimodal-live-api): Collection of code samples and demo applications leveraging multimodal live API in Vertex AI
 - [Gemini 2 Cookbook](https://github.com/google-gemini/cookbook/tree/main/gemini-2): Practical examples and tutorials for working with Gemini 2
 - [Multimodal Live API Web Console](https://github.com/google-gemini/multimodal-live-api-web-console): Interactive React-based web interface for testing and experimenting with Gemini Multimodal Live API.
+
+## Current Status & Future Work
+
+This pattern is under active development. Key areas planned for future enhancement include:
+
+* **Observability:** Implementing comprehensive monitoring and tracing features.
+* **Load Testing:** Integrating load testing capabilities.
agents/{multimodal_live_api → live_api}/app/agent.py
CHANGED

@@ -19,9 +19,7 @@ import vertexai
 from google import genai
 from google.genai.types import (
     Content,
-    FunctionDeclaration,
     LiveConnectConfig,
-    Tool,
 )
 from langchain_google_vertexai import VertexAIEmbeddings
 

@@ -32,7 +30,7 @@ from app.vector_store import get_vector_store
 VERTEXAI = os.getenv("VERTEXAI", "true").lower() == "true"
 LOCATION = "us-central1"
 EMBEDDING_MODEL = "text-embedding-004"
-MODEL_ID = "gemini-2.0-flash-
+MODEL_ID = "gemini-2.0-flash-exp"
 URLS = [
     "https://cloud.google.com/architecture/deploy-operate-generative-ai-applications"
 ]

@@ -70,17 +68,11 @@ def retrieve_docs(query: str) -> dict[str, str]:
     return {"output": formatted_docs}
 
 
-# Configure tools and live connection
-retrieve_docs_tool = Tool(
-    function_declarations=[
-        FunctionDeclaration.from_callable(client=genai_client, callable=retrieve_docs)
-    ]
-)
-
+# Configure tools available to the agent and live connection
 tool_functions = {"retrieve_docs": retrieve_docs}
 
 live_connect_config = LiveConnectConfig(
     response_modalities=["AUDIO"],
-    tools=[
+    tools=[retrieve_docs],
     system_instruction=Content(parts=[{"text": SYSTEM_INSTRUCTION}]),
 )
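The agent.py hunks above drop the explicit `Tool`/`FunctionDeclaration` wrapping: the `retrieve_docs` callable is handed straight to `LiveConnectConfig`, and the SDK derives the function declaration from its signature and docstring. A minimal sketch of the resulting wiring, assuming a google-genai version that accepts plain Python callables in `tools` (as the new template code does); `SYSTEM_INSTRUCTION` and the real body of `retrieve_docs` come from the template and are stubbed here:

```python
from google.genai.types import Content, LiveConnectConfig

SYSTEM_INSTRUCTION = "Answer using the retrieval tool."  # stub; the template defines the real prompt


def retrieve_docs(query: str) -> dict[str, str]:
    """Stub of the template's retriever tool; the real one queries the vector store."""
    return {"output": f"docs for: {query}"}


# The callable itself is registered; no Tool/FunctionDeclaration wrapper is needed.
tool_functions = {"retrieve_docs": retrieve_docs}

live_connect_config = LiveConnectConfig(
    response_modalities=["AUDIO"],
    tools=[retrieve_docs],
    system_instruction=Content(parts=[{"text": SYSTEM_INSTRUCTION}]),
)
```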
agents/{multimodal_live_api → live_api}/app/server.py
CHANGED

@@ -121,8 +121,9 @@ class GeminiSession:
         """
         while result := await self.session._ws.recv(decode=False):
             await self.websocket.send_bytes(result)
-
-            if
+            raw_message = json.loads(result)
+            if "toolCall" in raw_message:
+                message = types.LiveServerMessage.model_validate(raw_message)
                 tool_call = LiveServerToolCall.model_validate(message.tool_call)
                 await self._handle_tool_call(self.session, tool_call)
 
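The server.py hunk forwards every raw frame to the browser unchanged and only runs full Pydantic validation on frames whose JSON actually contains a `toolCall` key, which keeps the audio path cheap. A sketch of that receive loop with the reasoning spelled out in comments; the method name and the `session`/`websocket` attributes are assumed context, not shown in the hunk:

```python
import json

from google.genai import types
from google.genai.types import LiveServerToolCall


class GeminiSession:
    """Sketch of the proxy loop in the hunk above; construction is assumed context."""

    async def _receive_from_gemini(self) -> None:
        # Forward every raw frame to the client first, so audio is never delayed.
        while result := await self.session._ws.recv(decode=False):
            await self.websocket.send_bytes(result)
            # Cheap JSON probe: only frames carrying a tool call pay for full
            # LiveServerMessage validation and tool dispatch.
            raw_message = json.loads(result)
            if "toolCall" in raw_message:
                message = types.LiveServerMessage.model_validate(raw_message)
                tool_call = LiveServerToolCall.model_validate(message.tool_call)
                await self._handle_tool_call(self.session, tool_call)
```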
agents/{multimodal_live_api → live_api}/template/.templateconfig.yaml
CHANGED

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-description: "A real-time multimodal RAG agent powered by Gemini, supporting audio/video/text chat with vector DB-backed responses"
+description: "A real-time multimodal RAG agent powered by Gemini Live API, supporting audio/video/text chat with vector DB-backed responses"
 settings:
   requires_data_ingestion: false
   frontend_type: "live_api_react"

@@ -20,7 +20,7 @@ settings:
   extra_dependencies: [
     "backoff~=2.2.1",
     "beautifulsoup4~=4.12.3",
-    "google-genai~=1.
+    "google-genai~=1.8.0",
     "jinja2~=3.1.4",
     "langchain~=0.3.13",
     "langchain-community~=0.3.13",
src/base_template/Makefile
CHANGED
@@ -1,6 +1,6 @@
 install:
 	@command -v uv >/dev/null 2>&1 || { echo "uv is not installed. Installing uv..."; curl -LsSf https://astral.sh/uv/install.sh | sh; source ~/.bashrc; }
-	uv sync --dev {% if cookiecutter.agent_name != '
+	uv sync --dev {% if cookiecutter.agent_name != 'live_api' %}--extra streamlit{%- endif %} --extra jupyter --frozen{% if cookiecutter.agent_name == 'live_api' %} && npm --prefix frontend install{%- endif %}
 
 test:
 	uv run pytest tests/unit && uv run pytest tests/integration

@@ -9,7 +9,7 @@ playground:
 {%- if cookiecutter.deployment_target == 'cloud_run' %}
 	uv run uvicorn app.server:app --host 0.0.0.0 --port 8000 --reload &
 {%- endif %}
-{%- if cookiecutter.agent_name == '
+{%- if cookiecutter.agent_name == 'live_api' %}
 	npm --prefix frontend start
 {%- else %}
 	{% if cookiecutter.deployment_target == 'agent_engine' %}PYTHONPATH=. {% endif %}uv run streamlit run frontend/streamlit_app.py --browser.serverAddress=localhost --server.enableCORS=false --server.enableXsrfProtection=false

@@ -24,7 +24,7 @@ backend:
 
 {% if cookiecutter.deployment_target == 'cloud_run' -%}
 ui:
-{%- if cookiecutter.agent_name == '
+{%- if cookiecutter.agent_name == 'live_api' %}
 	npm --prefix frontend start
 {%- else %}
 	uv run streamlit run streamlit/streamlit_app.py --browser.serverAddress=localhost --server.enableCORS=false --server.enableXsrfProtection=false

@@ -34,18 +34,23 @@ ui:
 setup-dev-env:
 	@if [ -z "$$PROJECT_ID" ]; then echo "Error: PROJECT_ID environment variable is not set"; exit 1; fi
 	(cd deployment/terraform/dev && terraform init && terraform apply --var-file vars/env.tfvars --var dev_project_id=$$PROJECT_ID --auto-approve)
-
+
 {%- if cookiecutter.data_ingestion%}
 data-ingestion:
 	@if [ -z "$$PROJECT_ID" ]; then echo "Error: PROJECT_ID environment variable is not set"; exit 1; fi
 	$(MAKE) install
 	(cd data_ingestion && uv run data_ingestion_pipeline/submit_pipeline.py \
 	--project-id=$$PROJECT_ID \
-	--data-store-id="sample-datastore" \
 	--region="us-central1" \
+	{%- if cookiecutter.datastore_type == "vertex_ai_search" %}
+	--data-store-id="{{cookiecutter.project_name}}-datastore" \
 	--data-store-region="us" \
-
-	--
+	{%- elif cookiecutter.datastore_type == "vertex_ai_vector_search" %}
+	--vector-search-index="{{cookiecutter.project_name}}-vector-search" \
+	--vector-search-index-endpoint="{{cookiecutter.project_name}}-vector-search-endpoint" \
+	{%- endif %}
+	--service-account="{{cookiecutter.project_name}}-rag@$$PROJECT_ID.iam.gserviceaccount.com" \
+	--pipeline-root="gs://$$PROJECT_ID-{{cookiecutter.project_name}}-rag" \
 	--pipeline-name="data-ingestion-pipeline")
 {%- endif %}
 
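The reworked data-ingestion target above now forwards one of two flag sets to `data_ingestion_pipeline/submit_pipeline.py`: Vertex AI Search flags or Vertex AI Vector Search flags, plus a service account and pipeline root. A hedged sketch of a CLI that accepts both sets; only the flag names visible in the Makefile are taken from the release, and everything else (defaults, requiredness, the parser itself) is illustrative:

```python
import argparse


def parse_args() -> argparse.Namespace:
    """Illustrative parser covering the flags the Makefile target passes."""
    parser = argparse.ArgumentParser(description="Submit the data ingestion pipeline.")
    parser.add_argument("--project-id", required=True)
    parser.add_argument("--region", default="us-central1")
    # Flags used when datastore_type == "vertex_ai_search"
    parser.add_argument("--data-store-id")
    parser.add_argument("--data-store-region")
    # Flags used when datastore_type == "vertex_ai_vector_search"
    parser.add_argument("--vector-search-index")
    parser.add_argument("--vector-search-index-endpoint")
    parser.add_argument("--service-account", required=True)
    parser.add_argument("--pipeline-root", required=True)
    parser.add_argument("--pipeline-name", default="data-ingestion-pipeline")
    return parser.parse_args()


if __name__ == "__main__":
    print(parse_args())
```

The Jinja branches in the Makefile ensure that only one of the two datastore flag sets is actually passed for a given generated project.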
src/base_template/README.md
CHANGED
@@ -1,8 +1,7 @@
 # {{cookiecutter.project_name}}
 
 {{cookiecutter.agent_description}}
-
-Agent generated with [`googleCloudPlatform/agent-starter-pack`](https://github.com/GoogleCloudPlatform/agent-starter-pack)
+Agent generated with [`googleCloudPlatform/agent-starter-pack`](https://github.com/GoogleCloudPlatform/agent-starter-pack) version `{{ cookiecutter.package_version }}`
 
 ## Project Structure
 
@@ -34,24 +33,12 @@ Before you begin, ensure you have:
 - **make**: Build automation tool - [Install](https://www.gnu.org/software/make/) (pre-installed on most Unix-based systems)
 
 
-
-
-Install required packages using uv:
-
-```bash
-make install
-```
-
-### Setup
+## Quick Start (Local Testing)
 
-
+Install required packages and launch the local development environment:
 
 ```bash
-
-export LOCATION="us-central1"
-gcloud config set project $PROJECT_ID
-gcloud auth application-default login
-gcloud auth application-default set-quota-project $PROJECT_ID
+make install && make playground
 ```
 
 ## Commands
@@ -73,104 +60,90 @@ gcloud auth application-default set-quota-project $PROJECT_ID
 
 For full command options and usage, refer to the [Makefile](Makefile).
 
-{% if cookiecutter.agent_name == '
+{% if cookiecutter.agent_name == 'live_api' %}
 ## Usage
 
-
+This template follows a "bring your own agent" approach - you focus on your business logic in `app/agent.py`, and the template handles the surrounding components (UI, infrastructure, deployment, monitoring).
+
+Here’s the recommended workflow for local development:
 
+1. **Install Dependencies (if needed):**
 ```bash
 make install
 ```
 
-2. **Start the Backend
-
-**Backend:**
+2. **Start the Backend Server:**
+Open a terminal and run:
 ```bash
 make backend
 ```
-
-The backend will be ready when you see `INFO: Application startup complete.` in the console.
+The backend is ready when you see `INFO: Application startup complete.` Wait for this message before starting the frontend.
 
 <details>
-<summary><b>
+<summary><b>Optional: Use AI Studio / API Key instead of Vertex AI</b></summary>
+
+By default, the backend uses Vertex AI and Application Default Credentials. If you prefer to use Google AI Studio and an API key:
 
 ```bash
 export VERTEXAI=false
-export GOOGLE_API_KEY=your-google-api-key
+export GOOGLE_API_KEY="your-google-api-key" # Replace with your actual key
+make backend
 ```
-
+Ensure `GOOGLE_API_KEY` is set correctly in your environment.
 </details>
 <br>
-
-
+
+3. **Start the Frontend UI:**
+Open *another* terminal and run:
 ```bash
-# In a different shell
 make ui
 ```
+This launches the Streamlit application, which connects to the backend server (by default at `http://localhost:8000`).
 
-
-
-
-
-
-**Remote access**:
-Use [Cloud Run proxy](https://cloud.google.com/sdk/gcloud/reference/run/services/proxy) for local access. The backend will be accessible at `http://localhost:8000`:
-
-```bash
-gcloud run services proxy genai-app-sample --port 8000 --project $PROJECT_ID --region $REGION
-```
-
-You can then use the same frontend setup described above to interact with your Cloud Run deployment.
+4. **Interact and Iterate:**
+* Open the Streamlit UI in your browser (usually `http://localhost:3000` or `http://localhost:3001`).
+* Click the play button in the UI to connect to the backend.
+* Interact with the agent! Try prompts like: *"Using the tool you have, define Governance in the context MLOPs"*
+* Modify the agent logic in `app/agent.py`. The backend server (FastAPI with `uvicorn --reload`) should automatically restart when you save changes. Refresh the frontend if needed to see behavioral changes.
 
 <details>
-<summary><b>Cloud Shell
+<summary><b>Cloud Shell Usage</b></summary>
 
-To
+To run the agent using Google Cloud Shell:
 
 1. **Start the Frontend:**
-
+In a Cloud Shell tab, run:
 ```bash
 make ui
 ```
-
-You may be prompted to run the app on a different port if port 3000 is in use. Accept by pressing Enter. You'll see a message similar to:
-
-```
-You can now view multimodal-live-api-web-console in the browser.
-
-Local: http://localhost:3001
-On Your Network: http://10.88.0.4:3001
-```
-
-Click the `localhost` link to open a web preview in Cloud Shell.
+Accept prompts to use a different port if 3000 is busy. Click the `localhost:PORT` link for the web preview.
 
 2. **Start the Backend:**
-
-Open a *new* Cloud Shell terminal tab. Remember to set your Cloud Platform project in this new session using `gcloud config set project [PROJECT_ID]`. Then from the root of the repository, run:
-
+Open a *new* Cloud Shell tab. Set your project: `gcloud config set project [PROJECT_ID]`. Then run:
 ```bash
 make backend
 ```
 
-3. **Configure Web Preview
-
-Trigger a web preview for port 8000 - you'll need to change the default port which is `8080`. See [Cloud Shell Web Preview documentation](https://cloud.google.com/shell/docs/using-web-preview#preview_the_application) for details.
+3. **Configure Backend Web Preview:**
+Use the Cloud Shell Web Preview feature to expose port 8000. Change the default port from 8080 to 8000. See [Cloud Shell Web Preview documentation](https://cloud.google.com/shell/docs/using-web-preview#preview_the_application).
 
 4. **Connect Frontend to Backend:**
+* Copy the URL generated by the backend web preview (e.g., `https://8000-cs-....cloudshell.dev/`).
+* Paste this URL into the "Server URL" field in the frontend UI settings (in the first tab).
+* Click the "Play button" to connect.
 
-
-
-* Paste the copied URL into the frontend's "Server URL" connection settings.
-* Click the "Play button" to connect. Start interacting with it!
+* **Note:** The feedback feature in the frontend might not work reliably in Cloud Shell due to cross-origin issues between the preview URLs.
+</details>
 
-* When using Cloud Shell there is a known limitation when using the feedback feature in the Frontend. Feedback submission might fail due to different origins between the frontend and backend in the Cloud Shell environment.
 </details>
 {%- else %}
 ## Usage
 
+This template follows a "bring your own agent" approach - you focus on your business logic, and the template handles everything else (UI, infrastructure, deployment, monitoring).
+
 1. **Prototype:** Build your Generative AI Agent using the intro notebooks in `notebooks/` for guidance. Use Vertex AI Evaluation to assess performance.
-2. **Integrate:** Import your
-3. **Test:** Explore your
+2. **Integrate:** Import your agent into the app by editing `app/agent.py`.
+3. **Test:** Explore your agent functionality using the Streamlit playground with `make playground`. The playground offers features like chat history, user feedback, and various input types, and automatically reloads your agent on code changes.
 4. **Deploy:** Configure and trigger the CI/CD pipelines, editing tests if needed. See the [deployment section](#deployment) for details.
 5. **Monitor:** Track performance and gather insights using Cloud Logging, Tracing, and the Looker Studio dashboard to iterate on your application.
 {% endif %}
@@ -186,6 +159,31 @@ You can test deployment towards a Dev Environment using the following command:
 gcloud config set project <your-dev-project-id>
 make backend
 ```
+{%- elif cookiecutter.deployment_target == 'cloud_run' %}
+Deploy the application directly to Cloud Run from your source code using the following `gcloud` command:
+
+```bash
+gcloud run deploy genai-app-sample \
+--source . \
+--project YOUR_PROJECT_ID \
+--region YOUR_GCP_REGION \
+--memory "4Gi" \
+```
+Replace `YOUR_PROJECT_ID` with your Google Cloud project ID and `YOUR_GCP_REGION` with the desired region (e.g., `us-central1`). Adjust memory and other flags as needed for your environment.
+{% if cookiecutter.agent_name == 'live_api' %}
+**Accessing the Deployed Backend Locally:**
+
+To connect your local frontend (`make ui`) to the backend deployed on Cloud Run, use the `gcloud` proxy:
+
+1. **Start the proxy:**
+```bash
+# Replace with your actual service name, project, and region
+gcloud run services proxy gemini-agent-service --port 8000 --project $PROJECT_ID --region $REGION
+```
+Keep this terminal running.
+
+2. **Connect Frontend:** Your deployed backend is now accessible locally at `http://localhost:8000`. Point your Streamlit UI to this address.
+{%- endif %}
 {%- endif %}
 
 The repository includes a Terraform configuration for the setup of the Dev Google Cloud project.
@@ -195,8 +193,10 @@ See [deployment/README.md](deployment/README.md) for instructions.
 
 The repository includes a Terraform configuration for the setup of a production Google Cloud project. Refer to [deployment/README.md](deployment/README.md) for detailed instructions on how to deploy the infrastructure and application.
 
+{% if cookiecutter.agent_name != 'live_api' %}
 ## Monitoring and Observability
 
-
+> You can use [this Looker Studio dashboard](https://lookerstudio.google.com/c/reporting/fa742264-4b4b-4c56-81e6-a667dd0f853f/page/tEnnC) template for visualizing events being logged in BigQuery. See the "Setup Instructions" tab to getting started.
 
 The application uses OpenTelemetry for comprehensive observability with all events being sent to Google Cloud Trace and Logging for monitoring and to BigQuery for long term storage.
+{%- endif %}
src/base_template/app/utils/tracing.py
CHANGED

@@ -57,7 +57,9 @@ class CloudTraceLoggingSpanExporter(CloudTraceSpanExporter):
         )
         self.logger = self.logging_client.logger(__name__)
         self.storage_client = storage_client or storage.Client(project=self.project_id)
-        self.bucket_name =
+        self.bucket_name = (
+            bucket_name or f"{self.project_id}-{{cookiecutter.project_name}}-logs-data"
+        )
         self.bucket = self.storage_client.bucket(self.bucket_name)
 
     def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
src/base_template/app/utils/typing.py
CHANGED

@@ -62,6 +62,7 @@ class Feedback(BaseModel):
     text: str | None = ""
     run_id: str
     log_type: Literal["feedback"] = "feedback"
+    service_name: Literal["{{cookiecutter.project_name}}"] = "{{cookiecutter.project_name}}"
 
 
 def ensure_valid_config(config: RunnableConfig | None) -> RunnableConfig:
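The typing.py hunk adds a constant `service_name` field whose `Literal` type and default are both the rendered project name, so every serialized feedback record identifies the service it came from. A small sketch of the resulting model, with `my-agent` standing in for the rendered `{{cookiecutter.project_name}}` and the other fields abridged to those shown in the hunk:

```python
from typing import Literal

from pydantic import BaseModel


class Feedback(BaseModel):
    """Abridged sketch; 'my-agent' stands in for the rendered project name."""

    text: str | None = ""
    run_id: str
    log_type: Literal["feedback"] = "feedback"
    service_name: Literal["my-agent"] = "my-agent"


# The constant service_name is serialized with every record, so BigQuery/Logging
# queries can filter feedback per generated project without extra instrumentation.
print(Feedback(run_id="abc123").model_dump())
```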
src/base_template/deployment/cd/deploy-to-prod.yaml
CHANGED

@@ -25,8 +25,14 @@ steps:
     env:
       - "PIPELINE_ROOT=${_PIPELINE_GCS_ROOT}"
       - "REGION=${_REGION}"
+      {%- if cookiecutter.datastore_type == "vertex_ai_search" %}
       - "DATA_STORE_REGION=${_DATA_STORE_REGION}"
       - "DATA_STORE_ID=${_DATA_STORE_ID}"
+      {%- elif cookiecutter.datastore_type == "vertex_ai_vector_search" %}
+      - "VECTOR_SEARCH_INDEX=${_VECTOR_SEARCH_INDEX}"
+      - "VECTOR_SEARCH_INDEX_ENDPOINT=${_VECTOR_SEARCH_INDEX_ENDPOINT}"
+      - "VECTOR_SEARCH_BUCKET=${_VECTOR_SEARCH_BUCKET}"
+      {%- endif %}
       - "PROJECT_ID=${_PROD_PROJECT_ID}"
       - "SERVICE_ACCOUNT=${_PIPELINE_SA_EMAIL}"
       - "PIPELINE_NAME=${_PIPELINE_NAME}"

@@ -59,9 +65,9 @@ steps:
       - "--concurrency"
       - "40"
      - "--service-account"
-      - "${
+      - "${_CLOUD_RUN_APP_SA_EMAIL}"
       - "--set-env-vars"
-      - "COMMIT_SHA=${COMMIT_SHA}{%- if cookiecutter.data_ingestion %},DATA_STORE_ID=${_DATA_STORE_ID},DATA_STORE_REGION=${_DATA_STORE_REGION}{%- endif %}"
+      - "COMMIT_SHA=${COMMIT_SHA}{%- if cookiecutter.data_ingestion %}{%- if cookiecutter.datastore_type == "vertex_ai_search" %},DATA_STORE_ID=${_DATA_STORE_ID},DATA_STORE_REGION=${_DATA_STORE_REGION}{%- elif cookiecutter.datastore_type == "vertex_ai_vector_search" %},VECTOR_SEARCH_INDEX=${_VECTOR_SEARCH_INDEX},VECTOR_SEARCH_INDEX_ENDPOINT=${_VECTOR_SEARCH_INDEX_ENDPOINT},VECTOR_SEARCH_BUCKET=${_VECTOR_SEARCH_BUCKET}{%- endif %}{%- endif %}"
 {%- elif cookiecutter.deployment_target == 'agent_engine' %}
   - name: "python:3.11-slim"
     id: install-dependencies

@@ -83,7 +89,7 @@ steps:
       uv run app/agent_engine_app.py \
         --project ${_PROD_PROJECT_ID} \
         --location ${_REGION} \
-        --set-env-vars
+        --set-env-vars="COMMIT_SHA=${COMMIT_SHA}{%- if cookiecutter.data_ingestion %}{%- if cookiecutter.datastore_type == "vertex_ai_search" %},DATA_STORE_ID=${_DATA_STORE_ID},DATA_STORE_REGION=${_DATA_STORE_REGION}{%- elif cookiecutter.datastore_type == "vertex_ai_vector_search" %},VECTOR_SEARCH_INDEX=${_VECTOR_SEARCH_INDEX},VECTOR_SEARCH_INDEX_ENDPOINT=${_VECTOR_SEARCH_INDEX_ENDPOINT},VECTOR_SEARCH_BUCKET=${_VECTOR_SEARCH_BUCKET}{%- endif %}{%- endif %}"
     env:
       - 'PATH=/usr/local/bin:/usr/bin:~/.local/bin'
 {%- endif %}

@@ -92,7 +98,7 @@ substitutions:
   _PROD_PROJECT_ID: YOUR_PROD_PROJECT_ID
   _REGION: us-central1
 
-logsBucket: gs://${PROJECT_ID}-logs-data/build-logs
+logsBucket: gs://${PROJECT_ID}-{{cookiecutter.project_name}}-logs-data/build-logs
 options:
   substitutionOption: ALLOW_LOOSE
   defaultLogsBucketBehavior: REGIONAL_USER_OWNED_BUCKET
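The CD configuration above exports either `DATA_STORE_ID`/`DATA_STORE_REGION` or the three `VECTOR_SEARCH_*` variables to the deployed service, depending on the templated datastore type. A purely hypothetical sketch of how application code could inspect those variables at startup; the actual `app/retrievers.py` added in this release is not shown in this diff, so this is not its implementation:

```python
import os


def describe_datastore_config() -> dict[str, str]:
    """Hypothetical helper: report which datastore backend the env vars select."""
    if os.getenv("DATA_STORE_ID"):
        return {
            "backend": "vertex_ai_search",
            "data_store_id": os.environ["DATA_STORE_ID"],
            "data_store_region": os.getenv("DATA_STORE_REGION", "us"),
        }
    if os.getenv("VECTOR_SEARCH_INDEX"):
        return {
            "backend": "vertex_ai_vector_search",
            "index": os.environ["VECTOR_SEARCH_INDEX"],
            "index_endpoint": os.environ["VECTOR_SEARCH_INDEX_ENDPOINT"],
            "bucket": os.environ["VECTOR_SEARCH_BUCKET"],
        }
    return {"backend": "none"}


if __name__ == "__main__":
    print(describe_datastore_config())
```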
src/base_template/deployment/cd/staging.yaml
CHANGED

@@ -25,8 +25,14 @@ steps:
     env:
       - "PIPELINE_ROOT=${_PIPELINE_GCS_ROOT}"
       - "REGION=${_REGION}"
+      {%- if cookiecutter.datastore_type == "vertex_ai_search" %}
       - "DATA_STORE_REGION=${_DATA_STORE_REGION}"
       - "DATA_STORE_ID=${_DATA_STORE_ID}"
+      {%- elif cookiecutter.datastore_type == "vertex_ai_vector_search" %}
+      - "VECTOR_SEARCH_INDEX=${_VECTOR_SEARCH_INDEX}"
+      - "VECTOR_SEARCH_INDEX_ENDPOINT=${_VECTOR_SEARCH_INDEX_ENDPOINT}"
+      - "VECTOR_SEARCH_BUCKET=${_VECTOR_SEARCH_BUCKET}"
+      {%- endif %}
       - "PROJECT_ID=${_STAGING_PROJECT_ID}"
       - "SERVICE_ACCOUNT=${_PIPELINE_SA_EMAIL}"
       - "PIPELINE_NAME=${_PIPELINE_NAME}"

@@ -73,9 +79,9 @@ steps:
       - "--concurrency"
       - "40"
       - "--service-account"
-      - "${
+      - "${_CLOUD_RUN_APP_SA_EMAIL}"
       - "--set-env-vars"
-      - "COMMIT_SHA=${COMMIT_SHA}{%- if cookiecutter.data_ingestion %},DATA_STORE_ID=${_DATA_STORE_ID},DATA_STORE_REGION=${_DATA_STORE_REGION}{%- endif %}"
+      - "COMMIT_SHA=${COMMIT_SHA}{%- if cookiecutter.data_ingestion %}{%- if cookiecutter.datastore_type == "vertex_ai_search" %},DATA_STORE_ID=${_DATA_STORE_ID},DATA_STORE_REGION=${_DATA_STORE_REGION}{%- elif cookiecutter.datastore_type == "vertex_ai_vector_search" %},VECTOR_SEARCH_INDEX=${_VECTOR_SEARCH_INDEX},VECTOR_SEARCH_INDEX_ENDPOINT=${_VECTOR_SEARCH_INDEX_ENDPOINT},VECTOR_SEARCH_BUCKET=${_VECTOR_SEARCH_BUCKET}{%- endif %}{%- endif %}"
 
 # Fetch Staging Service URL
 - name: "gcr.io/cloud-builders/gcloud"

@@ -116,7 +122,7 @@ steps:
       uv run app/agent_engine_app.py \
         --project ${_STAGING_PROJECT_ID} \
         --location ${_REGION} \
-        --set-env-vars
+        --set-env-vars="COMMIT_SHA=${COMMIT_SHA}{%- if cookiecutter.data_ingestion %}{%- if cookiecutter.datastore_type == "vertex_ai_search" %},DATA_STORE_ID=${_DATA_STORE_ID},DATA_STORE_REGION=${_DATA_STORE_REGION}{%- elif cookiecutter.datastore_type == "vertex_ai_vector_search" %},VECTOR_SEARCH_INDEX=${_VECTOR_SEARCH_INDEX},VECTOR_SEARCH_INDEX_ENDPOINT=${_VECTOR_SEARCH_INDEX_ENDPOINT},VECTOR_SEARCH_BUCKET=${_VECTOR_SEARCH_BUCKET}{%- endif %}{%- endif %}"
     env:
       - 'PATH=/usr/local/bin:/usr/bin:~/.local/bin'
 

@@ -181,7 +187,7 @@ steps:
       - "builds"
       - "triggers"
       - "run"
-      - "deploy-
+      - "deploy-{{cookiecutter.project_name}}"
       - "--region"
       - "$LOCATION"
       - "--project"

@@ -202,14 +208,9 @@
 
 substitutions:
   _STAGING_PROJECT_ID: YOUR_STAGING_PROJECT_ID
-  _BUCKET_NAME_LOAD_TEST_RESULTS: ${PROJECT_ID}-cicd-load-test-results
   _REGION: us-central1
-  {%- if cookiecutter.data_ingestion %}
-  _DATA_STORE_REGION: us
-  _PIPELINE_NAME: genai_sample_data_ingestion
-  {%- endif %}
 
-logsBucket: gs://${PROJECT_ID}-logs-data/build-logs
+logsBucket: gs://${PROJECT_ID}-{{cookiecutter.project_name}}-logs-data/build-logs
 options:
   substitutionOption: ALLOW_LOOSE
   defaultLogsBucketBehavior: REGIONAL_USER_OWNED_BUCKET
src/base_template/deployment/ci/pr_checks.yaml
CHANGED

@@ -46,6 +46,6 @@ steps:
     env:
       - 'PATH=/usr/local/bin:/usr/bin:~/.local/bin'
 
-logsBucket: gs://${PROJECT_ID}-logs-data/build-logs
+logsBucket: gs://${PROJECT_ID}-{{cookiecutter.project_name}}-logs-data/build-logs
 options:
   defaultLogsBucketBehavior: REGIONAL_USER_OWNED_BUCKET
@@ -32,3 +32,9 @@ resource "google_project_service" "shared_services" {
|
|
|
32
32
|
disable_on_destroy = false
|
|
33
33
|
}
|
|
34
34
|
|
|
35
|
+
# Enable Cloud Resource Manager API for the CICD runner project
|
|
36
|
+
resource "google_project_service" "cicd_cloud_resource_manager_api" {
|
|
37
|
+
project = var.cicd_runner_project_id
|
|
38
|
+
service = "cloudresourcemanager.googleapis.com"
|
|
39
|
+
disable_on_destroy = false
|
|
40
|
+
}
|
|
src/base_template/deployment/terraform/build_triggers.tf
CHANGED

@@ -14,7 +14,7 @@
 
 # a. Create PR checks trigger
 resource "google_cloudbuild_trigger" "pr_checks" {
-  name = "pr
+  name = "pr-${var.project_name}"
   project = var.cicd_runner_project_id
   location = var.region
   description = "Trigger for PR checks"

@@ -34,16 +34,16 @@ resource "google_cloudbuild_trigger" "pr_checks" {
       "tests/**",
       "deployment/**",
       "uv.lock",
-      {
+      {% if cookiecutter.data_ingestion %}
       "data_ingestion/**",
-      {
+      {% endif %}
     ]
   depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services]
 }
 
 # b. Create CD pipeline trigger
 resource "google_cloudbuild_trigger" "cd_pipeline" {
-  name = "cd
+  name = "cd-${var.project_name}"
   project = var.cicd_runner_project_id
   location = var.region
   service_account = resource.google_service_account.cicd_runner_sa.id

@@ -68,18 +68,25 @@ resource "google_cloudbuild_trigger" "cd_pipeline" {
     _STAGING_PROJECT_ID = var.staging_project_id
     _BUCKET_NAME_LOAD_TEST_RESULTS = resource.google_storage_bucket.bucket_load_test_results.name
     _REGION = var.region
-    {
-    _CONTAINER_NAME = var.
-    _ARTIFACT_REGISTRY_REPO_NAME =
-
-    {
-    {
+    {% if cookiecutter.deployment_target == 'cloud_run' %}
+    _CONTAINER_NAME = var.project_name
+    _ARTIFACT_REGISTRY_REPO_NAME = resource.google_artifact_registry_repository.repo-artifacts-genai.repository_id
+    _CLOUD_RUN_APP_SA_EMAIL = resource.google_service_account.cloud_run_app_sa["staging"].email
+    {% endif %}
+    {% if cookiecutter.data_ingestion %}
     _PIPELINE_GCS_ROOT = "gs://${resource.google_storage_bucket.data_ingestion_pipeline_gcs_root["staging"].name}"
-    _PIPELINE_SA_EMAIL =
+    _PIPELINE_SA_EMAIL = resource.google_service_account.vertexai_pipeline_app_sa["staging"].email
     _PIPELINE_CRON_SCHEDULE = var.pipeline_cron_schedule
+    {% if cookiecutter.datastore_type == "vertex_ai_search" %}
     _DATA_STORE_ID = resource.google_discovery_engine_data_store.data_store_staging.data_store_id
     _DATA_STORE_REGION = var.data_store_region
-    {
+    {% elif cookiecutter.datastore_type == "vertex_ai_vector_search" %}
+    _VECTOR_SEARCH_INDEX = resource.google_vertex_ai_index.vector_search_index_staging.id
+    _VECTOR_SEARCH_INDEX_ENDPOINT = resource.google_vertex_ai_index_endpoint.vector_search_index_endpoint_staging.id
+    _VECTOR_SEARCH_BUCKET = "gs://${resource.google_storage_bucket.vector_search_data_bucket["staging"].name}"
+
+    {% endif %}
+    {% endif %}
     # Your other CD Pipeline substitutions
   }
   depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services]

@@ -88,7 +95,7 @@ resource "google_cloudbuild_trigger" "cd_pipeline" {
 
 # c. Create Deploy to production trigger
 resource "google_cloudbuild_trigger" "deploy_to_prod_pipeline" {
-  name = "deploy
+  name = "deploy-${var.project_name}"
   project = var.cicd_runner_project_id
   location = var.region
   description = "Trigger for deployment to production"

@@ -103,18 +110,24 @@ resource "google_cloudbuild_trigger" "deploy_to_prod_pipeline" {
   substitutions = {
     _PROD_PROJECT_ID = var.prod_project_id
     _REGION = var.region
-    {
-    _CONTAINER_NAME = var.
-    _ARTIFACT_REGISTRY_REPO_NAME =
-
-    {
-    {
+    {% if cookiecutter.deployment_target == 'cloud_run' %}
+    _CONTAINER_NAME = var.project_name
+    _ARTIFACT_REGISTRY_REPO_NAME = resource.google_artifact_registry_repository.repo-artifacts-genai.repository_id
+    _CLOUD_RUN_APP_SA_EMAIL = resource.google_service_account.cloud_run_app_sa["prod"].email
+    {% endif %}
+    {% if cookiecutter.data_ingestion %}
     _PIPELINE_GCS_ROOT = "gs://${resource.google_storage_bucket.data_ingestion_pipeline_gcs_root["prod"].name}"
-    _PIPELINE_SA_EMAIL =
+    _PIPELINE_SA_EMAIL = resource.google_service_account.vertexai_pipeline_app_sa["prod"].email
     _PIPELINE_CRON_SCHEDULE = var.pipeline_cron_schedule
+    {% if cookiecutter.datastore_type == "vertex_ai_search" %}
     _DATA_STORE_ID = resource.google_discovery_engine_data_store.data_store_prod.data_store_id
     _DATA_STORE_REGION = var.data_store_region
-    {
+    {% elif cookiecutter.datastore_type == "vertex_ai_vector_search" %}
+    _VECTOR_SEARCH_INDEX = resource.google_vertex_ai_index.vector_search_index_prod.id
+    _VECTOR_SEARCH_INDEX_ENDPOINT = resource.google_vertex_ai_index_endpoint.vector_search_index_endpoint_prod.id
+    _VECTOR_SEARCH_BUCKET = "gs://${resource.google_storage_bucket.vector_search_data_bucket["prod"].name}"
+    {% endif %}
+    {% endif %}
     # Your other Deploy to Prod Pipeline substitutions
   }
   depends_on = [resource.google_project_service.cicd_services, resource.google_project_service.shared_services]