agent-starter-pack 0.3.2-py3-none-any.whl → 0.3.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of agent-starter-pack might be problematic.

Files changed (25)
  1. {agent_starter_pack-0.3.2.dist-info → agent_starter_pack-0.3.4.dist-info}/METADATA +16 -4
  2. {agent_starter_pack-0.3.2.dist-info → agent_starter_pack-0.3.4.dist-info}/RECORD +25 -20
  3. agents/adk_base/notebooks/evaluating_adk_agent.ipynb +1528 -0
  4. agents/agentic_rag/notebooks/evaluating_adk_agent.ipynb +1528 -0
  5. agents/crewai_coding_crew/notebooks/evaluating_langgraph_agent.ipynb +30 -17
  6. agents/langgraph_base_react/notebooks/evaluating_langgraph_agent.ipynb +30 -17
  7. src/base_template/Makefile +1 -1
  8. src/base_template/deployment/cd/deploy-to-prod.yaml +1 -0
  9. src/base_template/deployment/cd/staging.yaml +1 -0
  10. src/base_template/pyproject.toml +2 -0
  11. src/resources/idx/.idx/dev.nix +57 -0
  12. src/resources/idx/idx-template.json +21 -0
  13. src/resources/idx/idx-template.nix +26 -0
  14. src/resources/locks/uv-adk_base-agent_engine.lock +145 -142
  15. src/resources/locks/uv-adk_base-cloud_run.lock +181 -178
  16. src/resources/locks/uv-agentic_rag-agent_engine.lock +151 -148
  17. src/resources/locks/uv-agentic_rag-cloud_run.lock +187 -184
  18. src/resources/locks/uv-crewai_coding_crew-agent_engine.lock +151 -148
  19. src/resources/locks/uv-crewai_coding_crew-cloud_run.lock +187 -184
  20. src/resources/locks/uv-langgraph_base_react-agent_engine.lock +145 -142
  21. src/resources/locks/uv-langgraph_base_react-cloud_run.lock +181 -178
  22. src/resources/locks/uv-live_api-cloud_run.lock +172 -169
  23. {agent_starter_pack-0.3.2.dist-info → agent_starter_pack-0.3.4.dist-info}/WHEEL +0 -0
  24. {agent_starter_pack-0.3.2.dist-info → agent_starter_pack-0.3.4.dist-info}/entry_points.txt +0 -0
  25. {agent_starter_pack-0.3.2.dist-info → agent_starter_pack-0.3.4.dist-info}/licenses/LICENSE +0 -0
@@ -143,12 +143,8 @@
  },
  "outputs": [],
  "source": [
- "%pip install --upgrade --user --quiet \"google-cloud-aiplatform[evaluation]\" \\\n",
- " \"langchain_google_vertexai\" \\\n",
- " \"langgraph\" \\\n",
- " \"cloudpickle==3.0.0\" \\\n",
- " \"pydantic==2.7.4\" \\\n",
- " \"requests\""
+ "%pip install \"langchain_google_vertexai\" \"langgraph\"\n",
+ "%pip install --upgrade --user --quiet \"google-cloud-aiplatform[evaluation]\""
  ]
  },
  {
@@ -273,6 +269,8 @@
  },
  "outputs": [],
  "source": [
+ "import json\n",
+ "\n",
  "# General\n",
  "import random\n",
  "import string\n",
@@ -355,7 +353,7 @@
  " }\n",
  " )\n",
  "\n",
- " final_output[\"predicted_trajectory\"] = function_calls\n",
+ " final_output[\"predicted_trajectory\"] = json.dumps(function_calls)\n",
  " return final_output\n",
  "\n",
  "\n",
@@ -365,6 +363,7 @@
  " markdown += f\"{output['response']}\\n\\n\"\n",
  "\n",
  " if output[\"predicted_trajectory\"]:\n",
+ " output[\"predicted_trajectory\"] = json.loads(output[\"predicted_trajectory\"])\n",
  " markdown += \"### Function Calls\\n\"\n",
  " for call in output[\"predicted_trajectory\"]:\n",
  " markdown += f\"- **Function**: `{call['tool_name']}`\\n\"\n",
@@ -381,7 +380,7 @@
  " display(Markdown(\"### Summary Metrics\"))\n",
  " display(metrics_df)\n",
  "\n",
- " display(Markdown(f\"### Row-wise Metrics\"))\n",
+ " display(Markdown(\"### Row-wise Metrics\"))\n",
  " display(eval_result.metrics_table)\n",
  "\n",
  "\n",
@@ -599,7 +598,7 @@
  "source": [
  "def router(\n",
  " state: list[BaseMessage],\n",
- ") -> Literal[\"get_product_details\", \"get_product_price\", \"__end__\"]:\n",
+ ") -> Literal[\"get_product_details\", \"get_product_price\", END]:\n",
  " \"\"\"Initiates product details or price retrieval if the user asks for a product.\"\"\"\n",
  " # Get the tool_calls from the last message in the conversation history.\n",
  " tool_calls = state[-1].tool_calls\n",
@@ -614,7 +613,7 @@
  " return \"get_product_details\"\n",
  " else:\n",
  " # End the conversation flow.\n",
- " return \"__end__\""
+ " return END"
  ]
  },
  {
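The router above now returns LangGraph's `END` constant instead of the hard-coded `"__end__"` string. A minimal sketch of the same pattern, assuming `langgraph` and `langchain-core` are installed; the routing logic is simplified relative to the notebook:

```python
from typing import Literal

from langchain_core.messages import AIMessage, BaseMessage
from langgraph.graph import END


def router(
    state: list[BaseMessage],
) -> Literal["get_product_details", "get_product_price", END]:
    """Route to a tool node if the last message requested one, otherwise end the flow."""
    tool_calls = state[-1].tool_calls
    if tool_calls:
        # Simplification: route to whichever tool the model asked for first.
        return tool_calls[0]["name"]
    # No tool call was made, so finish the graph (END == "__end__").
    return END


# A message with a tool call routes to that tool; one without a tool call ends the flow.
ask = AIMessage(
    content="",
    tool_calls=[{"name": "get_product_price", "args": {"product_name": "usb charger"}, "id": "1"}],
)
done = AIMessage(content="Here you go!")
print(router([ask]), router([done]))
```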
@@ -636,7 +635,7 @@
  },
  "outputs": [],
  "source": [
- "llm = \"gemini-1.5-pro\""
+ "llm = \"gemini-2.0-flash\""
  ]
  },
  {
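The default model string moves from `gemini-1.5-pro` to `gemini-2.0-flash`. For context, a minimal sketch of how such a string is typically passed to the chat model, assuming `langchain_google_vertexai` and an authenticated Google Cloud project (the temperature setting is illustrative):

```python
from langchain_google_vertexai import ChatVertexAI

llm = "gemini-2.0-flash"

# Build the chat model used by the agent and eval runs from the configured model id.
model = ChatVertexAI(model_name=llm, temperature=0)
print(model.model_name)
```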
@@ -899,6 +898,7 @@
  " dataset=eval_sample_dataset,\n",
  " metrics=single_tool_usage_metrics,\n",
  " experiment=EXPERIMENT_NAME,\n",
+ " output_uri_prefix=BUCKET_URI + \"/single-metric-eval\",\n",
  ")\n",
  "\n",
  "single_tool_call_eval_result = single_tool_call_eval_task.evaluate(\n",
@@ -1003,7 +1003,10 @@
  "EXPERIMENT_RUN = f\"trajectory-{get_id()}\"\n",
  "\n",
  "trajectory_eval_task = EvalTask(\n",
- " dataset=eval_sample_dataset, metrics=trajectory_metrics, experiment=EXPERIMENT_NAME\n",
+ " dataset=eval_sample_dataset,\n",
+ " metrics=trajectory_metrics,\n",
+ " experiment=EXPERIMENT_NAME,\n",
+ " output_uri_prefix=BUCKET_URI + \"/multiple-metric-eval\",\n",
  ")\n",
  "\n",
  "trajectory_eval_result = trajectory_eval_task.evaluate(\n",
@@ -1107,7 +1110,10 @@
  "EXPERIMENT_RUN = f\"response-{get_id()}\"\n",
  "\n",
  "response_eval_task = EvalTask(\n",
- " dataset=eval_sample_dataset, metrics=response_metrics, experiment=EXPERIMENT_NAME\n",
+ " dataset=eval_sample_dataset,\n",
+ " metrics=response_metrics,\n",
+ " experiment=EXPERIMENT_NAME,\n",
+ " output_uri_prefix=BUCKET_URI + \"/response-metric-eval\",\n",
  ")\n",
  "\n",
  "response_eval_result = response_eval_task.evaluate(\n",
@@ -1292,6 +1298,7 @@
  " dataset=eval_sample_dataset,\n",
  " metrics=response_tool_metrics,\n",
  " experiment=EXPERIMENT_NAME,\n",
+ " output_uri_prefix=BUCKET_URI + \"/reasoning-metric-eval\",\n",
  ")\n",
  "\n",
  "response_eval_tool_result = response_eval_tool_task.evaluate(\n",
@@ -1436,15 +1443,21 @@
  " ],\n",
  " ],\n",
  " \"response\": [\n",
- " 500,\n",
- " 50,\n",
+ " \"500\",\n",
+ " \"50\",\n",
  " \"A super fast and light usb charger\",\n",
- " 100,\n",
+ " \"100\",\n",
  " \"A voice-controlled smart speaker that plays music, sets alarms, and controls smart home devices.\",\n",
  " ],\n",
  "}\n",
  "\n",
- "byod_eval_sample_dataset = pd.DataFrame(byod_eval_data)"
+ "byod_eval_sample_dataset = pd.DataFrame(byod_eval_data)\n",
+ "byod_eval_sample_dataset[\"predicted_trajectory\"] = byod_eval_sample_dataset[\n",
+ " \"predicted_trajectory\"\n",
+ "].apply(json.dumps)\n",
+ "byod_eval_sample_dataset[\"reference_trajectory\"] = byod_eval_sample_dataset[\n",
+ " \"reference_trajectory\"\n",
+ "].apply(json.dumps)"
  ]
  },
  {
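The bring-your-own-dataset cell now keeps `response` values as strings and JSON-encodes both trajectory columns, matching the string-typed `predicted_trajectory` produced earlier in the notebook. A minimal sketch of that preprocessing on a made-up two-row dataset (the column contents and the `tool_input` key are illustrative):

```python
import json

import pandas as pd

byod_eval_data = {
    "predicted_trajectory": [
        [{"tool_name": "get_product_price", "tool_input": {"product_name": "usb charger"}}],
        [{"tool_name": "get_product_details", "tool_input": {"product_name": "smart speaker"}}],
    ],
    "reference_trajectory": [
        [{"tool_name": "get_product_price", "tool_input": {"product_name": "usb charger"}}],
        [{"tool_name": "get_product_details", "tool_input": {"product_name": "smart speaker"}}],
    ],
    "response": ["50", "A voice-controlled smart speaker that plays music."],
}

byod_eval_sample_dataset = pd.DataFrame(byod_eval_data)

# Trajectory columns hold lists of dicts; serialize them to JSON strings as the updated cell does.
for column in ("predicted_trajectory", "reference_trajectory"):
    byod_eval_sample_dataset[column] = byod_eval_sample_dataset[column].apply(json.dumps)

print(byod_eval_sample_dataset.loc[0, "predicted_trajectory"])  # now a JSON string
```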
@@ -143,12 +143,8 @@
  },
  "outputs": [],
  "source": [
- "%pip install --upgrade --user --quiet \"google-cloud-aiplatform[evaluation]\" \\\n",
- " \"langchain_google_vertexai\" \\\n",
- " \"langgraph\" \\\n",
- " \"cloudpickle==3.0.0\" \\\n",
- " \"pydantic==2.7.4\" \\\n",
- " \"requests\""
+ "%pip install \"langchain_google_vertexai\" \"langgraph\"\n",
+ "%pip install --upgrade --user --quiet \"google-cloud-aiplatform[evaluation]\""
  ]
  },
  {
@@ -273,6 +269,8 @@
  },
  "outputs": [],
  "source": [
+ "import json\n",
+ "\n",
  "# General\n",
  "import random\n",
  "import string\n",
@@ -355,7 +353,7 @@
  " }\n",
  " )\n",
  "\n",
- " final_output[\"predicted_trajectory\"] = function_calls\n",
+ " final_output[\"predicted_trajectory\"] = json.dumps(function_calls)\n",
  " return final_output\n",
  "\n",
  "\n",
@@ -365,6 +363,7 @@
  " markdown += f\"{output['response']}\\n\\n\"\n",
  "\n",
  " if output[\"predicted_trajectory\"]:\n",
+ " output[\"predicted_trajectory\"] = json.loads(output[\"predicted_trajectory\"])\n",
  " markdown += \"### Function Calls\\n\"\n",
  " for call in output[\"predicted_trajectory\"]:\n",
  " markdown += f\"- **Function**: `{call['tool_name']}`\\n\"\n",
@@ -381,7 +380,7 @@
  " display(Markdown(\"### Summary Metrics\"))\n",
  " display(metrics_df)\n",
  "\n",
- " display(Markdown(f\"### Row-wise Metrics\"))\n",
+ " display(Markdown(\"### Row-wise Metrics\"))\n",
  " display(eval_result.metrics_table)\n",
  "\n",
  "\n",
@@ -599,7 +598,7 @@
  "source": [
  "def router(\n",
  " state: list[BaseMessage],\n",
- ") -> Literal[\"get_product_details\", \"get_product_price\", \"__end__\"]:\n",
+ ") -> Literal[\"get_product_details\", \"get_product_price\", END]:\n",
  " \"\"\"Initiates product details or price retrieval if the user asks for a product.\"\"\"\n",
  " # Get the tool_calls from the last message in the conversation history.\n",
  " tool_calls = state[-1].tool_calls\n",
@@ -614,7 +613,7 @@
  " return \"get_product_details\"\n",
  " else:\n",
  " # End the conversation flow.\n",
- " return \"__end__\""
+ " return END"
  ]
  },
  {
@@ -636,7 +635,7 @@
  },
  "outputs": [],
  "source": [
- "llm = \"gemini-1.5-pro\""
+ "llm = \"gemini-2.0-flash\""
  ]
  },
  {
@@ -899,6 +898,7 @@
  " dataset=eval_sample_dataset,\n",
  " metrics=single_tool_usage_metrics,\n",
  " experiment=EXPERIMENT_NAME,\n",
+ " output_uri_prefix=BUCKET_URI + \"/single-metric-eval\",\n",
  ")\n",
  "\n",
  "single_tool_call_eval_result = single_tool_call_eval_task.evaluate(\n",
@@ -1003,7 +1003,10 @@
  "EXPERIMENT_RUN = f\"trajectory-{get_id()}\"\n",
  "\n",
  "trajectory_eval_task = EvalTask(\n",
- " dataset=eval_sample_dataset, metrics=trajectory_metrics, experiment=EXPERIMENT_NAME\n",
+ " dataset=eval_sample_dataset,\n",
+ " metrics=trajectory_metrics,\n",
+ " experiment=EXPERIMENT_NAME,\n",
+ " output_uri_prefix=BUCKET_URI + \"/multiple-metric-eval\",\n",
  ")\n",
  "\n",
  "trajectory_eval_result = trajectory_eval_task.evaluate(\n",
@@ -1107,7 +1110,10 @@
  "EXPERIMENT_RUN = f\"response-{get_id()}\"\n",
  "\n",
  "response_eval_task = EvalTask(\n",
- " dataset=eval_sample_dataset, metrics=response_metrics, experiment=EXPERIMENT_NAME\n",
+ " dataset=eval_sample_dataset,\n",
+ " metrics=response_metrics,\n",
+ " experiment=EXPERIMENT_NAME,\n",
+ " output_uri_prefix=BUCKET_URI + \"/response-metric-eval\",\n",
  ")\n",
  "\n",
  "response_eval_result = response_eval_task.evaluate(\n",
@@ -1292,6 +1298,7 @@
  " dataset=eval_sample_dataset,\n",
  " metrics=response_tool_metrics,\n",
  " experiment=EXPERIMENT_NAME,\n",
+ " output_uri_prefix=BUCKET_URI + \"/reasoning-metric-eval\",\n",
  ")\n",
  "\n",
  "response_eval_tool_result = response_eval_tool_task.evaluate(\n",
@@ -1436,15 +1443,21 @@
  " ],\n",
  " ],\n",
  " \"response\": [\n",
- " 500,\n",
- " 50,\n",
+ " \"500\",\n",
+ " \"50\",\n",
  " \"A super fast and light usb charger\",\n",
- " 100,\n",
+ " \"100\",\n",
  " \"A voice-controlled smart speaker that plays music, sets alarms, and controls smart home devices.\",\n",
  " ],\n",
  "}\n",
  "\n",
- "byod_eval_sample_dataset = pd.DataFrame(byod_eval_data)"
+ "byod_eval_sample_dataset = pd.DataFrame(byod_eval_data)\n",
+ "byod_eval_sample_dataset[\"predicted_trajectory\"] = byod_eval_sample_dataset[\n",
+ " \"predicted_trajectory\"\n",
+ "].apply(json.dumps)\n",
+ "byod_eval_sample_dataset[\"reference_trajectory\"] = byod_eval_sample_dataset[\n",
+ " \"reference_trajectory\"\n",
+ "].apply(json.dumps)"
  ]
  },
  {
@@ -1,6 +1,6 @@
  install:
  @command -v uv >/dev/null 2>&1 || { echo "uv is not installed. Installing uv..."; curl -LsSf https://astral.sh/uv/0.6.12/install.sh | sh; source ~/.bashrc; }
- uv sync --dev {% if cookiecutter.agent_name != 'live_api' %}--extra streamlit{%- endif %} --extra jupyter --frozen{% if cookiecutter.agent_name == 'live_api' %} && npm --prefix frontend install{%- endif %}
+ uv sync --dev{% if cookiecutter.agent_name != 'live_api' and "adk" not in cookiecutter.tags %} --extra streamlit{%- endif %} --extra jupyter --frozen{% if cookiecutter.agent_name == 'live_api' %} && npm --prefix frontend install{%- endif %}

  test:
  uv run pytest tests/unit && uv run pytest tests/integration
@@ -66,6 +66,7 @@ steps:
  - "40"
  - "--service-account"
  - "${_CLOUD_RUN_APP_SA_EMAIL}"
+ - "--session-affinity"
  - "--set-env-vars"
  - "COMMIT_SHA=${COMMIT_SHA}{%- if cookiecutter.data_ingestion %}{%- if cookiecutter.datastore_type == "vertex_ai_search" %},DATA_STORE_ID=${_DATA_STORE_ID},DATA_STORE_REGION=${_DATA_STORE_REGION}{%- elif cookiecutter.datastore_type == "vertex_ai_vector_search" %},VECTOR_SEARCH_INDEX=${_VECTOR_SEARCH_INDEX},VECTOR_SEARCH_INDEX_ENDPOINT=${_VECTOR_SEARCH_INDEX_ENDPOINT},VECTOR_SEARCH_BUCKET=${_VECTOR_SEARCH_BUCKET}{%- endif %}{%- endif %}"
  {%- elif cookiecutter.deployment_target == 'agent_engine' %}
@@ -80,6 +80,7 @@ steps:
  - "40"
  - "--service-account"
  - "${_CLOUD_RUN_APP_SA_EMAIL}"
+ - "--session-affinity"
  - "--set-env-vars"
  - "COMMIT_SHA=${COMMIT_SHA}{%- if cookiecutter.data_ingestion %}{%- if cookiecutter.datastore_type == "vertex_ai_search" %},DATA_STORE_ID=${_DATA_STORE_ID},DATA_STORE_REGION=${_DATA_STORE_REGION}{%- elif cookiecutter.datastore_type == "vertex_ai_vector_search" %},VECTOR_SEARCH_INDEX=${_VECTOR_SEARCH_INDEX},VECTOR_SEARCH_INDEX_ENDPOINT=${_VECTOR_SEARCH_INDEX_ENDPOINT},VECTOR_SEARCH_BUCKET=${_VECTOR_SEARCH_BUCKET}{%- endif %}{%- endif %}"

@@ -37,12 +37,14 @@ dev = [
  ]

  [project.optional-dependencies]
+ {% if cookiecutter.agent_name != 'live_api' and "adk" not in cookiecutter.tags %}
  streamlit = [
  "streamlit~=1.42.0",
  "streamlit-extras~=0.4.3",
  "extra-streamlit-components~=0.1.71",
  "streamlit-feedback~=0.1.3",
  ]
+ {% endif %}
  jupyter = [
  "jupyter~=1.0.0",
  ]
@@ -0,0 +1,57 @@
+
+ # To learn more about how to use Nix to configure your environment
+ # see: https://firebase.google.com/docs/studio/customize-workspace
+ { pkgs, ... }: {
+ # Which nixpkgs channel to use.
+ channel = "stable-24.11"; # or "unstable"
+
+ # Use https://search.nixos.org/packages to find packages
+ packages = [
+ pkgs.python311
+ pkgs.python311Packages.pip
+ pkgs.python311Packages.uv
+ pkgs.gnumake
+ pkgs.terraform
+ pkgs.gh
+ ];
+ # Sets environment variables in the workspace
+ env = {};
+ idx = {
+ # Search for the extensions you want on https://open-vsx.org/ and use "publisher.id"
+ extensions = [
+ "ms-toolsai.jupyter"
+ "ms-python.python"
+ "krish-r.vscode-toggle-terminal"
+ ];
+ workspace = {
+ # Runs when a workspace is first created with this `dev.nix` file
+ onCreate = {
+ create-venv = ''
+ # Load environment variables from .env file if it exists
+ source .env
+ echo "Logging into gcloud..."
+ echo "Please authenticate with Google Cloud by following the prompts."
+ gcloud auth login --update-adc --brief --quiet
+
+ echo "Setting gcloud project..."
+ gcloud config set project $GOOGLE_CLOUD_PROJECT
+
+ echo "Creating Python virtual environment and installing packages..."
+ uv venv && uv pip install agent-starter-pack
+ echo "alias agent-starter-pack=\"~/$WS_NAME/.venv/bin/agent-starter-pack\"" >> ~/.bashrc
+ source ~/.bashrc
+
+ echo "Running agent starter pack creation..."
+ uv run agent-starter-pack create $AGENT_NAME
+ code ~/$WS_NAME/$AGENT_NAME/README.md
+ exec bash
+ '';
+ # Open editors for the following files by default, if they exist:
+ default.openFiles = [];
+ };
+ # To run something each time the workspace is (re)started, use the `onStart` hook
+ };
+ # Enable previews and customize configuration
+ previews = {};
+ };
+ }
@@ -0,0 +1,21 @@
+ {
+ "name": "Agent Starter Pack",
+ "description": "Production-ready Gen AI Agent templates for Google Cloud. Addressing common challenges (Deployment & Operations, Evaluation, Customization, Observability) in building and deploying GenAI agents.",
+ "icon": "https://github.com/GoogleCloudPlatform/agent-starter-pack/blob/main/docs/images/icon.png?raw=true",
+ "params": [
+ {
+ "id": "agent_name",
+ "name": "Agent Name",
+ "description": "The name of the agent being created.",
+ "type": "string",
+ "required": true
+ },
+ {
+ "id": "google_cloud_project_id",
+ "name": "Your Google Cloud Project ID",
+ "description": "The Google Cloud Platform Project ID where resources will be managed.",
+ "type": "string",
+ "required": true
+ }
+ ]
+ }
@@ -0,0 +1,26 @@
+ # No user-configurable parameters
+ # Accept additional arguments to this template corresponding to template
+ # parameter IDs
+ { pkgs, agent_name ? "", google_cloud_project_id ? "", ... }: {
+ # Shell script that produces the final environment
+ bootstrap = ''
+ # Copy the folder containing the `idx-template` files to the final
+ # project folder for the new workspace. ${./.} inserts the directory
+ # of the checked-out Git folder containing this template.
+ cp -rf ${./.} "$out"
+
+ # Set some permissions
+ chmod -R +w "$out"
+
+ # Create .env file with the parameter values
+ cat > "$out/.env" << EOF
+ AGENT_NAME=${agent_name}
+ GOOGLE_CLOUD_PROJECT=${google_cloud_project_id}
+ WS_NAME=$WS_NAME
+ EOF
+
+ # Remove the template files themselves and any connection to the template's
+ # Git repository
+ rm -rf "$out/.git" "$out/idx-template".{nix,json}
+ '';
+ }