@aws/agentcore 0.3.0-preview.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +175 -0
- package/README.md +147 -0
- package/dist/assets/README.md +79 -0
- package/dist/assets/__tests__/__snapshots__/assets.snapshot.test.ts.snap +2862 -0
- package/dist/assets/__tests__/assets.snapshot.test.ts +139 -0
- package/dist/assets/agents/AGENTS.md +102 -0
- package/dist/assets/cdk/.prettierrc +8 -0
- package/dist/assets/cdk/README.md +14 -0
- package/dist/assets/cdk/bin/cdk.ts +50 -0
- package/dist/assets/cdk/cdk.json +88 -0
- package/dist/assets/cdk/gitignore.template +9 -0
- package/dist/assets/cdk/jest.config.js +9 -0
- package/dist/assets/cdk/lib/cdk-stack.ts +38 -0
- package/dist/assets/cdk/npmignore.template +6 -0
- package/dist/assets/cdk/package.json +30 -0
- package/dist/assets/cdk/test/cdk.test.ts +16 -0
- package/dist/assets/cdk/tsconfig.json +28 -0
- package/dist/assets/mcp/python/README.md +27 -0
- package/dist/assets/mcp/python/pyproject.toml +22 -0
- package/dist/assets/mcp/python/server.py +117 -0
- package/dist/assets/mcp/python-lambda/README.md +22 -0
- package/dist/assets/mcp/python-lambda/handler.py +144 -0
- package/dist/assets/mcp/python-lambda/pyproject.toml +15 -0
- package/dist/assets/python/autogen/base/README.md +41 -0
- package/dist/assets/python/autogen/base/gitignore.template +40 -0
- package/dist/assets/python/autogen/base/main.py +52 -0
- package/dist/assets/python/autogen/base/mcp_client/client.py +18 -0
- package/dist/assets/python/autogen/base/model/load.py +136 -0
- package/dist/assets/python/autogen/base/pyproject.toml +35 -0
- package/dist/assets/python/crewai/base/README.md +41 -0
- package/dist/assets/python/crewai/base/gitignore.template +40 -0
- package/dist/assets/python/crewai/base/main.py +55 -0
- package/dist/assets/python/crewai/base/model/load.py +133 -0
- package/dist/assets/python/crewai/base/pyproject.toml +32 -0
- package/dist/assets/python/googleadk/base/README.md +39 -0
- package/dist/assets/python/googleadk/base/gitignore.template +40 -0
- package/dist/assets/python/googleadk/base/main.py +84 -0
- package/dist/assets/python/googleadk/base/mcp_client/client.py +15 -0
- package/dist/assets/python/googleadk/base/model/load.py +41 -0
- package/dist/assets/python/googleadk/base/pyproject.toml +21 -0
- package/dist/assets/python/langchain_langgraph/base/README.md +41 -0
- package/dist/assets/python/langchain_langgraph/base/gitignore.template +40 -0
- package/dist/assets/python/langchain_langgraph/base/main.py +51 -0
- package/dist/assets/python/langchain_langgraph/base/mcp_client/client.py +19 -0
- package/dist/assets/python/langchain_langgraph/base/model/load.py +123 -0
- package/dist/assets/python/langchain_langgraph/base/pyproject.toml +37 -0
- package/dist/assets/python/openaiagents/base/README.md +39 -0
- package/dist/assets/python/openaiagents/base/gitignore.template +40 -0
- package/dist/assets/python/openaiagents/base/main.py +56 -0
- package/dist/assets/python/openaiagents/base/mcp_client/client.py +14 -0
- package/dist/assets/python/openaiagents/base/model/load.py +37 -0
- package/dist/assets/python/openaiagents/base/pyproject.toml +20 -0
- package/dist/assets/python/strands/base/README.md +41 -0
- package/dist/assets/python/strands/base/gitignore.template +41 -0
- package/dist/assets/python/strands/base/main.py +76 -0
- package/dist/assets/python/strands/base/mcp_client/client.py +12 -0
- package/dist/assets/python/strands/base/model/load.py +123 -0
- package/dist/assets/python/strands/base/pyproject.toml +23 -0
- package/dist/assets/python/strands/capabilities/memory/session.py +39 -0
- package/dist/assets/typescript/.gitkeep +0 -0
- package/dist/cli/index.mjs +985 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +27 -0
- package/dist/index.js.map +1 -0
- package/dist/lib/constants.d.ts +25 -0
- package/dist/lib/constants.d.ts.map +1 -0
- package/dist/lib/constants.js +49 -0
- package/dist/lib/constants.js.map +1 -0
- package/dist/lib/errors/config.d.ts +49 -0
- package/dist/lib/errors/config.d.ts.map +1 -0
- package/dist/lib/errors/config.js +167 -0
- package/dist/lib/errors/config.js.map +1 -0
- package/dist/lib/errors/index.d.ts +2 -0
- package/dist/lib/errors/index.d.ts.map +1 -0
- package/dist/lib/errors/index.js +18 -0
- package/dist/lib/errors/index.js.map +1 -0
- package/dist/lib/index.d.ts +7 -0
- package/dist/lib/index.d.ts.map +1 -0
- package/dist/lib/index.js +39 -0
- package/dist/lib/index.js.map +1 -0
- package/dist/lib/packaging/errors.d.ts +16 -0
- package/dist/lib/packaging/errors.d.ts.map +1 -0
- package/dist/lib/packaging/errors.js +36 -0
- package/dist/lib/packaging/errors.js.map +1 -0
- package/dist/lib/packaging/helpers.d.ts +54 -0
- package/dist/lib/packaging/helpers.d.ts.map +1 -0
- package/dist/lib/packaging/helpers.js +461 -0
- package/dist/lib/packaging/helpers.js.map +1 -0
- package/dist/lib/packaging/index.d.ts +36 -0
- package/dist/lib/packaging/index.d.ts.map +1 -0
- package/dist/lib/packaging/index.js +89 -0
- package/dist/lib/packaging/index.js.map +1 -0
- package/dist/lib/packaging/node.d.ts +17 -0
- package/dist/lib/packaging/node.d.ts.map +1 -0
- package/dist/lib/packaging/node.js +108 -0
- package/dist/lib/packaging/node.js.map +1 -0
- package/dist/lib/packaging/python.d.ts +17 -0
- package/dist/lib/packaging/python.d.ts.map +1 -0
- package/dist/lib/packaging/python.js +162 -0
- package/dist/lib/packaging/python.js.map +1 -0
- package/dist/lib/packaging/types/index.d.ts +2 -0
- package/dist/lib/packaging/types/index.d.ts.map +1 -0
- package/dist/lib/packaging/types/index.js +3 -0
- package/dist/lib/packaging/types/index.js.map +1 -0
- package/dist/lib/packaging/types/packaging.d.ts +57 -0
- package/dist/lib/packaging/types/packaging.d.ts.map +1 -0
- package/dist/lib/packaging/types/packaging.js +3 -0
- package/dist/lib/packaging/types/packaging.js.map +1 -0
- package/dist/lib/packaging/uv.d.ts +7 -0
- package/dist/lib/packaging/uv.d.ts.map +1 -0
- package/dist/lib/packaging/uv.js +40 -0
- package/dist/lib/packaging/uv.js.map +1 -0
- package/dist/lib/schemas/io/config-io.d.ts +106 -0
- package/dist/lib/schemas/io/config-io.d.ts.map +1 -0
- package/dist/lib/schemas/io/config-io.js +293 -0
- package/dist/lib/schemas/io/config-io.js.map +1 -0
- package/dist/lib/schemas/io/index.d.ts +3 -0
- package/dist/lib/schemas/io/index.d.ts.map +1 -0
- package/dist/lib/schemas/io/index.js +17 -0
- package/dist/lib/schemas/io/index.js.map +1 -0
- package/dist/lib/schemas/io/path-resolver.d.ts +112 -0
- package/dist/lib/schemas/io/path-resolver.d.ts.map +1 -0
- package/dist/lib/schemas/io/path-resolver.js +195 -0
- package/dist/lib/schemas/io/path-resolver.js.map +1 -0
- package/dist/lib/utils/aws-account.d.ts +7 -0
- package/dist/lib/utils/aws-account.d.ts.map +1 -0
- package/dist/lib/utils/aws-account.js +24 -0
- package/dist/lib/utils/aws-account.js.map +1 -0
- package/dist/lib/utils/credentials.d.ts +86 -0
- package/dist/lib/utils/credentials.d.ts.map +1 -0
- package/dist/lib/utils/credentials.js +153 -0
- package/dist/lib/utils/credentials.js.map +1 -0
- package/dist/lib/utils/env.d.ts +22 -0
- package/dist/lib/utils/env.d.ts.map +1 -0
- package/dist/lib/utils/env.js +65 -0
- package/dist/lib/utils/env.js.map +1 -0
- package/dist/lib/utils/index.d.ts +7 -0
- package/dist/lib/utils/index.d.ts.map +1 -0
- package/dist/lib/utils/index.js +23 -0
- package/dist/lib/utils/index.js.map +1 -0
- package/dist/lib/utils/platform.d.ts +63 -0
- package/dist/lib/utils/platform.d.ts.map +1 -0
- package/dist/lib/utils/platform.js +88 -0
- package/dist/lib/utils/platform.js.map +1 -0
- package/dist/lib/utils/subprocess.d.ts +29 -0
- package/dist/lib/utils/subprocess.d.ts.map +1 -0
- package/dist/lib/utils/subprocess.js +94 -0
- package/dist/lib/utils/subprocess.js.map +1 -0
- package/dist/lib/utils/zod.d.ts +14 -0
- package/dist/lib/utils/zod.d.ts.map +1 -0
- package/dist/lib/utils/zod.js +32 -0
- package/dist/lib/utils/zod.js.map +1 -0
- package/dist/schema/constants.d.ts +82 -0
- package/dist/schema/constants.d.ts.map +1 -0
- package/dist/schema/constants.js +117 -0
- package/dist/schema/constants.js.map +1 -0
- package/dist/schema/index.d.ts +4 -0
- package/dist/schema/index.d.ts.map +1 -0
- package/dist/schema/index.js +21 -0
- package/dist/schema/index.js.map +1 -0
- package/dist/schema/schemas/agent-env.d.ts +75 -0
- package/dist/schema/schemas/agent-env.d.ts.map +1 -0
- package/dist/schema/schemas/agent-env.js +84 -0
- package/dist/schema/schemas/agent-env.js.map +1 -0
- package/dist/schema/schemas/agentcore-project.d.ts +88 -0
- package/dist/schema/schemas/agentcore-project.d.ts.map +1 -0
- package/dist/schema/schemas/agentcore-project.js +83 -0
- package/dist/schema/schemas/agentcore-project.js.map +1 -0
- package/dist/schema/schemas/aws-targets.d.ts +50 -0
- package/dist/schema/schemas/aws-targets.d.ts.map +1 -0
- package/dist/schema/schemas/aws-targets.js +49 -0
- package/dist/schema/schemas/aws-targets.js.map +1 -0
- package/dist/schema/schemas/deployed-state.d.ts +260 -0
- package/dist/schema/schemas/deployed-state.d.ts.map +1 -0
- package/dist/schema/schemas/deployed-state.js +100 -0
- package/dist/schema/schemas/deployed-state.js.map +1 -0
- package/dist/schema/schemas/index.d.ts +8 -0
- package/dist/schema/schemas/index.d.ts.map +1 -0
- package/dist/schema/schemas/index.js +25 -0
- package/dist/schema/schemas/index.js.map +1 -0
- package/dist/schema/schemas/mcp-defs.d.ts +52 -0
- package/dist/schema/schemas/mcp-defs.d.ts.map +1 -0
- package/dist/schema/schemas/mcp-defs.js +50 -0
- package/dist/schema/schemas/mcp-defs.js.map +1 -0
- package/dist/schema/schemas/mcp.d.ts +659 -0
- package/dist/schema/schemas/mcp.d.ts.map +1 -0
- package/dist/schema/schemas/mcp.js +283 -0
- package/dist/schema/schemas/mcp.js.map +1 -0
- package/dist/schema/schemas/primitives/index.d.ts +3 -0
- package/dist/schema/schemas/primitives/index.d.ts.map +1 -0
- package/dist/schema/schemas/primitives/index.js +9 -0
- package/dist/schema/schemas/primitives/index.js.map +1 -0
- package/dist/schema/schemas/primitives/memory.d.ts +42 -0
- package/dist/schema/schemas/primitives/memory.d.ts.map +1 -0
- package/dist/schema/schemas/primitives/memory.js +50 -0
- package/dist/schema/schemas/primitives/memory.js.map +1 -0
- package/dist/schema/schemas/zod-util.d.ts +10 -0
- package/dist/schema/schemas/zod-util.d.ts.map +1 -0
- package/dist/schema/schemas/zod-util.js +23 -0
- package/dist/schema/schemas/zod-util.js.map +1 -0
- package/dist/schema/types/index.d.ts +2 -0
- package/dist/schema/types/index.d.ts.map +1 -0
- package/dist/schema/types/index.js +18 -0
- package/dist/schema/types/index.js.map +1 -0
- package/dist/schema/types/path.d.ts +27 -0
- package/dist/schema/types/path.d.ts.map +1 -0
- package/dist/schema/types/path.js +13 -0
- package/dist/schema/types/path.js.map +1 -0
- package/package.json +111 -0
- package/scripts/bump-version.ts +442 -0
- package/scripts/check-old-cli.mjs +26 -0
- package/scripts/copy-assets.mjs +50 -0
package/dist/assets/python/langchain_langgraph/base/model/load.py
@@ -0,0 +1,123 @@
+{{#if (eq modelProvider "Bedrock")}}
+from langchain_aws import ChatBedrock
+
+# Uses global inference profile for Claude Sonnet 4.5
+# https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html
+MODEL_ID = "global.anthropic.claude-sonnet-4-5-20250929-v1:0"
+
+
+def load_model() -> ChatBedrock:
+    """Get Bedrock model client using IAM credentials."""
+    return ChatBedrock(model_id=MODEL_ID)
+{{/if}}
+{{#if (eq modelProvider "Anthropic")}}
+import os
+from langchain_anthropic import ChatAnthropic
+from bedrock_agentcore.identity.auth import requires_api_key
+
+IDENTITY_PROVIDER_NAME = "{{identityProviders.[0].name}}"
+IDENTITY_ENV_VAR = "{{identityProviders.[0].envVarName}}"
+
+
+@requires_api_key(provider_name=IDENTITY_PROVIDER_NAME)
+def _agentcore_identity_api_key_provider(api_key: str) -> str:
+    """Fetch API key from AgentCore Identity."""
+    return api_key
+
+
+def _get_api_key() -> str:
+    """
+    Uses AgentCore Identity for API key management in deployed environments.
+    For local development, run via 'agentcore dev' which loads agentcore/.env.
+    """
+    if os.getenv("LOCAL_DEV") == "1":
+        api_key = os.getenv(IDENTITY_ENV_VAR)
+        if not api_key:
+            raise RuntimeError(
+                f"{IDENTITY_ENV_VAR} not found. Add {IDENTITY_ENV_VAR}=your-key to .env.local"
+            )
+        return api_key
+    return _agentcore_identity_api_key_provider()
+
+
+def load_model() -> ChatAnthropic:
+    """Get authenticated Anthropic model client."""
+    return ChatAnthropic(
+        model="claude-sonnet-4-5-20250929",
+        api_key=_get_api_key()
+    )
+{{/if}}
+{{#if (eq modelProvider "OpenAI")}}
+import os
+from langchain_openai import ChatOpenAI
+from bedrock_agentcore.identity.auth import requires_api_key
+
+IDENTITY_PROVIDER_NAME = "{{identityProviders.[0].name}}"
+IDENTITY_ENV_VAR = "{{identityProviders.[0].envVarName}}"
+
+
+@requires_api_key(provider_name=IDENTITY_PROVIDER_NAME)
+def _agentcore_identity_api_key_provider(api_key: str) -> str:
+    """Fetch API key from AgentCore Identity."""
+    return api_key
+
+
+def _get_api_key() -> str:
+    """
+    Uses AgentCore Identity for API key management in deployed environments.
+    For local development, run via 'agentcore dev' which loads agentcore/.env.
+    """
+    if os.getenv("LOCAL_DEV") == "1":
+        api_key = os.getenv(IDENTITY_ENV_VAR)
+        if not api_key:
+            raise RuntimeError(
+                f"{IDENTITY_ENV_VAR} not found. Add {IDENTITY_ENV_VAR}=your-key to .env.local"
+            )
+        return api_key
+    return _agentcore_identity_api_key_provider()
+
+
+def load_model() -> ChatOpenAI:
+    """Get authenticated OpenAI model client."""
+    return ChatOpenAI(
+        model="gpt-4o",
+        api_key=_get_api_key()
+    )
+{{/if}}
+{{#if (eq modelProvider "Gemini")}}
+import os
+from langchain_google_genai import ChatGoogleGenerativeAI
+from bedrock_agentcore.identity.auth import requires_api_key
+
+IDENTITY_PROVIDER_NAME = "{{identityProviders.[0].name}}"
+IDENTITY_ENV_VAR = "{{identityProviders.[0].envVarName}}"
+
+
+@requires_api_key(provider_name=IDENTITY_PROVIDER_NAME)
+def _agentcore_identity_api_key_provider(api_key: str) -> str:
+    """Fetch API key from AgentCore Identity."""
+    return api_key
+
+
+def _get_api_key() -> str:
+    """
+    Uses AgentCore Identity for API key management in deployed environments.
+    For local development, run via 'agentcore dev' which loads agentcore/.env.
+    """
+    if os.getenv("LOCAL_DEV") == "1":
+        api_key = os.getenv(IDENTITY_ENV_VAR)
+        if not api_key:
+            raise RuntimeError(
+                f"{IDENTITY_ENV_VAR} not found. Add {IDENTITY_ENV_VAR}=your-key to .env.local"
+            )
+        return api_key
+    return _agentcore_identity_api_key_provider()
+
+
+def load_model() -> ChatGoogleGenerativeAI:
+    """Get authenticated Gemini model client."""
+    return ChatGoogleGenerativeAI(
+        model="gemini-2.0-flash",
+        api_key=_get_api_key()
+    )
+{{/if}}
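As an illustrative sketch only (not part of the generated template), a rendered `load_model()` from the Bedrock branch above could be wired into a LangGraph agent roughly like this, assuming the `langgraph.prebuilt.create_react_agent` helper:

```python
# Illustrative only: wiring a rendered load_model() into a LangGraph ReAct agent.
# Assumes the Bedrock branch of the template above.
from langgraph.prebuilt import create_react_agent

from model.load import load_model  # the generated module above


def add_numbers(a: int, b: int) -> int:
    """Return the sum of two numbers."""
    return a + b


agent = create_react_agent(load_model(), tools=[add_numbers])
result = agent.invoke({"messages": [("user", "What is 2 + 2?")]})
print(result["messages"][-1].content)
```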
package/dist/assets/python/langchain_langgraph/base/pyproject.toml
@@ -0,0 +1,37 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "{{ name }}"
+version = "0.1.0"
+description = "AgentCore Runtime Application using LangChain/LangGraph"
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "opentelemetry-distro",
+    "opentelemetry-exporter-otlp",
+    "langgraph >= 1.0.2",
+    "mcp >= 1.19.0",
+    "langchain-mcp-adapters >= 0.1.11",
+    "langchain >= 1.0.3",
+    "tiktoken == 0.11.0",
+    "bedrock-agentcore >= 1.0.3",
+    "botocore[crt] >= 1.35.0",
+    "python-dotenv >= 1.0.1",
+{{#if (eq modelProvider "Bedrock")}}
+    "langchain-aws >= 1.0.0",
+{{/if}}
+{{#if (eq modelProvider "Anthropic")}}
+    "langchain-anthropic >= 1.1.0",
+{{/if}}
+{{#if (eq modelProvider "OpenAI")}}
+    "langchain-openai >= 1.0.3",
+{{/if}}
+{{#if (eq modelProvider "Gemini")}}
+    "langchain-google-genai >= 3.0.3",
+{{/if}}
+]
+
+[tool.hatch.build.targets.wheel]
+packages = ["."]
package/dist/assets/python/openaiagents/base/README.md
@@ -0,0 +1,39 @@
+This is a project generated by the agentcore create CLI tool!
+
+# Layout
+
+There is one directory with generated application code, `src/` . At the root, there is a `.gitignore` file, a
+`.agentcore` folder which represents the configurations and state associated with this project. Other `agentcore`
+commands like `deploy`, `dev`, and `invoke` rely on the configuration stored here.
+
+## src/
+
+The main entrypoint to your app is defined in `src/main.py`. Using the AgentCore SDK `@app.entrypoint` decorator, this
+file defines a Starlette ASGI app with the OpenAI Agents SDK framework running within.
+
+`src/model/load.py` instantiates your chosen model provider (OpenAI).
+
+## Environment Variables
+
+| Variable                    | Required | Description                                                      |
+| --------------------------- | -------- | ---------------------------------------------------------------- |
+| `AGENTCORE_IDENTITY_OPENAI` | Yes      | OpenAI API key (local) or Identity provider name (deployed)      |
+| `LOCAL_DEV`                 | No       | Set to `1` to use `agentcore/.env` instead of AgentCore Identity |
+
+# Developing locally
+
+If installation was successful, a virtual environment is already created with dependencies installed.
+
+Run `source .venv/bin/activate` before developing.
+
+`agentcore dev` will start a local server on 0.0.0.0:8080.
+
+In a new terminal, you can invoke that server with:
+
+`agentcore invoke --dev "What can you do"`
+
+# Deployment
+
+After providing credentials, `agentcore deploy` will deploy your project into Amazon Bedrock AgentCore.
+
+Use `agentcore invoke` to invoke your deployed agent.
package/dist/assets/python/openaiagents/base/gitignore.template
@@ -0,0 +1,40 @@
+# Environment variables
+.env
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Virtual environments
+venv/
+ENV/
+env/
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+Thumbs.db
package/dist/assets/python/openaiagents/base/main.py
@@ -0,0 +1,56 @@
+import os
+from agents import Agent, Runner, function_tool
+from bedrock_agentcore.runtime import BedrockAgentCoreApp
+from model.load import load_model
+from mcp_client.client import get_streamable_http_mcp_client
+
+app = BedrockAgentCoreApp()
+log = app.logger
+
+# Set environment variables for model authentication
+load_model()
+
+# Get MCP Server
+mcp_server = get_streamable_http_mcp_client()
+
+
+# Define a simple function tool
+@function_tool
+def add_numbers(a: int, b: int) -> int:
+    """Return the sum of two numbers"""
+    return a + b
+
+
+# Define the agent execution
+async def main(query):
+    try:
+        async with mcp_server as server:
+            active_servers = [server] if server else []
+            # Currently defaults to GPT-4.1
+            # https://openai.github.io/openai-agents-python/models/
+            agent = Agent(
+                name="{{ name }}", mcp_servers=active_servers, tools=[add_numbers]
+            )
+            result = await Runner.run(agent, query)
+            return result
+    except Exception as e:
+        log.error(f"Error during agent execution: {e}", exc_info=True)
+        raise e
+
+
+@app.entrypoint
+async def invoke(payload, context):
+    log.info("Invoking Agent.....")
+
+    # Process the user prompt
+    prompt = payload.get("prompt", "What can you help me with?")
+
+    # Run the agent
+    result = await main(prompt)
+
+    # Return result
+    return {"result": result.final_output}
+
+
+if __name__ == "__main__":
+    app.run()
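For a quick local experiment with the same SDK pieces used above, a minimal synchronous run is sketched below; it assumes `OPENAI_API_KEY` is already set in the environment (see `model/load.py`) and skips the MCP server:

```python
# Illustrative only (not part of the template): a minimal synchronous run without the
# MCP server, assuming OPENAI_API_KEY is already exported.
from agents import Agent, Runner, function_tool


@function_tool
def add_numbers(a: int, b: int) -> int:
    """Return the sum of two numbers"""
    return a + b


agent = Agent(name="scratch-agent", tools=[add_numbers])
result = Runner.run_sync(agent, "Use the tool to add 19 and 23.")
print(result.final_output)
```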
package/dist/assets/python/openaiagents/base/mcp_client/client.py
@@ -0,0 +1,14 @@
+from agents.mcp import MCPServerStreamableHttp
+
+# ExaAI provides information about code through web searches, crawling and code context searches through their platform. Requires no authentication
+EXAMPLE_MCP_ENDPOINT = "https://mcp.exa.ai/mcp"
+
+
+def get_streamable_http_mcp_client() -> MCPServerStreamableHttp:
+    """
+    Returns an MCP Client compatible with OpenAI Agents SDK.
+    """
+    # to use an MCP server that supports bearer authentication, add headers={"Authorization": f"Bearer {access_token}"}
+    return MCPServerStreamableHttp(
+        name="AgentCore Gateway MCP", params={"url": EXAMPLE_MCP_ENDPOINT}
+    )
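The inline comment above points at a bearer-auth variant; a hedged sketch of what that could look like, passing `headers` through the streamable HTTP params (the `access_token` value is a placeholder supplied by the caller):

```python
# Sketch of the bearer-auth variant mentioned in the comment above; access_token is a placeholder.
from agents.mcp import MCPServerStreamableHttp

MCP_ENDPOINT = "https://mcp.exa.ai/mcp"  # same example endpoint as above


def get_authenticated_mcp_client(access_token: str) -> MCPServerStreamableHttp:
    """Return an MCP client that sends a bearer token with each request."""
    return MCPServerStreamableHttp(
        name="AgentCore Gateway MCP",
        params={
            "url": MCP_ENDPOINT,
            "headers": {"Authorization": f"Bearer {access_token}"},
        },
    )
```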
package/dist/assets/python/openaiagents/base/model/load.py
@@ -0,0 +1,37 @@
+import os
+from bedrock_agentcore.identity.auth import requires_api_key
+
+IDENTITY_PROVIDER_NAME = "{{identityProviders.[0].name}}"
+IDENTITY_ENV_VAR = "{{identityProviders.[0].envVarName}}"
+
+
+@requires_api_key(provider_name=IDENTITY_PROVIDER_NAME)
+def _agentcore_identity_api_key_provider(api_key: str) -> str:
+    """Fetch API key from AgentCore Identity."""
+    return api_key
+
+
+def _get_api_key() -> str:
+    """
+    Uses AgentCore Identity for API key management in deployed environments.
+    For local development, run via 'agentcore dev' which loads agentcore/.env.
+    """
+    if os.getenv("LOCAL_DEV") == "1":
+        api_key = os.getenv(IDENTITY_ENV_VAR)
+        if not api_key:
+            raise RuntimeError(
+                f"{IDENTITY_ENV_VAR} not found. Add {IDENTITY_ENV_VAR}=your-key to .env.local"
+            )
+        return api_key
+    return _agentcore_identity_api_key_provider()
+
+
+def load_model() -> None:
+    """
+    Set up OpenAI API key authentication.
+    Uses AgentCore Identity for API key management in deployed environments,
+    and falls back to .env file for local development.
+    Sets the OPENAI_API_KEY environment variable for the OpenAI Agents SDK.
+    """
+    api_key = _get_api_key()
+    os.environ["OPENAI_API_KEY"] = api_key if api_key else ""
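A standalone sketch of the `LOCAL_DEV` fallback pattern used above, with placeholder values (the real environment variable name is the rendered `{{identityProviders.[0].envVarName}}` value, `AGENTCORE_IDENTITY_OPENAI` in the README):

```python
# Standalone sketch of the LOCAL_DEV fallback used above; values are placeholders.
import os

IDENTITY_ENV_VAR = "AGENTCORE_IDENTITY_OPENAI"


def resolve_api_key(fetch_from_identity) -> str:
    """Prefer the local .env value when LOCAL_DEV=1, otherwise defer to AgentCore Identity."""
    if os.getenv("LOCAL_DEV") == "1":
        api_key = os.getenv(IDENTITY_ENV_VAR)
        if not api_key:
            raise RuntimeError(f"{IDENTITY_ENV_VAR} not set for local development")
        return api_key
    return fetch_from_identity()


os.environ["LOCAL_DEV"] = "1"
os.environ[IDENTITY_ENV_VAR] = "sk-placeholder"   # stand-in, not a real key
print(resolve_api_key(lambda: "from-identity"))   # -> sk-placeholder
```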
package/dist/assets/python/openaiagents/base/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "{{ name }}"
+version = "0.1.0"
+description = "AgentCore Runtime Application using OpenAI Agents SDK"
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "aws-opentelemetry-distro",
+    "openai-agents >= 0.4.2",
+    "bedrock-agentcore >= 1.0.3",
+    "botocore[crt] >= 1.35.0",
+    "python-dotenv >= 1.0.1",
+]
+
+[tool.hatch.build.targets.wheel]
+packages = ["."]
package/dist/assets/python/strands/base/README.md
@@ -0,0 +1,41 @@
+This is a project generated by the agentcore create basic CLI tool!
+
+# Layout
+
+There is one directory with generated application code, `src/` . At the root, there is a `.gitignore` file, a
+`.agentcore` folder which represents the configurations and state associated with this project. Other `agentcore`
+commands like `deploy`, `dev`, and `invoke` rely on the configuration stored here.
+
+## src/
+
+The main entrypoint to your app is defined in `src/main.py`. Using the AgentCore SDK `@app.entrypoint` decorator, this
+file defines a Starlette ASGI app with the chosen Agent framework SDK running within.
+
+`src/model/load.py` instantiates your chosen model provider.
+
+## Environment Variables
+
+| Variable                       | Required        | Description                                                      |
+| ------------------------------ | --------------- | ---------------------------------------------------------------- |
+| `AGENTCORE_IDENTITY_OPENAI`    | Yes (OpenAI)    | OpenAI API key (local) or Identity provider name (deployed)      |
+| `AGENTCORE_IDENTITY_ANTHROPIC` | Yes (Anthropic) | Anthropic API key (local) or Identity provider name (deployed)   |
+| `AGENTCORE_IDENTITY_GEMINI`    | Yes (Gemini)    | Gemini API key (local) or Identity provider name (deployed)      |
+| `LOCAL_DEV`                    | No              | Set to `1` to use `agentcore/.env` instead of AgentCore Identity |
+
+# Developing locally
+
+If installation was successful, a virtual environment is already created with dependencies installed.
+
+Run `source .venv/bin/activate` before developing.
+
+`agentcore dev` will start a local server on 0.0.0.0:8080.
+
+In a new terminal, you can invoke that server with:
+
+`agentcore invoke --dev "What can you do"`
+
+# Deployment
+
+After providing credentials, `agentcore deploy` will deploy your project into Amazon Bedrock AgentCore.
+
+Use `agentcore invoke` to invoke your deployed agent.
package/dist/assets/python/strands/base/gitignore.template
@@ -0,0 +1,41 @@
+# Environment variables
+.env
+.env
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Virtual environments
+venv/
+ENV/
+env/
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+Thumbs.db
package/dist/assets/python/strands/base/main.py
@@ -0,0 +1,76 @@
+from strands import Agent, tool
+from bedrock_agentcore.runtime import BedrockAgentCoreApp
+from model.load import load_model
+from mcp_client.client import get_streamable_http_mcp_client
+{{#if hasMemory}}
+from memory.session import get_memory_session_manager
+{{/if}}
+
+app = BedrockAgentCoreApp()
+log = app.logger
+
+# Define a Streamable HTTP MCP Client
+mcp_client = get_streamable_http_mcp_client()
+
+# Define a collection of tools used by the model
+tools = []
+
+# Define a simple function tool
+@tool
+def add_numbers(a: int, b: int) -> int:
+    """Return the sum of two numbers"""
+    return a+b
+tools.append(add_numbers)
+
+
+{{#if hasMemory}}
+def agent_factory():
+    cache = {}
+    def get_or_create_agent(session_id, user_id):
+        key = f"{session_id}/{user_id}"
+        if key not in cache:
+            # Create an agent for the given session_id and user_id
+            cache[key] = Agent(
+                model=load_model(),
+                session_manager=get_memory_session_manager(session_id, user_id),
+                system_prompt="""
+                You are a helpful assistant. Use tools when appropriate.
+                """,
+                tools=tools+[mcp_client]
+            )
+        return cache[key]
+    return get_or_create_agent
+get_or_create_agent = agent_factory()
+{{else}}
+# Create agent
+agent = Agent(
+    model=load_model(),
+    system_prompt="""
+    You are a helpful assistant. Use tools when appropriate.
+    """,
+    tools=tools+[mcp_client]
+)
+{{/if}}
+
+
+@app.entrypoint
+async def invoke(payload, context):
+    log.info("Invoking Agent.....")
+
+{{#if hasMemory}}
+    session_id = getattr(context, 'session_id', 'default-session')
+    user_id = getattr(context, 'user_id', 'default-user')
+    agent = get_or_create_agent(session_id, user_id)
+
+{{/if}}
+    # Execute and format response
+    stream = agent.stream_async(payload.get("prompt"))
+
+    async for event in stream:
+        # Handle Text parts of the response
+        if "data" in event and isinstance(event["data"], str):
+            yield event["data"]
+
+
+if __name__ == "__main__":
+    app.run()
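The `agent_factory` closure above caches one agent per `(session_id, user_id)` pair so that repeated invocations within a session reuse the same memory-backed agent. A minimal standalone sketch of that caching pattern:

```python
# Minimal standalone sketch of the per-(session, user) caching used by agent_factory above.
def make_cache(create):
    cache = {}

    def get_or_create(session_id: str, user_id: str):
        key = f"{session_id}/{user_id}"
        if key not in cache:
            cache[key] = create(session_id, user_id)
        return cache[key]

    return get_or_create


get = make_cache(lambda s, u: object())        # stand-in for building an Agent
assert get("s1", "u1") is get("s1", "u1")      # same pair -> same instance reused
assert get("s1", "u1") is not get("s1", "u2")  # different user -> new instance
```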
package/dist/assets/python/strands/base/mcp_client/client.py
@@ -0,0 +1,12 @@
+from mcp.client.streamable_http import streamablehttp_client
+from strands.tools.mcp.mcp_client import MCPClient
+
+# ExaAI provides information about code through web searches, crawling and code context searches through their platform. Requires no authentication
+EXAMPLE_MCP_ENDPOINT = "https://mcp.exa.ai/mcp"
+
+def get_streamable_http_mcp_client() -> MCPClient:
+    """
+    Returns an MCP Client compatible with Strands
+    """
+    # to use an MCP server that supports bearer authentication, add headers={"Authorization": f"Bearer {access_token}"}
+    return MCPClient(lambda: streamablehttp_client(EXAMPLE_MCP_ENDPOINT))
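As with the OpenAI Agents variant, the comment above hints at a bearer-auth option; a hedged sketch for Strands, passing `headers` to `streamablehttp_client` (the `access_token` value is a placeholder supplied by the caller):

```python
# Sketch of the bearer-auth variant mentioned in the comment above; access_token is a placeholder.
from mcp.client.streamable_http import streamablehttp_client
from strands.tools.mcp.mcp_client import MCPClient

MCP_ENDPOINT = "https://mcp.exa.ai/mcp"  # same example endpoint as above


def get_authenticated_mcp_client(access_token: str) -> MCPClient:
    """Return a Strands MCP client that sends a bearer token with each request."""
    return MCPClient(
        lambda: streamablehttp_client(
            MCP_ENDPOINT,
            headers={"Authorization": f"Bearer {access_token}"},
        )
    )
```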
package/dist/assets/python/strands/base/model/load.py
@@ -0,0 +1,123 @@
+{{#if (eq modelProvider "Bedrock")}}
+from strands.models.bedrock import BedrockModel
+
+
+def load_model() -> BedrockModel:
+    """Get Bedrock model client using IAM credentials."""
+    return BedrockModel(model_id="global.anthropic.claude-sonnet-4-5-20250929-v1:0")
+{{/if}}
+{{#if (eq modelProvider "Anthropic")}}
+import os
+
+from strands.models.anthropic import AnthropicModel
+from bedrock_agentcore.identity.auth import requires_api_key
+
+IDENTITY_PROVIDER_NAME = "{{identityProviders.[0].name}}"
+IDENTITY_ENV_VAR = "{{identityProviders.[0].envVarName}}"
+
+
+@requires_api_key(provider_name=IDENTITY_PROVIDER_NAME)
+def _agentcore_identity_api_key_provider(api_key: str) -> str:
+    """Fetch API key from AgentCore Identity."""
+    return api_key
+
+
+def _get_api_key() -> str:
+    """
+    Uses AgentCore Identity for API key management in deployed environments.
+    For local development, run via 'agentcore dev' which loads agentcore/.env.
+    """
+    if os.getenv("LOCAL_DEV") == "1":
+        api_key = os.getenv(IDENTITY_ENV_VAR)
+        if not api_key:
+            raise RuntimeError(
+                f"{IDENTITY_ENV_VAR} not found. Add {IDENTITY_ENV_VAR}=your-key to .env.local"
+            )
+        return api_key
+    return _agentcore_identity_api_key_provider()
+
+
+def load_model() -> AnthropicModel:
+    """Get authenticated Anthropic model client."""
+    return AnthropicModel(
+        client_args={"api_key": _get_api_key()},
+        model_id="claude-sonnet-4-5-20250929",
+        max_tokens=5000,
+    )
+{{/if}}
+{{#if (eq modelProvider "OpenAI")}}
+import os
+
+from strands.models.openai import OpenAIModel
+from bedrock_agentcore.identity.auth import requires_api_key
+
+IDENTITY_PROVIDER_NAME = "{{identityProviders.[0].name}}"
+IDENTITY_ENV_VAR = "{{identityProviders.[0].envVarName}}"
+
+
+@requires_api_key(provider_name=IDENTITY_PROVIDER_NAME)
+def _agentcore_identity_api_key_provider(api_key: str) -> str:
+    """Fetch API key from AgentCore Identity."""
+    return api_key
+
+
+def _get_api_key() -> str:
+    """
+    Uses AgentCore Identity for API key management in deployed environments.
+    For local development, run via 'agentcore dev' which loads agentcore/.env.
+    """
+    if os.getenv("LOCAL_DEV") == "1":
+        api_key = os.getenv(IDENTITY_ENV_VAR)
+        if not api_key:
+            raise RuntimeError(
+                f"{IDENTITY_ENV_VAR} not found. Add {IDENTITY_ENV_VAR}=your-key to .env.local"
+            )
+        return api_key
+    return _agentcore_identity_api_key_provider()
+
+
+def load_model() -> OpenAIModel:
+    """Get authenticated OpenAI model client."""
+    return OpenAIModel(
+        client_args={"api_key": _get_api_key()},
+        model_id="gpt-4o",
+    )
+{{/if}}
+{{#if (eq modelProvider "Gemini")}}
+import os
+
+from strands.models.gemini import GeminiModel
+from bedrock_agentcore.identity.auth import requires_api_key
+
+IDENTITY_PROVIDER_NAME = "{{identityProviders.[0].name}}"
+IDENTITY_ENV_VAR = "{{identityProviders.[0].envVarName}}"
+
+
+@requires_api_key(provider_name=IDENTITY_PROVIDER_NAME)
+def _agentcore_identity_api_key_provider(api_key: str) -> str:
+    """Fetch API key from AgentCore Identity."""
+    return api_key
+
+
+def _get_api_key() -> str:
+    """
+    Uses AgentCore Identity for API key management in deployed environments.
+    For local development, run via 'agentcore dev' which loads agentcore/.env.
+    """
+    if os.getenv("LOCAL_DEV") == "1":
+        api_key = os.getenv(IDENTITY_ENV_VAR)
+        if not api_key:
+            raise RuntimeError(
+                f"{IDENTITY_ENV_VAR} not found. Add {IDENTITY_ENV_VAR}=your-key to .env.local"
+            )
+        return api_key
+    return _agentcore_identity_api_key_provider()
+
+
+def load_model() -> GeminiModel:
+    """Get authenticated Gemini model client."""
+    return GeminiModel(
+        client_args={"api_key": _get_api_key()},
+        model_id="gemini-2.0-flash",
+    )
+{{/if}}
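As an illustrative sketch only (mirroring the wiring already shown in the Strands `main.py` above), a rendered `load_model()` could be used directly with a Strands `Agent`:

```python
# Illustrative only: using a rendered load_model() with a Strands Agent,
# following the non-memory branch of main.py above.
from strands import Agent

from model.load import load_model  # the generated module above

agent = Agent(
    model=load_model(),
    system_prompt="You are a helpful assistant.",
)
response = agent("What is 2 + 2?")
print(response)
```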