create-adk-agent 0.0.2 → 0.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/README.md +10 -1
  2. package/bin/create-adk-agent.js +361 -35
  3. package/dist/generators/init/files/.env.example.template +16 -0
  4. package/dist/generators/init/files/.eslintrc.json.template +20 -0
  5. package/dist/generators/init/files/.gitignore.template +27 -0
  6. package/dist/generators/init/files/.prettierrc.template +7 -0
  7. package/dist/generators/init/files/README.md.template +243 -0
  8. package/dist/generators/init/files/jest.config.ts.template +7 -0
  9. package/dist/generators/init/files/package.json.template +41 -0
  10. package/dist/generators/init/files/src/agents/basic/agent.ts.template +34 -0
  11. package/dist/generators/init/files/src/agents/multi-tool/agent.ts.template +83 -0
  12. package/dist/generators/init/files/src/agents/streaming/agent.ts.template +36 -0
  13. package/dist/generators/init/files/src/agents/team/farewell-agent.ts.template +43 -0
  14. package/dist/generators/init/files/src/agents/team/greeting-agent.ts.template +43 -0
  15. package/dist/generators/init/files/src/agents/team/root-agent.ts.template +18 -0
  16. package/dist/generators/init/files/src/agents/workflow/agent.ts.template +69 -0
  17. package/dist/generators/init/files/src/index.ts.template +61 -0
  18. package/dist/generators/init/files/tests/agents.test.ts.template +80 -0
  19. package/dist/generators/init/files/tsconfig.json.template +20 -0
  20. package/dist/generators/init/files/vite.config.ts.template +36 -0
  21. package/dist/generators/init/generator.js +3 -0
  22. package/dist/generators/init/generator.js.map +1 -1
  23. package/dist/generators/init/schema.json +124 -0
  24. package/package.json +20 -4
  25. package/src/generators/init/files/README.md.template +3 -2
  26. package/src/generators/init/files/package.json.template +8 -6
  27. package/src/generators/init/files/vite.config.ts.template +36 -0
  28. package/templates/basic/.env.example +16 -0
  29. package/templates/basic/.eslintrc.json +13 -0
  30. package/templates/basic/.prettierrc +5 -0
  31. package/templates/basic/README.md +155 -0
  32. package/templates/basic/_gitignore +8 -0
  33. package/templates/basic/jest.config.ts +8 -0
  34. package/templates/basic/package.json +41 -0
  35. package/templates/basic/src/index.ts +60 -0
  36. package/templates/basic/tests/agents.test.ts +19 -0
  37. package/templates/basic/tsconfig.json +21 -0
@@ -0,0 +1,243 @@
1
+ # <%= projectName %>
2
+
3
+ <%= description %>
4
+
5
+ ## 🔐 Security First - API Keys
6
+
7
+ **⚠️ IMPORTANT: Never commit your `.env` file to version control!**
8
+
9
+ This project requires API keys for LLM models. Your `.env` file is already in `.gitignore` to protect your keys.
10
+
11
+ ### Get Your API Key
12
+
13
+ <% if (modelProvider === 'gemini') { %>**Google Gemini API Key:**
14
+ - Visit [Google AI Studio](https://aistudio.google.com/apikey)
15
+ - Create or sign in to your Google account
16
+ - Generate an API key
17
+ - Add it to your `.env` file:
18
+ ```
19
+ GEMINI_API_KEY=your_actual_api_key_here
20
+ ```
21
+ <% } else if (modelProvider === 'openai') { %>**OpenAI API Key:**
22
+ - Visit [OpenAI Platform](https://platform.openai.com/api-keys)
23
+ - Sign in or create an account
24
+ - Generate an API key
25
+ - Add it to your `.env` file:
26
+ ```
27
+ OPENAI_API_KEY=your_actual_api_key_here
28
+ ```
29
+ <% } else if (modelProvider === 'anthropic') { %>**Anthropic API Key:**
30
+ - Visit [Anthropic Console](https://console.anthropic.com/settings/keys)
31
+ - Sign in or create an account
32
+ - Generate an API key
33
+ - Add it to your `.env` file:
34
+ ```
35
+ ANTHROPIC_API_KEY=your_actual_api_key_here
36
+ ```
37
+ <% } %>
38
+
39
+ ### Setup Environment
40
+
41
+ 1. Copy the example environment file:
42
+ ```bash
43
+ cp .env.example .env
44
+ ```
45
+
46
+ 2. Edit `.env` and add your actual API key
47
+
48
+ 3. Verify it's ignored by git:
49
+ ```bash
50
+ git status # .env should NOT appear
51
+ ```
52
+
53
+ ## 🚀 Quick Start
54
+
55
+ ### Install Dependencies
56
+
57
+ ```bash
58
+ npm install
59
+ ```
60
+
61
+ ### Run in Development Mode
62
+
63
+ Using **Vite** for fast TypeScript execution with hot reload:
64
+
65
+ ```bash
66
+ npm run dev
67
+ ```
68
+
69
+ This will:
70
+ - ✅ Start your agent immediately with Vite's fast HMR
71
+ - ✅ Watch for file changes and auto-reload
72
+ - ✅ Show compilation errors instantly
73
+ - ✅ Optimized build performance
74
+
75
+ ### Run with ADK DevTools
76
+
77
+ ADK provides powerful development tools:
78
+
79
+ ```bash
80
+ # Web UI - Interactive testing in your browser
81
+ npm run adk:web
82
+
83
+ # CLI Runner - Test agents from command line
84
+ npm run adk:run
85
+ ```
86
+
87
+ ## 📁 Project Structure
88
+
89
+ ```
90
+ <%= projectName %>/
91
+ ├── src/
92
+ │ ├── index.ts # Entry point with environment validation
93
+ <% if (hasBasic) { %>│ ├── agents/basic/ # Basic agent with time tool
94
+ <% } %><% if (hasMultiTool) { %>│ ├── agents/multi-tool/ # Agent with multiple tools
95
+ <% } %><% if (hasTeam) { %>│ ├── agents/team/ # Multi-agent team coordination
96
+ <% } %><% if (hasStreaming) { %>│ ├── agents/streaming/ # Streaming responses
97
+ <% } %><% if (hasWorkflow) { %>│ └── agents/workflow/ # Workflow patterns
98
+ <% } %>├── tests/ # Jest tests
99
+ ├── .env # Your API keys (DO NOT COMMIT)
100
+ ├── .env.example # Example environment file
101
+ ├── package.json # Dependencies and scripts
102
+ └── tsconfig.json # TypeScript configuration
103
+ ```
104
+
105
+ ## 🤖 Available Agents
106
+
107
+ <% if (hasBasic) { %>### Basic Agent
108
+ Simple agent that demonstrates:
109
+ - ✅ FunctionTool creation with Zod validation
110
+ - ✅ Time-based tool (get current time in any timezone)
111
+ - ✅ Basic agent configuration
112
+
113
+ **Run:** `npm run dev` or access via ADK Web UI
114
+ <% } %>
115
+ <% if (hasMultiTool) { %>### Multi-Tool Agent
116
+ Demonstrates multiple tools:
117
+ - 🕐 Time tool (get current time)
118
+ - 🌤️ Weather tool (get weather for location)
119
+ - 🧮 Calculator tool (mathematical calculations)
120
+
121
+ **Run:** `npm run adk:run` and select this agent
122
+ <% } %>
123
+ <% if (hasTeam) { %>### Team Agent
124
+ Multi-agent coordination:
125
+ - 👋 Greeting agent (personalized hellos in multiple languages)
126
+ - 👋 Farewell agent (personalized goodbyes in multiple languages)
127
+ - 🎯 Root agent (coordinates and delegates tasks)
128
+
129
+ **Run via ADK Web UI** to see agent delegation in action
130
+ <% } %>
131
+ <% if (hasStreaming) { %>### Streaming Agent
132
+ Demonstrates streaming responses:
133
+ - 📡 Real-time response streaming with Live API
134
+ - 🔄 Long-running operation simulation
135
+ - 💬 Detailed step-by-step explanations
136
+
137
+ **Best experienced in ADK Web UI** with streaming enabled
138
+ <% } %>
139
+ <% if (hasWorkflow) { %>### Workflow Agent
140
+ Sequential workflow patterns:
141
+ - ✅ Input validation step
142
+ - 🔄 Data transformation step
143
+ - 💾 Result saving step
144
+
145
+ **Run with:** `npm run adk:run` to see ordered execution
146
+ <% } %>
147
+
148
+ ## 🛠️ Development Workflow
149
+
150
+ ### Development Mode (vite-node watch)
151
+
152
+ ```bash
153
+ npm run dev
154
+ ```
155
+
156
+ **Why vite-node?**
157
+ - ⚡ Instant start (no build step)
158
+ - 🔥 Hot reload on file changes
159
+ - 🐛 Better error messages
160
+ - 📦 Zero configuration
161
+
162
+ ### Build for Production
163
+
164
+ ```bash
165
+ npm run build # Compile TypeScript to dist/
166
+ npm run prod # Run compiled JavaScript
167
+ ```
168
+
169
+ ### Testing
170
+
171
+ ```bash
172
+ npm test # Run all tests
173
+ npm run test:watch # Watch mode
174
+ ```
175
+
176
+ ### Code Quality
177
+
178
+ ```bash
179
+ npm run lint # Check for issues
180
+ npm run format # Format with Prettier
181
+ ```
182
+
183
+ ## 🎯 Model Configuration
184
+
185
+ Your project is configured to use:
186
+ - **Provider:** <%= modelProvider %>
187
+ - **Model:** <%= model %>
188
+
189
+ ### Change Models
190
+
191
+ Edit your agent files to use different models:
192
+
193
+ <% if (modelProvider === 'gemini') { %>```typescript
194
+ // Direct model string for Gemini
195
+ model: 'gemini-3.0-flash', // Fast responses
196
+ model: 'gemini-3.0-pro', // Best quality
197
+ model: 'gemini-2.5-flash', // Earlier Flash generation
198
+ ```
199
+ <% } else if (modelProvider === 'openai') { %>```typescript
200
+ import { LiteLlm } from '@google/adk';
201
+
202
+ // Use LiteLLM wrapper for OpenAI
203
+ model: new LiteLlm({ model: 'openai/gpt-4o' }),
204
+ model: new LiteLlm({ model: 'openai/gpt-4o-mini' }),
205
+ ```
206
+ <% } else if (modelProvider === 'anthropic') { %>```typescript
207
+ import { LiteLlm } from '@google/adk';
208
+
209
+ // Use LiteLLM wrapper for Anthropic
210
+ model: new LiteLlm({ model: 'anthropic/claude-3-5-sonnet' }),
211
+ model: new LiteLlm({ model: 'anthropic/claude-3-5-haiku' }),
212
+ ```
213
+ <% } %>
214
+
215
+ ## 📚 Learn More
216
+
217
+ - [ADK Documentation](https://google.github.io/adk-docs/)
218
+ - [TypeScript Quick Start](https://google.github.io/adk-docs/get-started/typescript/)
219
+ - [Agent Tutorial](https://google.github.io/adk-docs/tutorial/agent/)
220
+ - [API Reference](https://google.github.io/adk-docs/api/)
221
+
222
+ ## 🐛 Troubleshooting
223
+
224
+ ### "GEMINI_API_KEY is required" error
225
+ - Make sure `.env` file exists and contains your API key
226
+ - Key format: `GEMINI_API_KEY=your_key_here` (no quotes)
227
+ - Restart your development server after editing `.env`
228
+
229
+ ### Module not found errors
230
+ - Run `npm install` to ensure all dependencies are installed
231
+ - Check that you're using `.js` extensions in imports (required for ESM)
232
+
233
+ ### TypeScript errors
234
+ - Run `npm run build` to see detailed compilation errors
235
+ - Check `tsconfig.json` has `verbatimModuleSyntax: false`
236
+
237
+ ### Tool execution errors
238
+ - Verify your tool's Zod schema matches the parameters
239
+ - Check the tool's `execute` function returns `{ status, report }`
240
+
241
+ ## 📄 License
242
+
243
+ MIT
@@ -0,0 +1,7 @@
1
+ export default {
2
+ preset: 'ts-jest',
3
+ testEnvironment: 'node',
4
+ roots: ['<rootDir>/tests'],
5
+ testMatch: ['**/*.test.ts'],
6
+ moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
7
+ };
@@ -0,0 +1,41 @@
1
+ {
2
+ "name": "<%= projectName %>",
3
+ "version": "1.0.0",
4
+ "description": "<%= description %>",
5
+ "type": "module",
6
+ "main": "dist/index.js",
7
+ "scripts": {
8
+ "dev": "vite-node --watch src/index.ts",
9
+ "dev:agent": "vite-node --watch src/agents/multi-tool/agent.ts",
10
+ "start": "vite-node src/index.ts",
11
+ "build": "vite build",
12
+ "prod": "node dist/index.js",
13
+ "test": "jest",
14
+ "test:watch": "jest --watch",
15
+ "lint": "eslint src/**/*.ts",
16
+ "format": "prettier --write src/**/*.ts",
17
+ "adk:web": "adk web",
18
+ "adk:run": "adk run src/agents/multi-tool/agent.ts"
19
+ },
20
+ "keywords": ["adk", "agent", "ai", "llm"],
21
+ "license": "MIT",
22
+ "dependencies": {
23
+ "@google/adk": "^0.2.0",
24
+ "@google/adk-devtools": "^0.2.0",
25
+ "dotenv": "^16.4.0",
26
+ "zod": "^3.23.0"
27
+ },
28
+ "devDependencies": {
29
+ "@types/node": "^20.10.0",
30
+ "@typescript-eslint/eslint-plugin": "^7.0.0",
31
+ "@typescript-eslint/parser": "^7.0.0",
32
+ "eslint": "^8.56.0",
33
+ "jest": "^29.7.0",
34
+ "prettier": "^3.2.0",
35
+ "ts-jest": "^29.1.0",
36
+ "typescript": "^5.9.3",
37
+ "vite": "^6.0.0",
38
+ "vite-node": "^2.1.0",
39
+ "vite-plugin-node": "^3.1.0"
40
+ }
41
+ }
@@ -0,0 +1,34 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Tool to get current time
6
+ const getCurrentTime = new FunctionTool({
7
+ name: 'get_current_time',
8
+ description: 'Returns the current time in the specified timezone.',
9
+ parameters: z.object({
10
+ timezone: z
11
+ .string()
12
+ .optional()
13
+ .describe('The timezone to use (e.g., "America/New_York", "Europe/London")'),
14
+ }),
15
+ execute: ({ timezone = 'UTC' }) => {
16
+ const now = new Date();
17
+ const timeString = now.toLocaleString('en-US', { timeZone: timezone });
18
+ return {
19
+ status: 'success',
20
+ report: `The current time in ${timezone} is ${timeString}`,
21
+ };
22
+ },
23
+ });
24
+
25
+ // Create the agent
26
+ export const rootAgent = new LlmAgent({
27
+ name: 'hello_time_agent',
28
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
29
+ description: 'An agent that can tell you the current time in any timezone.',
30
+ instruction: `You are a friendly time-telling assistant. When asked about the time,
31
+ use the get_current_time tool to provide the current time in the requested timezone.
32
+ If no timezone is specified, use UTC as the default.`,
33
+ tools: [getCurrentTime],
34
+ });
@@ -0,0 +1,83 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Tool to get current time
6
+ const getCurrentTime = new FunctionTool({
7
+ name: 'get_current_time',
8
+ description: 'Returns the current time in the specified timezone.',
9
+ parameters: z.object({
10
+ timezone: z
11
+ .string()
12
+ .optional()
13
+ .describe('The timezone to use (e.g., "America/New_York", "Europe/London")'),
14
+ }),
15
+ execute: ({ timezone = 'UTC' }) => {
16
+ const now = new Date();
17
+ const timeString = now.toLocaleString('en-US', { timeZone: timezone });
18
+ return {
19
+ status: 'success',
20
+ report: `The current time in ${timezone} is ${timeString}`,
21
+ };
22
+ },
23
+ });
24
+
25
+ // Tool to get weather (mock data for demo)
26
+ const getWeather = new FunctionTool({
27
+ name: 'get_weather',
28
+ description: 'Returns the current weather for a given location.',
29
+ parameters: z.object({
30
+ location: z.string().describe('The city or location to get weather for'),
31
+ }),
32
+ execute: ({ location }) => {
33
+ // Mock weather data - in production, call a real weather API
34
+ const conditions = ['sunny', 'cloudy', 'rainy', 'partly cloudy'];
35
+ const condition = conditions[Math.floor(Math.random() * conditions.length)];
36
+ const temp = Math.floor(Math.random() * 30) + 50; // 50-80°F
37
+
38
+ return {
39
+ status: 'success',
40
+ report: `The weather in ${location} is ${condition} with a temperature of ${temp}°F`,
41
+ };
42
+ },
43
+ });
44
+
45
+ // Tool for calculations
46
+ const calculate = new FunctionTool({
47
+ name: 'calculate',
48
+ description: 'Performs basic mathematical calculations.',
49
+ parameters: z.object({
50
+ expression: z
51
+ .string()
52
+ .describe('The mathematical expression to evaluate (e.g., "2 + 2", "10 * 5")'),
53
+ }),
54
+ execute: ({ expression }) => {
55
+ try {
56
+ // Simple eval for demo - in production, use a safe math parser
57
+ const result = eval(expression);
58
+ return {
59
+ status: 'success',
60
+ report: `${expression} = ${result}`,
61
+ };
62
+ } catch (error) {
63
+ return {
64
+ status: 'error',
65
+ report: `Error evaluating expression: ${error}`,
66
+ };
67
+ }
68
+ },
69
+ });
70
+
71
+ // Create the agent with multiple tools
72
+ export const rootAgent = new LlmAgent({
73
+ name: 'multi_tool_agent',
74
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
75
+ description: 'An agent with multiple tools for time, weather, and calculations.',
76
+ instruction: `You are a helpful assistant with access to multiple tools:
77
+ - get_current_time: Get the current time in any timezone
78
+ - get_weather: Get weather information for a location
79
+ - calculate: Perform mathematical calculations
80
+
81
+ Use the appropriate tool based on the user's request. Be friendly and helpful.`,
82
+ tools: [getCurrentTime, getWeather, calculate],
83
+ });
@@ -0,0 +1,36 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Tool that simulates a long-running operation
6
+ const processData = new FunctionTool({
7
+ name: 'process_data',
8
+ description: 'Processes data and returns the result.',
9
+ parameters: z.object({
10
+ data: z.string().describe('The data to process'),
11
+ }),
12
+ execute: ({ data }) => {
13
+ // Simulate processing
14
+ const processedData = data.split('').reverse().join('');
15
+ return {
16
+ status: 'success',
17
+ report: `Processed data: ${processedData}`,
18
+ };
19
+ },
20
+ });
21
+
22
+ // Streaming agent - uses Live API for streaming responses
23
+ export const rootAgent = new LlmAgent({
24
+ name: 'streaming_agent',
25
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
26
+ description: 'An agent that demonstrates streaming responses.',
27
+ instruction: `You are a helpful assistant that provides streaming responses.
28
+ When processing data, use the process_data tool and explain what you're doing.
29
+ Provide detailed, step-by-step explanations to demonstrate streaming.`,
30
+ tools: [processData],
31
+ });
32
+
33
+ // Note: To enable streaming in your application, use the ADK Live API:
34
+ // - For CLI: Use 'adk run' with --stream flag
35
+ // - For web: ADK DevTools UI has streaming enabled by default
36
+ // - For programmatic use: Use agent.run() with streaming options
@@ -0,0 +1,43 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Tool for saying goodbye
6
+ const sayGoodbye = new FunctionTool({
7
+ name: 'say_goodbye',
8
+ description: 'Creates a personalized farewell message.',
9
+ parameters: z.object({
10
+ name: z.string().describe('The name of the person to bid farewell'),
11
+ language: z
12
+ .string()
13
+ .optional()
14
+ .describe('The language for the farewell (e.g., "english", "spanish", "french")'),
15
+ }),
16
+ execute: ({ name, language = 'english' }) => {
17
+ const farewells: Record<string, string> = {
18
+ english: `Goodbye ${name}! Have a great day!`,
19
+ spanish: `¡Adiós ${name}! ¡Que tengas un gran día!`,
20
+ french: `Au revoir ${name}! Bonne journée!`,
21
+ german: `Auf Wiedersehen ${name}! Schönen Tag noch!`,
22
+ italian: `Arrivederci ${name}! Buona giornata!`,
23
+ };
24
+
25
+ const farewell = farewells[language.toLowerCase()] || farewells.english;
26
+ return {
27
+ status: 'success',
28
+ report: farewell,
29
+ };
30
+ },
31
+ });
32
+
33
+ // Farewell agent
34
+ export const farewellAgent = new LlmAgent({
35
+ name: 'farewell_agent',
36
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
37
+ description: 'Agent specialized in creating personalized farewells.',
38
+ instruction: `You are a friendly farewell specialist. When asked to say goodbye:
39
+ 1. Use the say_goodbye tool to create a personalized farewell
40
+ 2. If a language preference is mentioned, use that language
41
+ 3. Add a warm, positive message wishing them well`,
42
+ tools: [sayGoodbye],
43
+ });
@@ -0,0 +1,43 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Tool for saying hello
6
+ const sayHello = new FunctionTool({
7
+ name: 'say_hello',
8
+ description: 'Creates a personalized greeting message.',
9
+ parameters: z.object({
10
+ name: z.string().describe('The name of the person to greet'),
11
+ language: z
12
+ .string()
13
+ .optional()
14
+ .describe('The language for the greeting (e.g., "english", "spanish", "french")'),
15
+ }),
16
+ execute: ({ name, language = 'english' }) => {
17
+ const greetings: Record<string, string> = {
18
+ english: `Hello ${name}! Welcome!`,
19
+ spanish: `¡Hola ${name}! ¡Bienvenido!`,
20
+ french: `Bonjour ${name}! Bienvenue!`,
21
+ german: `Hallo ${name}! Willkommen!`,
22
+ italian: `Ciao ${name}! Benvenuto!`,
23
+ };
24
+
25
+ const greeting = greetings[language.toLowerCase()] || greetings.english;
26
+ return {
27
+ status: 'success',
28
+ report: greeting,
29
+ };
30
+ },
31
+ });
32
+
33
+ // Greeting agent
34
+ export const greetingAgent = new LlmAgent({
35
+ name: 'greeting_agent',
36
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
37
+ description: 'Agent specialized in creating personalized greetings.',
38
+ instruction: `You are a friendly greeting specialist. When asked to greet someone:
39
+ 1. Use the say_hello tool to create a personalized greeting
40
+ 2. If a language preference is mentioned, use that language
41
+ 3. Add a warm, welcoming message after the greeting`,
42
+ tools: [sayHello],
43
+ });
@@ -0,0 +1,18 @@
1
+ import { LlmAgent } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { greetingAgent } from './greeting-agent.js';
4
+ import { farewellAgent } from './farewell-agent.js';
5
+
6
+ // Root agent that coordinates between greeting and farewell agents
7
+ export const rootAgent = new LlmAgent({
8
+ name: 'team_root_agent',
9
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
10
+ description: 'Root agent that coordinates greeting and farewell tasks.',
11
+ instruction: `You are the coordinator of a team of agents. Your job is to delegate tasks:
12
+ - For greetings, hellos, or welcome messages, delegate to the greeting_agent
13
+ - For farewells, goodbyes, or departure messages, delegate to the farewell_agent
14
+ - You can delegate to multiple agents if the user wants both a greeting and farewell
15
+
16
+ Always provide a friendly introduction before delegating.`,
17
+ agents: [greetingAgent, farewellAgent],
18
+ });
@@ -0,0 +1,69 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Step 1: Data validation
6
+ const validateInput = new FunctionTool({
7
+ name: 'validate_input',
8
+ description: 'Validates input data for processing.',
9
+ parameters: z.object({
10
+ input: z.string().describe('The input data to validate'),
11
+ }),
12
+ execute: ({ input }) => {
13
+ const isValid = input.length > 0 && input.length < 1000;
14
+ return {
15
+ status: isValid ? 'success' : 'error',
16
+ report: isValid
17
+ ? `Input is valid (${input.length} characters)`
18
+ : 'Input is invalid (too long or empty)',
19
+ };
20
+ },
21
+ });
22
+
23
+ // Step 2: Data transformation
24
+ const transformData = new FunctionTool({
25
+ name: 'transform_data',
26
+ description: 'Transforms validated data.',
27
+ parameters: z.object({
28
+ data: z.string().describe('The data to transform'),
29
+ }),
30
+ execute: ({ data }) => {
31
+ const transformed = data.toUpperCase().split('').reverse().join('');
32
+ return {
33
+ status: 'success',
34
+ report: `Transformed: ${transformed}`,
35
+ };
36
+ },
37
+ });
38
+
39
+ // Step 3: Save result
40
+ const saveResult = new FunctionTool({
41
+ name: 'save_result',
42
+ description: 'Saves the processed result.',
43
+ parameters: z.object({
44
+ result: z.string().describe('The result to save'),
45
+ }),
46
+ execute: ({ result }) => {
47
+ // In a real application, save to database or file
48
+ return {
49
+ status: 'success',
50
+ report: `Result saved: ${result}`,
51
+ };
52
+ },
53
+ });
54
+
55
+ // Workflow agent - demonstrates sequential processing
56
+ export const rootAgent = new LlmAgent({
57
+ name: 'workflow_agent',
58
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
59
+ description: 'An agent that demonstrates workflow patterns.',
60
+ instruction: `You are a workflow orchestration assistant. When given data to process:
61
+
62
+ 1. VALIDATE: First use validate_input to check if the input is valid
63
+ 2. TRANSFORM: If valid, use transform_data to process it
64
+ 3. SAVE: Finally use save_result to save the processed data
65
+
66
+ Always follow this sequence. Report the status after each step.
67
+ If validation fails, do not proceed to transformation.`,
68
+ tools: [validateInput, transformData, saveResult],
69
+ });