create-adk-agent 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/README.md +198 -0
  2. package/dist/generators/init/generator.js +87 -0
  3. package/dist/generators/init/generator.js.map +1 -0
  4. package/dist/generators/init/schema.d.js +3 -0
  5. package/dist/generators/init/schema.d.js.map +1 -0
  6. package/dist/index.js +3 -0
  7. package/dist/index.js.map +1 -0
  8. package/dist/lib/create-adk-agent.js +5 -0
  9. package/dist/lib/create-adk-agent.js.map +1 -0
  10. package/generators.json +10 -0
  11. package/package.json +73 -0
  12. package/src/generators/init/files/.env.example.template +16 -0
  13. package/src/generators/init/files/.eslintrc.json.template +20 -0
  14. package/src/generators/init/files/.gitignore.template +27 -0
  15. package/src/generators/init/files/.prettierrc.template +7 -0
  16. package/src/generators/init/files/README.md.template +242 -0
  17. package/src/generators/init/files/jest.config.ts.template +7 -0
  18. package/src/generators/init/files/package.json.template +39 -0
  19. package/src/generators/init/files/src/agents/basic/agent.ts.template +34 -0
  20. package/src/generators/init/files/src/agents/multi-tool/agent.ts.template +83 -0
  21. package/src/generators/init/files/src/agents/streaming/agent.ts.template +36 -0
  22. package/src/generators/init/files/src/agents/team/farewell-agent.ts.template +43 -0
  23. package/src/generators/init/files/src/agents/team/greeting-agent.ts.template +43 -0
  24. package/src/generators/init/files/src/agents/team/root-agent.ts.template +18 -0
  25. package/src/generators/init/files/src/agents/workflow/agent.ts.template +69 -0
  26. package/src/generators/init/files/src/index.ts.template +61 -0
  27. package/src/generators/init/files/tests/agents.test.ts.template +80 -0
  28. package/src/generators/init/files/tsconfig.json.template +20 -0
  29. package/src/generators/init/schema.json +124 -0
@@ -0,0 +1,242 @@
1
+ # <%= projectName %>
2
+
3
+ <%= description %>
4
+
5
+ ## 🔐 Security First - API Keys
6
+
7
+ **⚠️ IMPORTANT: Never commit your `.env` file to version control!**
8
+
9
+ This project requires API keys for LLM models. Your `.env` file is already in `.gitignore` to protect your keys.
10
+
11
+ ### Get Your API Key
12
+
13
+ <% if (modelProvider === 'gemini') { %>**Google Gemini API Key:**
14
+ - Visit [Google AI Studio](https://aistudio.google.com/apikey)
15
+ - Create or sign in to your Google account
16
+ - Generate an API key
17
+ - Add it to your `.env` file:
18
+ ```
19
+ GEMINI_API_KEY=your_actual_api_key_here
20
+ ```
21
+ <% } else if (modelProvider === 'openai') { %>**OpenAI API Key:**
22
+ - Visit [OpenAI Platform](https://platform.openai.com/api-keys)
23
+ - Sign in or create an account
24
+ - Generate an API key
25
+ - Add it to your `.env` file:
26
+ ```
27
+ OPENAI_API_KEY=your_actual_api_key_here
28
+ ```
29
+ <% } else if (modelProvider === 'anthropic') { %>**Anthropic API Key:**
30
+ - Visit [Anthropic Console](https://console.anthropic.com/settings/keys)
31
+ - Sign in or create an account
32
+ - Generate an API key
33
+ - Add it to your `.env` file:
34
+ ```
35
+ ANTHROPIC_API_KEY=your_actual_api_key_here
36
+ ```
37
+ <% } %>
38
+
39
+ ### Setup Environment
40
+
41
+ 1. Copy the example environment file:
42
+ ```bash
43
+ cp .env.example .env
44
+ ```
45
+
46
+ 2. Edit `.env` and add your actual API key
47
+
48
+ 3. Verify it's ignored by git:
49
+ ```bash
50
+ git status # .env should NOT appear
51
+ ```
52
+
53
+ ## 🚀 Quick Start
54
+
55
+ ### Install Dependencies
56
+
57
+ ```bash
58
+ npm install
59
+ ```
60
+
61
+ ### Run in Development Mode
62
+
63
+ Using **tsx** for instant TypeScript execution with hot reload:
64
+
65
+ ```bash
66
+ npm run dev
67
+ ```
68
+
69
+ This will:
70
+ - ✅ Start your agent immediately (no build step!)
71
+ - ✅ Watch for file changes and auto-reload
72
+ - ✅ Show compilation errors instantly
73
+
74
+ ### Run with ADK DevTools
75
+
76
+ ADK provides powerful development tools:
77
+
78
+ ```bash
79
+ # Web UI - Interactive testing in your browser
80
+ npm run adk:web
81
+
82
+ # CLI Runner - Test agents from command line
83
+ npm run adk:run
84
+ ```
85
+
86
+ ## 📁 Project Structure
87
+
88
+ ```
89
+ <%= projectName %>/
90
+ ├── src/
91
+ │ ├── index.ts # Entry point with environment validation
92
+ <% if (hasBasic) { %>│ ├── agents/basic/ # Basic agent with time tool
93
+ <% } %><% if (hasMultiTool) { %>│ ├── agents/multi-tool/ # Agent with multiple tools
94
+ <% } %><% if (hasTeam) { %>│ ├── agents/team/ # Multi-agent team coordination
95
+ <% } %><% if (hasStreaming) { %>│ ├── agents/streaming/ # Streaming responses
96
+ <% } %><% if (hasWorkflow) { %>│ └── agents/workflow/ # Workflow patterns
97
+ <% } %>├── tests/ # Jest tests
98
+ ├── .env # Your API keys (DO NOT COMMIT)
99
+ ├── .env.example # Example environment file
100
+ ├── package.json # Dependencies and scripts
101
+ └── tsconfig.json # TypeScript configuration
102
+ ```
103
+
104
+ ## 🤖 Available Agents
105
+
106
+ <% if (hasBasic) { %>### Basic Agent
107
+ Simple agent that demonstrates:
108
+ - ✅ FunctionTool creation with Zod validation
109
+ - ✅ Time-based tool (get current time in any timezone)
110
+ - ✅ Basic agent configuration
111
+
112
+ **Run:** `npm run dev` or access via ADK Web UI
113
+ <% } %>
114
+ <% if (hasMultiTool) { %>### Multi-Tool Agent
115
+ Demonstrates multiple tools:
116
+ - 🕐 Time tool (get current time)
117
+ - 🌤️ Weather tool (get weather for location)
118
+ - 🧮 Calculator tool (mathematical calculations)
119
+
120
+ **Run:** `npm run adk:run` and select this agent
121
+ <% } %>
122
+ <% if (hasTeam) { %>### Team Agent
123
+ Multi-agent coordination:
124
+ - 👋 Greeting agent (personalized hellos in multiple languages)
125
+ - 👋 Farewell agent (personalized goodbyes in multiple languages)
126
+ - 🎯 Root agent (coordinates and delegates tasks)
127
+
128
+ **Run via ADK Web UI** to see agent delegation in action
129
+ <% } %>
130
+ <% if (hasStreaming) { %>### Streaming Agent
131
+ Demonstrates streaming responses:
132
+ - 📡 Real-time response streaming with Live API
133
+ - 🔄 Long-running operation simulation
134
+ - 💬 Detailed step-by-step explanations
135
+
136
+ **Best experienced in ADK Web UI** with streaming enabled
137
+ <% } %>
138
+ <% if (hasWorkflow) { %>### Workflow Agent
139
+ Sequential workflow patterns:
140
+ - ✅ Input validation step
141
+ - 🔄 Data transformation step
142
+ - 💾 Result saving step
143
+
144
+ **Run with:** `npm run adk:run` to see ordered execution
145
+ <% } %>
146
+
147
+ ## 🛠️ Development Workflow
148
+
149
+ ### Development Mode (tsx watch)
150
+
151
+ ```bash
152
+ npm run dev
153
+ ```
154
+
155
+ **Why tsx?**
156
+ - ⚡ Instant start (no build step)
157
+ - 🔥 Hot reload on file changes
158
+ - 🐛 Better error messages
159
+ - 📦 Zero configuration
160
+
161
+ ### Build for Production
162
+
163
+ ```bash
164
+ npm run build # Compile TypeScript to dist/
165
+ npm run prod # Run compiled JavaScript
166
+ ```
167
+
168
+ ### Testing
169
+
170
+ ```bash
171
+ npm test # Run all tests
172
+ npm run test:watch # Watch mode
173
+ ```
174
+
175
+ ### Code Quality
176
+
177
+ ```bash
178
+ npm run lint # Check for issues
179
+ npm run format # Format with Prettier
180
+ ```
181
+
182
+ ## 🎯 Model Configuration
183
+
184
+ Your project is configured to use:
185
+ - **Provider:** <%= modelProvider %>
186
+ - **Model:** <%= model %>
187
+
188
+ ### Change Models
189
+
190
+ Edit your agent files to use different models:
191
+
192
+ <% if (modelProvider === 'gemini') { %>```typescript
193
+ // Direct model string for Gemini
194
+ model: 'gemini-2.5-flash', // Fast responses
195
+ model: 'gemini-2.5-pro', // Best quality
196
+ model: 'gemini-2.0-flash', // Previous-generation Flash
197
+ ```
198
+ <% } else if (modelProvider === 'openai') { %>```typescript
199
+ import { LiteLlm } from '@google/adk';
200
+
201
+ // Use LiteLLM wrapper for OpenAI
202
+ model: new LiteLlm({ model: 'openai/gpt-4o' }),
203
+ model: new LiteLlm({ model: 'openai/gpt-4o-mini' }),
204
+ ```
205
+ <% } else if (modelProvider === 'anthropic') { %>```typescript
206
+ import { LiteLlm } from '@google/adk';
207
+
208
+ // Use LiteLLM wrapper for Anthropic
209
+ model: new LiteLlm({ model: 'anthropic/claude-3-5-sonnet-latest' }),
210
+ model: new LiteLlm({ model: 'anthropic/claude-3-5-haiku-latest' }),
211
+ ```
212
+ <% } %>
213
+
214
+ ## 📚 Learn More
215
+
216
+ - [ADK Documentation](https://google.github.io/adk-docs/)
217
+ - [TypeScript Quick Start](https://google.github.io/adk-docs/get-started/typescript/)
218
+ - [Agent Tutorial](https://google.github.io/adk-docs/tutorial/agent/)
219
+ - [API Reference](https://google.github.io/adk-docs/api/)
220
+
221
+ ## 🐛 Troubleshooting
222
+
223
+ ### "GEMINI_API_KEY is required" error
224
+ - Make sure `.env` file exists and contains your API key
225
+ - Key format: `GEMINI_API_KEY=your_key_here` (no quotes)
226
+ - Restart your development server after editing `.env`
227
+
228
+ ### Module not found errors
229
+ - Run `npm install` to ensure all dependencies are installed
230
+ - Check that you're using `.js` extensions in imports (required for ESM)
231
+
232
+ ### TypeScript errors
233
+ - Run `npm run build` to see detailed compilation errors
234
+ - Check `tsconfig.json` has `verbatimModuleSyntax: false`
235
+
236
+ ### Tool execution errors
237
+ - Verify your tool's Zod schema matches the parameters
238
+ - Check the tool's `execute` function returns `{ status, report }`
239
+
240
+ ## 📄 License
241
+
242
+ MIT
@@ -0,0 +1,7 @@
1
+ export default {
2
+ preset: 'ts-jest',
3
+ testEnvironment: 'node',
4
+ roots: ['<rootDir>/tests'],
5
+ testMatch: ['**/*.test.ts'],
6
+ moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
7
+ };
@@ -0,0 +1,39 @@
1
+ {
2
+ "name": "<%= projectName %>",
3
+ "version": "1.0.0",
4
+ "description": "<%= description %>",
5
+ "type": "module",
6
+ "main": "dist/index.js",
7
+ "scripts": {
8
+ "dev": "tsx watch src/index.ts",
9
+ "dev:agent": "tsx watch src/agents/multi-tool/agent.ts",
10
+ "start": "tsx src/index.ts",
11
+ "build": "tsc",
12
+ "prod": "node dist/index.js",
13
+ "test": "jest",
14
+ "test:watch": "jest --watch",
15
+ "lint": "eslint src/**/*.ts",
16
+ "format": "prettier --write src/**/*.ts",
17
+ "adk:web": "adk web",
18
+ "adk:run": "adk run src/agents/multi-tool/agent.ts"
19
+ },
20
+ "keywords": ["adk", "agent", "ai", "llm"],
21
+ "license": "MIT",
22
+ "dependencies": {
23
+ "@google/adk": "^0.2.0",
24
+ "@google/adk-devtools": "^0.2.0",
25
+ "dotenv": "^16.4.0",
26
+ "zod": "^3.23.0"
27
+ },
28
+ "devDependencies": {
29
+ "@types/node": "^20.10.0",
30
+ "@typescript-eslint/eslint-plugin": "^7.0.0",
31
+ "@typescript-eslint/parser": "^7.0.0",
32
+ "eslint": "^8.56.0",
33
+ "jest": "^29.7.0",
34
+ "prettier": "^3.2.0",
35
+ "ts-jest": "^29.1.0",
36
+ "tsx": "^4.7.0",
37
+ "typescript": "^5.9.3"
38
+ }
39
+ }
@@ -0,0 +1,34 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Tool to get current time
6
+ const getCurrentTime = new FunctionTool({
7
+ name: 'get_current_time',
8
+ description: 'Returns the current time in the specified timezone.',
9
+ parameters: z.object({
10
+ timezone: z
11
+ .string()
12
+ .optional()
13
+ .describe('The timezone to use (e.g., "America/New_York", "Europe/London")'),
14
+ }),
15
+ execute: ({ timezone = 'UTC' }) => {
16
+ const now = new Date();
17
+ const timeString = now.toLocaleString('en-US', { timeZone: timezone });
18
+ return {
19
+ status: 'success',
20
+ report: `The current time in ${timezone} is ${timeString}`,
21
+ };
22
+ },
23
+ });
24
+
25
+ // Create the agent
26
+ export const rootAgent = new LlmAgent({
27
+ name: 'hello_time_agent',
28
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
29
+ description: 'An agent that can tell you the current time in any timezone.',
30
+ instruction: `You are a friendly time-telling assistant. When asked about the time,
31
+ use the get_current_time tool to provide the current time in the requested timezone.
32
+ If no timezone is specified, use UTC as the default.`,
33
+ tools: [getCurrentTime],
34
+ });
@@ -0,0 +1,83 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Tool to get current time
6
+ const getCurrentTime = new FunctionTool({
7
+ name: 'get_current_time',
8
+ description: 'Returns the current time in the specified timezone.',
9
+ parameters: z.object({
10
+ timezone: z
11
+ .string()
12
+ .optional()
13
+ .describe('The timezone to use (e.g., "America/New_York", "Europe/London")'),
14
+ }),
15
+ execute: ({ timezone = 'UTC' }) => {
16
+ const now = new Date();
17
+ const timeString = now.toLocaleString('en-US', { timeZone: timezone });
18
+ return {
19
+ status: 'success',
20
+ report: `The current time in ${timezone} is ${timeString}`,
21
+ };
22
+ },
23
+ });
24
+
25
+ // Tool to get weather (mock data for demo)
26
+ const getWeather = new FunctionTool({
27
+ name: 'get_weather',
28
+ description: 'Returns the current weather for a given location.',
29
+ parameters: z.object({
30
+ location: z.string().describe('The city or location to get weather for'),
31
+ }),
32
+ execute: ({ location }) => {
33
+ // Mock weather data - in production, call a real weather API
34
+ const conditions = ['sunny', 'cloudy', 'rainy', 'partly cloudy'];
35
+ const condition = conditions[Math.floor(Math.random() * conditions.length)];
36
+ const temp = Math.floor(Math.random() * 30) + 50; // 50-80°F
37
+
38
+ return {
39
+ status: 'success',
40
+ report: `The weather in ${location} is ${condition} with a temperature of ${temp}°F`,
41
+ };
42
+ },
43
+ });
44
+
45
+ // Tool for calculations
46
+ const calculate = new FunctionTool({
47
+ name: 'calculate',
48
+ description: 'Performs basic mathematical calculations.',
49
+ parameters: z.object({
50
+ expression: z
51
+ .string()
52
+ .describe('The mathematical expression to evaluate (e.g., "2 + 2", "10 * 5")'),
53
+ }),
54
+ execute: ({ expression }) => {
55
+ try {
56
+ // Simple eval for demo - in production, use a safe math parser
57
+ const result = eval(expression);
58
+ return {
59
+ status: 'success',
60
+ report: `${expression} = ${result}`,
61
+ };
62
+ } catch (error) {
63
+ return {
64
+ status: 'error',
65
+ report: `Error evaluating expression: ${error}`,
66
+ };
67
+ }
68
+ },
69
+ });
70
+
71
+ // Create the agent with multiple tools
72
+ export const rootAgent = new LlmAgent({
73
+ name: 'multi_tool_agent',
74
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
75
+ description: 'An agent with multiple tools for time, weather, and calculations.',
76
+ instruction: `You are a helpful assistant with access to multiple tools:
77
+ - get_current_time: Get the current time in any timezone
78
+ - get_weather: Get weather information for a location
79
+ - calculate: Perform mathematical calculations
80
+
81
+ Use the appropriate tool based on the user's request. Be friendly and helpful.`,
82
+ tools: [getCurrentTime, getWeather, calculate],
83
+ });
@@ -0,0 +1,36 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Tool that simulates a long-running operation
6
+ const processData = new FunctionTool({
7
+ name: 'process_data',
8
+ description: 'Processes data and returns the result.',
9
+ parameters: z.object({
10
+ data: z.string().describe('The data to process'),
11
+ }),
12
+ execute: ({ data }) => {
13
+ // Simulate processing
14
+ const processedData = data.split('').reverse().join('');
15
+ return {
16
+ status: 'success',
17
+ report: `Processed data: ${processedData}`,
18
+ };
19
+ },
20
+ });
21
+
22
+ // Streaming agent - uses Live API for streaming responses
23
+ export const rootAgent = new LlmAgent({
24
+ name: 'streaming_agent',
25
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
26
+ description: 'An agent that demonstrates streaming responses.',
27
+ instruction: `You are a helpful assistant that provides streaming responses.
28
+ When processing data, use the process_data tool and explain what you're doing.
29
+ Provide detailed, step-by-step explanations to demonstrate streaming.`,
30
+ tools: [processData],
31
+ });
32
+
33
+ // Note: To enable streaming in your application, use the ADK Live API:
34
+ // - For CLI: Use 'adk run' with --stream flag
35
+ // - For web: ADK DevTools UI has streaming enabled by default
36
+ // - For programmatic use: Use agent.run() with streaming options
@@ -0,0 +1,43 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Tool for saying goodbye
6
+ const sayGoodbye = new FunctionTool({
7
+ name: 'say_goodbye',
8
+ description: 'Creates a personalized farewell message.',
9
+ parameters: z.object({
10
+ name: z.string().describe('The name of the person to bid farewell'),
11
+ language: z
12
+ .string()
13
+ .optional()
14
+ .describe('The language for the farewell (e.g., "english", "spanish", "french")'),
15
+ }),
16
+ execute: ({ name, language = 'english' }) => {
17
+ const farewells: Record<string, string> = {
18
+ english: `Goodbye ${name}! Have a great day!`,
19
+ spanish: `¡Adiós ${name}! ¡Que tengas un gran día!`,
20
+ french: `Au revoir ${name}! Bonne journée!`,
21
+ german: `Auf Wiedersehen ${name}! Schönen Tag noch!`,
22
+ italian: `Arrivederci ${name}! Buona giornata!`,
23
+ };
24
+
25
+ const farewell = farewells[language.toLowerCase()] || farewells.english;
26
+ return {
27
+ status: 'success',
28
+ report: farewell,
29
+ };
30
+ },
31
+ });
32
+
33
+ // Farewell agent
34
+ export const farewellAgent = new LlmAgent({
35
+ name: 'farewell_agent',
36
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
37
+ description: 'Agent specialized in creating personalized farewells.',
38
+ instruction: `You are a friendly farewell specialist. When asked to say goodbye:
39
+ 1. Use the say_goodbye tool to create a personalized farewell
40
+ 2. If a language preference is mentioned, use that language
41
+ 3. Add a warm, positive message wishing them well`,
42
+ tools: [sayGoodbye],
43
+ });
@@ -0,0 +1,43 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Tool for saying hello
6
+ const sayHello = new FunctionTool({
7
+ name: 'say_hello',
8
+ description: 'Creates a personalized greeting message.',
9
+ parameters: z.object({
10
+ name: z.string().describe('The name of the person to greet'),
11
+ language: z
12
+ .string()
13
+ .optional()
14
+ .describe('The language for the greeting (e.g., "english", "spanish", "french")'),
15
+ }),
16
+ execute: ({ name, language = 'english' }) => {
17
+ const greetings: Record<string, string> = {
18
+ english: `Hello ${name}! Welcome!`,
19
+ spanish: `¡Hola ${name}! ¡Bienvenido!`,
20
+ french: `Bonjour ${name}! Bienvenue!`,
21
+ german: `Hallo ${name}! Willkommen!`,
22
+ italian: `Ciao ${name}! Benvenuto!`,
23
+ };
24
+
25
+ const greeting = greetings[language.toLowerCase()] || greetings.english;
26
+ return {
27
+ status: 'success',
28
+ report: greeting,
29
+ };
30
+ },
31
+ });
32
+
33
+ // Greeting agent
34
+ export const greetingAgent = new LlmAgent({
35
+ name: 'greeting_agent',
36
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
37
+ description: 'Agent specialized in creating personalized greetings.',
38
+ instruction: `You are a friendly greeting specialist. When asked to greet someone:
39
+ 1. Use the say_hello tool to create a personalized greeting
40
+ 2. If a language preference is mentioned, use that language
41
+ 3. Add a warm, welcoming message after the greeting`,
42
+ tools: [sayHello],
43
+ });
@@ -0,0 +1,18 @@
1
+ import { LlmAgent } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { greetingAgent } from './greeting-agent.js';
4
+ import { farewellAgent } from './farewell-agent.js';
5
+
6
+ // Root agent that coordinates between greeting and farewell agents
7
+ export const rootAgent = new LlmAgent({
8
+ name: 'team_root_agent',
9
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
10
+ description: 'Root agent that coordinates greeting and farewell tasks.',
11
+ instruction: `You are the coordinator of a team of agents. Your job is to delegate tasks:
12
+ - For greetings, hellos, or welcome messages, delegate to the greeting_agent
13
+ - For farewells, goodbyes, or departure messages, delegate to the farewell_agent
14
+ - You can delegate to multiple agents if the user wants both a greeting and farewell
15
+
16
+ Always provide a friendly introduction before delegating.`,
17
+ agents: [greetingAgent, farewellAgent],
18
+ });
@@ -0,0 +1,69 @@
1
+ import { LlmAgent, FunctionTool } from '@google/adk';<% if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>
2
+ import { LiteLlm } from '@google/adk';<% } %>
3
+ import { z } from 'zod';
4
+
5
+ // Step 1: Data validation
6
+ const validateInput = new FunctionTool({
7
+ name: 'validate_input',
8
+ description: 'Validates input data for processing.',
9
+ parameters: z.object({
10
+ input: z.string().describe('The input data to validate'),
11
+ }),
12
+ execute: ({ input }) => {
13
+ const isValid = input.length > 0 && input.length < 1000;
14
+ return {
15
+ status: isValid ? 'success' : 'error',
16
+ report: isValid
17
+ ? `Input is valid (${input.length} characters)`
18
+ : 'Input is invalid (too long or empty)',
19
+ };
20
+ },
21
+ });
22
+
23
+ // Step 2: Data transformation
24
+ const transformData = new FunctionTool({
25
+ name: 'transform_data',
26
+ description: 'Transforms validated data.',
27
+ parameters: z.object({
28
+ data: z.string().describe('The data to transform'),
29
+ }),
30
+ execute: ({ data }) => {
31
+ const transformed = data.toUpperCase().split('').reverse().join('');
32
+ return {
33
+ status: 'success',
34
+ report: `Transformed: ${transformed}`,
35
+ };
36
+ },
37
+ });
38
+
39
+ // Step 3: Save result
40
+ const saveResult = new FunctionTool({
41
+ name: 'save_result',
42
+ description: 'Saves the processed result.',
43
+ parameters: z.object({
44
+ result: z.string().describe('The result to save'),
45
+ }),
46
+ execute: ({ result }) => {
47
+ // In a real application, save to database or file
48
+ return {
49
+ status: 'success',
50
+ report: `Result saved: ${result}`,
51
+ };
52
+ },
53
+ });
54
+
55
+ // Workflow agent - demonstrates sequential processing
56
+ export const rootAgent = new LlmAgent({
57
+ name: 'workflow_agent',
58
+ <% if (modelProvider === 'gemini') { %>model: '<%= model %>',<% } else if (modelProvider === 'openai' || modelProvider === 'anthropic') { %>model: new LiteLlm({ model: '<%= model %>' }),<% } else { %>model: '<%= model %>',<% } %>
59
+ description: 'An agent that demonstrates workflow patterns.',
60
+ instruction: `You are a workflow orchestration assistant. When given data to process:
61
+
62
+ 1. VALIDATE: First use validate_input to check if the input is valid
63
+ 2. TRANSFORM: If valid, use transform_data to process it
64
+ 3. SAVE: Finally use save_result to save the processed data
65
+
66
+ Always follow this sequence. Report the status after each step.
67
+ If validation fails, do not proceed to transformation.`,
68
+ tools: [validateInput, transformData, saveResult],
69
+ });