create-langgraph-app 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +38 -0
- package/dist/create.d.ts +2 -0
- package/dist/create.js +664 -0
- package/package.json +37 -0
package/README.md
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
# create-langgraph-app
|
|
2
|
+
|
|
3
|
+
Interactive CLI to scaffold a new LangGraph multi-agent project in seconds.
|
|
4
|
+
|
|
5
|
+
## Usage
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npx create-langgraph-app
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## What it does
|
|
12
|
+
|
|
13
|
+
1. Asks for your project name
|
|
14
|
+
2. Lets you pick an LLM provider (OpenAI, Anthropic, Google, Groq, Ollama)
|
|
15
|
+
3. Lets you select which agent patterns to include:
|
|
16
|
+
- **Supervisor** — central coordinator + worker agents
|
|
17
|
+
- **Swarm** — peer-to-peer agent handoffs
|
|
18
|
+
- **Human-in-the-Loop** — approval before dangerous actions
|
|
19
|
+
- **Structured Output** — typed JSON responses
|
|
20
|
+
- **RAG** — retrieval-augmented generation
|
|
21
|
+
4. Generates a ready-to-run project with only the patterns you selected
|
|
22
|
+
5. Installs dependencies
|
|
23
|
+
|
|
24
|
+
## Development
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
cd create-langgraph-app
|
|
28
|
+
npm install
|
|
29
|
+
npm run dev # Run locally
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
## Publishing
|
|
33
|
+
|
|
34
|
+
```bash
|
|
35
|
+
cd create-langgraph-app
|
|
36
|
+
npm run build
|
|
37
|
+
npm publish
|
|
38
|
+
```
|
package/dist/create.d.ts
ADDED
package/dist/create.js
ADDED
|
@@ -0,0 +1,664 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import * as readline from "node:readline/promises";
|
|
3
|
+
import { stdin, stdout } from "node:process";
|
|
4
|
+
import * as fs from "node:fs";
|
|
5
|
+
import * as path from "node:path";
|
|
6
|
+
import { execSync } from "node:child_process";
|
|
7
|
+
// ── Helpers ──────────────────────────────────────────────────────────────────
|
|
8
|
+
// ANSI escape sequences used to style CLI output (reset with RESET).
const BOLD = "\x1b[1m";
const DIM = "\x1b[2m";
const GREEN = "\x1b[32m";
const CYAN = "\x1b[36m";
const YELLOW = "\x1b[33m";
const RESET = "\x1b[0m";
|
|
14
|
+
/** Print the CLI welcome box to stdout. */
function banner() {
  console.log(`
${BOLD}${CYAN} ╔═══════════════════════════════════════╗
 ║      create-langgraph-app             ║
 ║   Multi-agent starter in seconds      ║
 ╚═══════════════════════════════════════╝${RESET}
`);
}
|
|
22
|
+
/**
 * Prompt for a single free-text answer.
 * Returns the trimmed input, or defaultVal (shown dimmed in the prompt)
 * when the user just presses Enter, or "" when neither is available.
 */
async function ask(rl, question, defaultVal) {
  const hint = defaultVal ? ` ${DIM}(${defaultVal})${RESET}` : "";
  const raw = await rl.question(`${BOLD}${question}${hint}: ${RESET}`);
  const trimmed = raw.trim();
  if (trimmed) return trimmed;
  return defaultVal || "";
}
|
|
27
|
+
/**
 * Present a numbered menu and return an array of chosen option values.
 * multi=false: a single 1-based number; invalid input falls back to the
 * first option. multi=true: comma-separated numbers; out-of-range entries
 * are dropped, and an empty/invalid answer selects ALL options.
 */
async function choose(rl, question, options, multi = false) {
  console.log(`\n${BOLD}${question}${RESET}`);
  options.forEach((opt, i) => {
    console.log(`  ${DIM}${i + 1}.${RESET} ${opt.label}`);
  });
  if (multi) {
    const answer = await rl.question(`${DIM}Enter numbers separated by commas (e.g. 1,2,3): ${RESET}`);
    const picked = answer
      .split(",")
      .map((part) => parseInt(part.trim(), 10) - 1)
      .filter((idx) => idx >= 0 && idx < options.length)
      .map((idx) => options[idx].value);
    // No valid selection means "give me everything".
    if (picked.length === 0) return options.map((opt) => opt.value);
    return picked;
  }
  const answer = await rl.question(`${DIM}Enter number: ${RESET}`);
  const idx = parseInt(answer.trim(), 10) - 1;
  return [options[idx]?.value ?? options[0].value];
}
|
|
48
|
+
// ── Templates ────────────────────────────────────────────────────────────────
|
|
49
|
+
// Per-provider scaffolding defaults: which env var holds the API key
// (empty string = no key required) and the model used when LLM_MODEL
// is not set in the generated project's .env.
const PROVIDERS = {
  openai: { envKey: "OPENAI_API_KEY", defaultModel: "gpt-4o-mini" },
  anthropic: { envKey: "ANTHROPIC_API_KEY", defaultModel: "claude-sonnet-4-20250514" },
  google: { envKey: "GOOGLE_API_KEY", defaultModel: "gemini-2.0-flash" },
  groq: { envKey: "GROQ_API_KEY", defaultModel: "llama-3.3-70b-versatile" },
  ollama: { envKey: "", defaultModel: "llama3.2" },
};
|
|
56
|
+
/**
 * Render the generated project's .env / .env.example contents for the
 * chosen provider: provider name, API-key line (or a "no key needed"
 * note for ollama), model/temperature defaults, port, and commented-out
 * LangSmith tracing settings.
 */
function generateEnv(config) {
  const provider = PROVIDERS[config.provider];
  const keyLine = provider.envKey
    ? `${provider.envKey}=`
    : `# No API key needed for ${config.provider}`;
  const lines = [
    `# LLM Provider`,
    `LLM_PROVIDER=${config.provider}`,
    ``,
    `# API key`,
    keyLine,
    ``,
    `# Model (optional — defaults to ${provider.defaultModel})`,
    `# LLM_MODEL=${provider.defaultModel}`,
    `LLM_TEMPERATURE=0`,
    ``,
    `PORT=3000`,
    ``,
    `# LangSmith tracing (optional)`,
    `# LANGCHAIN_TRACING_V2=true`,
    `# LANGSMITH_API_KEY=`,
    `# LANGSMITH_PROJECT=${config.name}`,
  ];
  return `${lines.join("\n")}\n`;
}
|
|
73
|
+
/**
 * Build the scaffolded project's package.json as a pretty-printed JSON
 * string. The dependency set varies with the chosen provider (its
 * integration package is pinned to "latest") and the selected patterns
 * (swarm and rag pull in extra packages). Dependencies are emitted in
 * locale-compare order.
 */
function generatePackageJson(config) {
  // Maps a provider id to its LangChain integration package.
  const providerPackages = {
    openai: "@langchain/openai",
    anthropic: "@langchain/anthropic",
    google: "@langchain/google-genai",
    groq: "@langchain/groq",
    ollama: "@langchain/ollama",
  };

  const deps = {
    "@langchain/core": "^1.1.39",
    "@langchain/langgraph": "^1.2.7",
    "@langchain/langgraph-supervisor": "^1.0.1",
    "@langchain/mcp-adapters": "^1.1.3",
    dotenv: "^17.4.0",
    fastify: "^5.8.4",
    langchain: "^1.3.0",
    zod: "^4.3.6",
  };
  if (config.patterns.includes("swarm")) {
    deps["@langchain/langgraph-swarm"] = "^1.0.1";
  }
  if (config.patterns.includes("rag")) {
    deps["@langchain/textsplitters"] = "^1.0.1";
  }
  deps[providerPackages[config.provider]] = "latest";

  const sortedDeps = Object.fromEntries(
    Object.entries(deps).sort(([a], [b]) => a.localeCompare(b)),
  );

  const pkg = {
    name: config.name,
    version: "0.1.0",
    private: true,
    type: "module",
    scripts: {
      dev: "tsx src/index.ts",
      "dev:http": "tsx src/server.ts",
      test: "vitest run",
      typecheck: "tsc --noEmit",
    },
    dependencies: sortedDeps,
    devDependencies: {
      "@types/node": "^25.5.2",
      tsx: "^4.21.0",
      typescript: "^6.0.2",
      vitest: "^4.1.2",
    },
  };
  return `${JSON.stringify(pkg, null, 2)}\n`;
}
|
|
120
|
+
/**
 * Render the generated project's tsconfig.json: strict ES2022 + Bundler
 * resolution, Node types, compiling src/ into dist/.
 */
function generateTsConfig() {
  const tsconfig = {
    compilerOptions: {
      target: "ES2022",
      module: "ESNext",
      moduleResolution: "Bundler",
      strict: true,
      esModuleInterop: true,
      skipLibCheck: true,
      resolveJsonModule: true,
      types: ["node"],
      outDir: "dist",
    },
    include: ["src"],
  };
  return `${JSON.stringify(tsconfig, null, 2)}\n`;
}
|
|
136
|
+
/**
 * Emit the source of src/config/env.ts for the generated project: loads
 * dotenv, validates LLM_PROVIDER against the known providers, fails fast
 * when the provider's API-key env var is missing, and exports the model,
 * temperature, and port settings.
 * NOTE(review): the config argument is currently unused — the emitted
 * file is provider-agnostic; kept for signature compatibility.
 */
function generateEnvConfig(config) {
  const source = `import "dotenv/config";

const VALID_PROVIDERS = ["openai", "anthropic", "google", "groq", "ollama"] as const;
export type LlmProvider = (typeof VALID_PROVIDERS)[number];

function resolveProvider(): LlmProvider {
  const raw = (process.env.LLM_PROVIDER || "openai").toLowerCase();
  if (!VALID_PROVIDERS.includes(raw as LlmProvider)) {
    throw new Error(\`Invalid LLM_PROVIDER "\${raw}". Must be one of: \${VALID_PROVIDERS.join(", ")}\`);
  }
  return raw as LlmProvider;
}

const API_KEY_MAP: Record<LlmProvider, string> = {
  openai: "OPENAI_API_KEY",
  anthropic: "ANTHROPIC_API_KEY",
  google: "GOOGLE_API_KEY",
  groq: "GROQ_API_KEY",
  ollama: "",
};

export const LLM_PROVIDER = resolveProvider();

const requiredKey = API_KEY_MAP[LLM_PROVIDER];
if (requiredKey && !process.env[requiredKey]) {
  throw new Error(\`\${requiredKey} is required for provider "\${LLM_PROVIDER}" but not set in .env\`);
}

export const LLM_MODEL = process.env.LLM_MODEL || undefined;
export const LLM_TEMPERATURE = Number(process.env.LLM_TEMPERATURE ?? 0);
export const PORT = Number(process.env.PORT ?? 3000);
`;
  return source;
}
|
|
170
|
+
/**
 * Emit the source of src/config/llm.ts for the generated project: lazily
 * loads only the selected provider's chat-model package and exports a
 * configured `llm` instance.
 *
 * Fix: the scaffolded project is ESM ("type": "module", run via tsx), so
 * bare `require()` is not defined at runtime and the previous output
 * crashed with "require is not defined". The emitted file now builds a
 * CommonJS-style require via `createRequire(import.meta.url)`, keeping
 * the lazy per-provider loading (only the installed package is touched).
 */
function generateLlmConfig() {
  return `import { createRequire } from "node:module";
import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { LLM_PROVIDER, LLM_MODEL, LLM_TEMPERATURE } from "./env";

// ESM has no global require; createRequire lets us load the provider
// package lazily so the other (uninstalled) providers are never imported.
const require = createRequire(import.meta.url);

const DEFAULTS: Record<string, string> = {
  openai: "gpt-4o-mini",
  anthropic: "claude-sonnet-4-20250514",
  google: "gemini-2.0-flash",
  groq: "llama-3.3-70b-versatile",
  ollama: "llama3.2",
};

function createLlm(): BaseChatModel {
  const model = LLM_MODEL ?? DEFAULTS[LLM_PROVIDER] ?? DEFAULTS.openai;
  const temperature = LLM_TEMPERATURE;

  switch (LLM_PROVIDER) {
    case "anthropic": {
      const { ChatAnthropic } = require("@langchain/anthropic");
      return new ChatAnthropic({ modelName: model, temperature });
    }
    case "google": {
      const { ChatGoogleGenerativeAI } = require("@langchain/google-genai");
      return new ChatGoogleGenerativeAI({ modelName: model, temperature });
    }
    case "groq": {
      const { ChatGroq } = require("@langchain/groq");
      return new ChatGroq({ modelName: model, temperature });
    }
    case "ollama": {
      const { ChatOllama } = require("@langchain/ollama");
      return new ChatOllama({ model, temperature });
    }
    default: {
      const { ChatOpenAI } = require("@langchain/openai");
      return new ChatOpenAI({ modelName: model, temperature });
    }
  }
}

export const llm = createLlm();
`;
}
|
|
213
|
+
/**
 * Emit the source of src/agents/factory.ts: a thin typed wrapper around
 * createReactAgent that only forwards prompt/responseFormat when given.
 */
function generateAgentFactory() {
  const source = `import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
import type { StructuredToolInterface } from "@langchain/core/tools";
import { createReactAgent, type CreateReactAgentParams } from "@langchain/langgraph/prebuilt";

export interface MakeAgentParams {
  name: string;
  llm: BaseChatModel;
  tools?: StructuredToolInterface[];
  system?: string;
  responseFormat?: CreateReactAgentParams["responseFormat"];
}

export function makeAgent({ name, llm, tools = [], system, responseFormat }: MakeAgentParams) {
  return createReactAgent({
    name,
    llm,
    tools,
    ...(system ? { prompt: system } : {}),
    ...(responseFormat ? { responseFormat } : {}),
  });
}
`;
  return source;
}
|
|
237
|
+
/**
 * Emit the source of src/agents/supervisor.ts: wraps createSupervisor and
 * compiles it with in-memory checkpointer/store defaults.
 */
function generateSupervisorHelper() {
  const source = `import type { BaseChatModel } from "@langchain/core/language_models/chat_models";
import type { BaseCheckpointSaver, BaseStore } from "@langchain/langgraph-checkpoint";
import { createSupervisor } from "@langchain/langgraph-supervisor";
import { MemorySaver, InMemoryStore } from "@langchain/langgraph";

type SupervisorParams = Parameters<typeof createSupervisor>[0];

export interface MakeSupervisorParams extends SupervisorParams {
  checkpointer?: BaseCheckpointSaver;
  store?: BaseStore;
}

export function makeSupervisor({ checkpointer, store, ...params }: MakeSupervisorParams) {
  return createSupervisor(params).compile({
    checkpointer: checkpointer ?? new MemorySaver(),
    store: store ?? new InMemoryStore(),
  });
}
`;
  return source;
}
|
|
258
|
+
/**
 * Emit a standalone supervisor demo app (math + writer agents behind a
 * supervisor) as TypeScript source.
 *
 * Fix: the emitted file imported `{ llm }` from "./config/env", but `llm`
 * is exported by config/llm (env.ts only exports provider settings), so
 * the generated code failed to resolve. Import from "./config/llm".
 * NOTE(review): this generator is not referenced by getPatternFiles —
 * looks like dead code kept for compatibility; verify before removal.
 */
function generateSupervisorApp() {
  return `import { z } from "zod";
import { tool } from "@langchain/core/tools";
import { llm } from "./config/llm";
import { makeAgent } from "./agents/factory";
import { makeSupervisor } from "./agents/supervisor";

const add = tool(async ({ a, b }) => String(a + b), {
  name: "add", description: "Add two numbers",
  schema: z.object({ a: z.number(), b: z.number() }),
});

const multiply = tool(async ({ a, b }) => String(a * b), {
  name: "multiply", description: "Multiply two numbers",
  schema: z.object({ a: z.number(), b: z.number() }),
});

export function createApp() {
  const math = makeAgent({
    name: "math_expert", llm,
    tools: [add, multiply],
    system: "You are a math expert. Use tools to compute answers.",
  });

  const writer = makeAgent({
    name: "writer", llm, tools: [],
    system: "You write crisp, structured answers.",
  });

  return makeSupervisor({
    agents: [math, writer], llm,
    outputMode: "last_message",
    supervisorName: "supervisor",
  });
}
`;
}
|
|
295
|
+
// Pattern-specific: only files for selected patterns are generated
|
|
296
|
+
/**
 * Assemble the list of { path, content } files to write into the new
 * project. Core config/factory files are always included; pattern app
 * files plus their src/index.ts import/demo snippets are emitted only
 * for the patterns the user selected.
 */
function getPatternFiles(config) {
  const files = [];

  // Core files every generated project needs.
  files.push({ path: "src/config/env.ts", content: generateEnvConfig(config) });
  files.push({ path: "src/config/llm.ts", content: generateLlmConfig() });
  files.push({ path: "src/agents/factory.ts", content: generateAgentFactory() });
  files.push({ path: "src/agents/supervisor.ts", content: generateSupervisorHelper() });

  // src/index.ts is assembled from these: one import + one demo per pattern.
  const imports = [];
  const demos = [];

  if (config.patterns.includes("supervisor")) {
    files.push({
      path: "src/apps/supervisor.ts",
      content: `import { llm } from "../config/llm";
import { z } from "zod";
import { tool } from "@langchain/core/tools";
import { makeAgent } from "../agents/factory";
import { makeSupervisor } from "../agents/supervisor";

const add = tool(async ({ a, b }) => String(a + b), {
  name: "add", description: "Add two numbers",
  schema: z.object({ a: z.number(), b: z.number() }),
});

export function createSupervisorApp() {
  const math = makeAgent({
    name: "math_expert", llm,
    tools: [add],
    system: "You are a math expert.",
  });

  const writer = makeAgent({
    name: "writer", llm, tools: [],
    system: "You write crisp, structured answers.",
  });

  return makeSupervisor({
    agents: [math, writer], llm,
    outputMode: "last_message",
    supervisorName: "supervisor",
  });
}
`,
    });
    imports.push(`import { createSupervisorApp } from "./apps/supervisor";`);
    demos.push(`  console.log("=== Supervisor Demo ===");
  const supervisorApp = createSupervisorApp();
  const sup = await supervisorApp.invoke(
    { messages: [{ role: "user", content: "What is 10 + 15?" }] },
    { configurable: { thread_id: "demo" } }
  );
  console.log("Result:", sup.messages.at(-1)?.content);`);
  }

  if (config.patterns.includes("swarm")) {
    files.push({
      path: "src/agents/swarm.ts",
      content: `import type { BaseCheckpointSaver, BaseStore } from "@langchain/langgraph-checkpoint";
import { MessagesAnnotation, Annotation, MemorySaver } from "@langchain/langgraph";
import { createSwarm } from "@langchain/langgraph-swarm";

export const SwarmState = Annotation.Root({
  ...MessagesAnnotation.spec,
  activeAgent: Annotation<string>(),
});

type SwarmParams = Parameters<typeof createSwarm>[0];

export function makeSwarm({
  agents, defaultActiveAgent, checkpointer,
}: {
  agents: SwarmParams["agents"];
  defaultActiveAgent: string;
  checkpointer?: BaseCheckpointSaver;
  store?: BaseStore;
}) {
  return createSwarm({ agents, defaultActiveAgent, stateSchema: SwarmState })
    .compile({ checkpointer: checkpointer ?? new MemorySaver() });
}
`,
    });
    files.push({
      path: "src/agents/handoff.ts",
      content: `import { z } from "zod";
import { ToolMessage } from "@langchain/core/messages";
import { tool } from "@langchain/core/tools";
import { Command, MessagesAnnotation, getCurrentTaskInput } from "@langchain/langgraph";

export function createHandoffTool({ agentName, description }: { agentName: string; description?: string }) {
  const toolName = \`transfer_to_\${agentName.replace(/\\s+/g, "_").toLowerCase()}\`;

  return tool(
    async (_args, cfg) => {
      const state = getCurrentTaskInput() as (typeof MessagesAnnotation)["State"];
      const messages = state.messages ?? [];
      const tm = new ToolMessage({
        content: \`Transferred to \${agentName}\`,
        name: toolName,
        tool_call_id: cfg.toolCall.id,
      });
      return new Command({
        goto: agentName,
        graph: Command.PARENT,
        update: { messages: messages.concat(tm), activeAgent: agentName },
      });
    },
    { name: toolName, description: description ?? \`Ask \${agentName} for help\`, schema: z.object({}) }
  );
}
`,
    });
    files.push({
      path: "src/apps/swarm.ts",
      content: `import { llm } from "../config/llm";
import { z } from "zod";
import { tool } from "@langchain/core/tools";
import { makeAgent } from "../agents/factory";
import { createHandoffTool } from "../agents/handoff";
import { makeSwarm, type SwarmState } from "../agents/swarm";

const add = tool(async ({ a, b }) => String(a + b), {
  name: "add", description: "Add two numbers",
  schema: z.object({ a: z.number(), b: z.number() }),
});

const multiply = tool(async ({ a, b }) => String(a * b), {
  name: "multiply", description: "Multiply two numbers",
  schema: z.object({ a: z.number(), b: z.number() }),
});

export function createSwarmApp() {
  const alice = makeAgent({
    name: "alice", llm,
    tools: [add, createHandoffTool({ agentName: "bob" })],
    system: "You are Alice, an addition expert.",
  });

  const bob = makeAgent({
    name: "bob", llm,
    tools: [multiply, createHandoffTool({ agentName: "alice" })],
    system: "You are Bob, a multiplication expert.",
  });

  return makeSwarm({
    agents: [alice, bob] as any,
    defaultActiveAgent: "alice",
  });
}
`,
    });
    imports.push(`import { createSwarmApp } from "./apps/swarm";`);
    demos.push(`  console.log("\\n=== Swarm Demo ===");
  const swarmApp = createSwarmApp();
  const swarm = await swarmApp.invoke(
    { messages: [{ role: "user", content: "add 5 and 7, then talk to bob and multiply by 3" }] },
    { configurable: { thread_id: "swarm-demo" } }
  );
  console.log("Result:", swarm.messages.at(-1)?.content);`);
  }

  if (config.patterns.includes("hitl")) {
    files.push({
      path: "src/apps/interrupt.ts",
      content: `import { z } from "zod";
import { tool } from "@langchain/core/tools";
import { interrupt } from "@langchain/langgraph";
import { llm } from "../config/llm";
import { makeAgent } from "../agents/factory";
import { makeSupervisor } from "../agents/supervisor";

const deleteRecord = tool(
  async (args) => {
    const decision = interrupt({
      type: "approval_required",
      message: \`Delete record "\${args.id}"? This cannot be undone.\`,
      args,
    });
    return decision === "yes"
      ? \`Record "\${args.id}" deleted.\`
      : \`Deletion of "\${args.id}" rejected.\`;
  },
  {
    name: "delete_record",
    description: "Delete a record by ID. Requires human approval.",
    schema: z.object({ id: z.string() }),
  }
);

export function createInterruptApp() {
  const dbAdmin = makeAgent({
    name: "db_admin", llm,
    tools: [deleteRecord],
    system: "You are a database administrator.",
  });

  return makeSupervisor({
    agents: [dbAdmin], llm,
    outputMode: "last_message",
    supervisorName: "interrupt_supervisor",
  });
}
`,
    });
    imports.push(`import { Command } from "@langchain/langgraph";`);
    imports.push(`import { createInterruptApp } from "./apps/interrupt";`);
    demos.push(`  console.log("\\n=== Human-in-the-Loop Demo ===");
  const interruptApp = createInterruptApp();
  const hitlCfg = { configurable: { thread_id: "hitl-demo" } };
  await interruptApp.invoke(
    { messages: [{ role: "user", content: "delete record rec_2" }] },
    hitlCfg
  );
  const state = await interruptApp.getState(hitlCfg) as any;
  if ((state.next ?? []).length > 0) {
    console.log("Graph paused — approving...");
    const resumed = await interruptApp.invoke(new Command({ resume: "yes" }), hitlCfg);
    console.log("Result:", resumed.messages.at(-1)?.content);
  }`);
  }

  if (config.patterns.includes("structured")) {
    files.push({
      path: "src/apps/analyst.ts",
      content: `import { z } from "zod";
import { llm } from "../config/llm";
import { makeAgent } from "../agents/factory";
import { makeSupervisor } from "../agents/supervisor";

const SummarySchema = z.object({
  title: z.string(),
  keyPoints: z.array(z.string()),
  sentiment: z.enum(["positive", "negative", "neutral"]),
});

export function createAnalystApp() {
  const analyst = makeAgent({
    name: "analyst", llm, tools: [],
    system: "Analyze text and produce structured summaries.",
    responseFormat: SummarySchema,
  });

  return makeSupervisor({
    agents: [analyst], llm,
    outputMode: "last_message",
    supervisorName: "analyst_supervisor",
  });
}
`,
    });
    imports.push(`import { createAnalystApp } from "./apps/analyst";`);
    demos.push(`  console.log("\\n=== Structured Output Demo ===");
  const analystApp = createAnalystApp();
  const analysis = await analystApp.invoke(
    { messages: [{ role: "user", content: "Analyze: Revenue grew 25% but churn increased 8%." }] },
    { configurable: { thread_id: "analyst-demo" } }
  );
  console.log("Result:", analysis.messages.at(-1)?.content);`);
  }

  if (config.patterns.includes("rag")) {
    files.push({
      path: "src/apps/rag.ts",
      content: `import { llm } from "../config/llm";
import { makeAgent } from "../agents/factory";
import { makeSupervisor } from "../agents/supervisor";
// TODO: Add your vector store, embeddings, and retrieval tool here.
// See the full starter kit for a complete RAG implementation:
// https://github.com/ac12644/langgraph-starter-kit

export function createRagApp() {
  const ragAgent = makeAgent({
    name: "rag_agent", llm, tools: [],
    system: "You are a knowledgeable assistant. Answer questions based on your knowledge.",
  });

  return makeSupervisor({
    agents: [ragAgent], llm,
    outputMode: "last_message",
    supervisorName: "rag_supervisor",
  });
}
`,
    });
    imports.push(`import { createRagApp } from "./apps/rag";`);
    demos.push(`  console.log("\\n=== RAG Demo ===");
  const ragApp = createRagApp();
  const rag = await ragApp.invoke(
    { messages: [{ role: "user", content: "What is RAG and how does it work?" }] },
    { configurable: { thread_id: "rag-demo" } }
  );
  console.log("Result:", rag.messages.at(-1)?.content);`);
  }

  // src/index.ts runs every selected demo sequentially.
  files.push({
    path: "src/index.ts",
    content: `import "./config/env";
${imports.join("\n")}

async function main() {
${demos.join("\n\n")}
}

main().catch((err) => { console.error(err); process.exit(1); });
`,
  });

  return files;
}
|
|
599
|
+
// ── Main ─────────────────────────────────────────────────────────────────────
|
|
600
|
+
/**
 * CLI entry point: prompt for project name, provider, and patterns;
 * refuse to overwrite an existing directory; write the scaffolded files;
 * run `npm install`; print next steps. The readline interface is always
 * closed on the way out.
 */
async function main() {
  banner();
  const rl = readline.createInterface({ input: stdin, output: stdout });
  try {
    const name = await ask(rl, "Project name", "my-langgraph-app");

    const [provider] = await choose(rl, "LLM provider?", [
      { value: "openai", label: "OpenAI (gpt-4o-mini)" },
      { value: "anthropic", label: "Anthropic (Claude Sonnet)" },
      { value: "google", label: "Google (Gemini 2.0 Flash)" },
      { value: "groq", label: "Groq (Llama 3.3 70B)" },
      { value: "ollama", label: "Ollama (local, no API key)" },
    ]);

    const patterns = await choose(rl, "Which patterns? (select multiple)", [
      { value: "supervisor", label: "Supervisor — central coordinator + worker agents" },
      { value: "swarm", label: "Swarm — peer-to-peer agent handoffs" },
      { value: "hitl", label: "Human-in-the-Loop — approval before dangerous actions" },
      { value: "structured", label: "Structured Output — typed JSON responses" },
      { value: "rag", label: "RAG — retrieval-augmented generation" },
    ], true);

    const config = { name, provider, patterns };

    // Never clobber an existing directory.
    const projectDir = path.resolve(process.cwd(), name);
    if (fs.existsSync(projectDir)) {
      console.log(`\n${YELLOW}Directory "${name}" already exists. Aborting.${RESET}`);
      process.exit(1);
    }

    console.log(`\n${DIM}Creating project...${RESET}`);
    fs.mkdirSync(projectDir, { recursive: true });

    // Static config files first, then the pattern-dependent sources.
    const filesToWrite = [
      { path: "package.json", content: generatePackageJson(config) },
      { path: "tsconfig.json", content: generateTsConfig() },
      { path: ".env", content: generateEnv(config) },
      { path: ".env.example", content: generateEnv(config) },
      { path: ".gitignore", content: "node_modules\ndist\n.env\n" },
      ...getPatternFiles(config),
    ];
    for (const file of filesToWrite) {
      const fullPath = path.join(projectDir, file.path);
      fs.mkdirSync(path.dirname(fullPath), { recursive: true });
      fs.writeFileSync(fullPath, file.content);
    }

    console.log(`${DIM}Installing dependencies...${RESET}\n`);
    execSync("npm install", { cwd: projectDir, stdio: "inherit" });

    console.log(`
${GREEN}${BOLD}Done!${RESET} Your project is ready.

  ${CYAN}cd ${name}${RESET}
  ${DIM}# Add your API key to .env${RESET}
  ${CYAN}npm run dev${RESET}

${DIM}Patterns: ${patterns.join(", ")}
Provider: ${provider}${RESET}
`);
  } finally {
    rl.close();
  }
}
|
|
661
|
+
// Top-level trap: any unhandled failure is reported and exits non-zero.
main().catch((err) => {
  console.error("Error:", err);
  process.exit(1);
});
|
package/package.json
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "create-langgraph-app",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Scaffold a new LangGraph multi-agent project in seconds",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"bin": {
|
|
7
|
+
"create-langgraph-app": "dist/create.js"
|
|
8
|
+
},
|
|
9
|
+
"scripts": {
|
|
10
|
+
"build": "tsx --no-cache create.ts > /dev/null && tsc",
|
|
11
|
+
"dev": "tsx create.ts"
|
|
12
|
+
},
|
|
13
|
+
"files": [
|
|
14
|
+
"dist"
|
|
15
|
+
],
|
|
16
|
+
"keywords": [
|
|
17
|
+
"langgraph",
|
|
18
|
+
"langchain",
|
|
19
|
+
"ai-agents",
|
|
20
|
+
"multi-agent",
|
|
21
|
+
"scaffold",
|
|
22
|
+
"cli",
|
|
23
|
+
"create"
|
|
24
|
+
],
|
|
25
|
+
"license": "Apache-2.0",
|
|
26
|
+
"author": "Abhishek Chauhan",
|
|
27
|
+
"repository": {
|
|
28
|
+
"type": "git",
|
|
29
|
+
"url": "git+https://github.com/ac12644/langgraph-starter-kit.git"
|
|
30
|
+
},
|
|
31
|
+
"dependencies": {},
|
|
32
|
+
"devDependencies": {
|
|
33
|
+
"@types/node": "^25.5.2",
|
|
34
|
+
"tsx": "^4.21.0",
|
|
35
|
+
"typescript": "^6.0.2"
|
|
36
|
+
}
|
|
37
|
+
}
|