@tsdevstack/cli-mcp 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +151 -0
- package/dist/context/index.d.ts +10 -0
- package/dist/index.d.ts +13 -0
- package/dist/index.js +1211 -0
- package/dist/mcp-serve.d.ts +7 -0
- package/dist/register-commands.d.ts +8 -0
- package/dist/resources/guide-config.d.ts +8 -0
- package/dist/resources/guide-nest-common.d.ts +9 -0
- package/dist/resources/guide-workflows.d.ts +8 -0
- package/dist/resources/guide.d.ts +8 -0
- package/dist/resources/guide.test.d.ts +1 -0
- package/dist/resources/kong-routes.d.ts +8 -0
- package/dist/resources/kong-routes.test.d.ts +1 -0
- package/dist/resources/project-state.d.ts +9 -0
- package/dist/resources/project-state.test.d.ts +1 -0
- package/dist/resources/register-resources.d.ts +7 -0
- package/dist/resources/secrets-context.d.ts +9 -0
- package/dist/resources/secrets-context.test.d.ts +1 -0
- package/dist/server.d.ts +7 -0
- package/dist/server.test.d.ts +1 -0
- package/dist/tools/action/action-tools.test.d.ts +1 -0
- package/dist/tools/action/add-service.d.ts +7 -0
- package/dist/tools/action/cloud-init.d.ts +7 -0
- package/dist/tools/action/cloud-secrets-push.d.ts +7 -0
- package/dist/tools/action/cloud-secrets-remove.d.ts +7 -0
- package/dist/tools/action/cloud-secrets-set.d.ts +7 -0
- package/dist/tools/action/deploy-kong.d.ts +7 -0
- package/dist/tools/action/deploy-lb.d.ts +7 -0
- package/dist/tools/action/deploy-scheduler.d.ts +7 -0
- package/dist/tools/action/deploy-schedulers.d.ts +7 -0
- package/dist/tools/action/deploy-service.d.ts +7 -0
- package/dist/tools/action/deploy-services.d.ts +7 -0
- package/dist/tools/action/generate-client.d.ts +7 -0
- package/dist/tools/action/generate-docker-compose.d.ts +7 -0
- package/dist/tools/action/generate-kong.d.ts +7 -0
- package/dist/tools/action/generate-secrets.d.ts +7 -0
- package/dist/tools/action/infra-bootstrap.d.ts +7 -0
- package/dist/tools/action/infra-build-docker.d.ts +7 -0
- package/dist/tools/action/infra-build-kong.d.ts +7 -0
- package/dist/tools/action/infra-deploy.d.ts +7 -0
- package/dist/tools/action/infra-destroy.d.ts +7 -0
- package/dist/tools/action/infra-generate-ci.d.ts +7 -0
- package/dist/tools/action/infra-generate-docker.d.ts +7 -0
- package/dist/tools/action/infra-generate.d.ts +7 -0
- package/dist/tools/action/infra-init-ci.d.ts +7 -0
- package/dist/tools/action/infra-init.d.ts +7 -0
- package/dist/tools/action/infra-push-docker.d.ts +7 -0
- package/dist/tools/action/register-detached-worker.d.ts +7 -0
- package/dist/tools/action/register.d.ts +7 -0
- package/dist/tools/action/remove-detached-worker.d.ts +7 -0
- package/dist/tools/action/remove-scheduler.d.ts +7 -0
- package/dist/tools/action/remove-service-cloud.d.ts +7 -0
- package/dist/tools/action/remove-service.d.ts +7 -0
- package/dist/tools/action/run-db-migrate.d.ts +7 -0
- package/dist/tools/action/sync.d.ts +7 -0
- package/dist/tools/action/unregister-detached-worker.d.ts +7 -0
- package/dist/tools/action/validate-service.d.ts +7 -0
- package/dist/tools/query/diff-secrets.d.ts +7 -0
- package/dist/tools/query/get-infrastructure-config.d.ts +7 -0
- package/dist/tools/query/get-project-config.d.ts +7 -0
- package/dist/tools/query/get-secret.d.ts +7 -0
- package/dist/tools/query/get-service-status.d.ts +7 -0
- package/dist/tools/query/infra-plan.d.ts +7 -0
- package/dist/tools/query/infra-status.d.ts +7 -0
- package/dist/tools/query/list-deployed-services.d.ts +7 -0
- package/dist/tools/query/list-environments.d.ts +7 -0
- package/dist/tools/query/list-schedulers.d.ts +7 -0
- package/dist/tools/query/list-secrets.d.ts +7 -0
- package/dist/tools/query/list-services.d.ts +7 -0
- package/dist/tools/query/plan-db-migrate.d.ts +7 -0
- package/dist/tools/query/query-tools.test.d.ts +1 -0
- package/dist/tools/query/register.d.ts +7 -0
- package/dist/tools/register-tools.d.ts +7 -0
- package/dist/utils/run-command.d.ts +8 -0
- package/dist/utils/run-command.test.d.ts +1 -0
- package/package.json +69 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,1211 @@
|
|
|
1
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
2
|
+
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
3
|
+
import { readFileSync, readdirSync } from "node:fs";
|
|
4
|
+
import { join } from "node:path";
|
|
5
|
+
import { z } from "zod";
|
|
6
|
+
import { execFile } from "node:child_process";
|
|
7
|
+
// Module-level CLI context, injected exactly once at startup via initContext().
let context;
/**
 * Stores the CLI context used by the MCP server. This is the dependency
 * injection point that lets tools reuse the CLI's command plumbing.
 *
 * @param {object} ctx - Context object exposing wrapCommand(fn).
 */
function initContext(ctx) {
    context = ctx;
}
/**
 * Delegates to the injected context's wrapCommand.
 *
 * Fix: fails fast with a descriptive error when initContext() was never
 * called — the original would throw an opaque TypeError on the property
 * access instead.
 *
 * @param {Function} fn - Command implementation to wrap.
 * @returns {*} Whatever context.wrapCommand returns.
 * @throws {Error} If the context has not been initialized.
 */
function wrapCommand(fn) {
    if (context === undefined) {
        throw new Error('wrapCommand called before initContext()');
    }
    return context.wrapCommand(fn);
}
|
|
14
|
+
/**
 * Registers the read-only `list_services` tool: reads .tsdevstack/config.json
 * from the current working directory and returns its `services` section.
 */
function registerListServicesTool(server) {
    const annotations = {
        title: 'List Services',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    server.tool('list_services', 'List all services in the project with their types and ports. Use this first to understand the project.', {}, annotations, async () => {
        try {
            const configPath = join(process.cwd(), '.tsdevstack', 'config.json');
            const config = JSON.parse(readFileSync(configPath, 'utf-8'));
            return {
                content: [
                    {
                        type: 'text',
                        text: JSON.stringify(config.services, null, 2)
                    }
                ]
            };
        } catch (err) {
            // Fix: the original reported "config.json not found" for every
            // failure, including a present-but-malformed config.json.
            // Distinguish a missing file from other read/parse errors.
            const text = err && err.code === 'ENOENT'
                ? 'config.json not found. Run `npx tsdevstack init` to create a project.'
                : `Failed to read config.json: ${err instanceof Error ? err.message : String(err)}`;
            return {
                content: [
                    {
                        type: 'text',
                        text
                    }
                ],
                isError: true
            };
        }
    });
}
|
|
46
|
+
/**
 * Registers the read-only `list_environments` tool: scans .tsdevstack/ for
 * `.credentials.<provider>.json` files and reports, per provider, the
 * environment names (top-level keys) found inside each file.
 */
function registerListEnvironmentsTool(server) {
    const annotations = {
        title: 'List Environments',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    // Small helper so the happy paths read as one-liners.
    const textResult = (text) => ({ content: [{ type: 'text', text }] });
    server.tool('list_environments', 'List configured cloud environments (dev, staging, prod) and their providers.', {}, annotations, async () => {
        try {
            const tsdevstackDir = join(process.cwd(), '.tsdevstack');
            const credentialFiles = readdirSync(tsdevstackDir)
                .filter((f) => f.startsWith('.credentials.') && f.endsWith('.json'));
            if (credentialFiles.length === 0) {
                return textResult('No cloud environments configured. Run `npx tsdevstack cloud:init --gcp|--aws|--azure` to initialize a provider.');
            }
            const environments = credentialFiles.map((file) => {
                // File name pattern: `.credentials.<provider>.json`.
                const provider = file.replace('.credentials.', '').replace('.json', '');
                const content = JSON.parse(readFileSync(join(tsdevstackDir, file), 'utf-8'));
                return { provider, environments: Object.keys(content) };
            });
            return textResult(JSON.stringify(environments, null, 2));
        } catch {
            return {
                content: [
                    {
                        type: 'text',
                        text: 'Could not read credential files. Ensure .tsdevstack/ directory exists.'
                    }
                ],
                isError: true
            };
        }
    });
}
|
|
97
|
+
/**
 * Registers the read-only `get_project_config` tool: returns the raw text of
 * .tsdevstack/config.json from the current working directory.
 */
function registerGetProjectConfigTool(server) {
    const annotations = {
        title: 'Get Project Config',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async () => {
        try {
            const raw = readFileSync(join(process.cwd(), '.tsdevstack', 'config.json'), 'utf-8');
            return { content: [{ type: 'text', text: raw }] };
        } catch {
            // Best-effort: any read failure maps to the same guidance message.
            return {
                content: [
                    {
                        type: 'text',
                        text: 'config.json not found. Run `npx tsdevstack init` to create a project.'
                    }
                ],
                isError: true
            };
        }
    };
    server.tool('get_project_config', 'Full project configuration including service names, types, and workspace setup.', {}, annotations, handler);
}
|
|
129
|
+
/**
 * Registers the read-only `get_infrastructure_config` tool: returns the raw
 * text of the user-created .tsdevstack/infrastructure.json file.
 */
function registerGetInfrastructureConfigTool(server) {
    const annotations = {
        title: 'Get Infrastructure Config',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async () => {
        try {
            const raw = readFileSync(join(process.cwd(), '.tsdevstack', 'infrastructure.json'), 'utf-8');
            return { content: [{ type: 'text', text: raw }] };
        } catch {
            // This file is authored by the user, so absence is expected on
            // fresh projects; point at the guide instead of erroring hard.
            return {
                content: [
                    {
                        type: 'text',
                        text: 'infrastructure.json not found. This is a user-created file — see the guide/config resource for how to create it.'
                    }
                ],
                isError: true
            };
        }
    };
    server.tool('get_infrastructure_config', 'Per-environment infrastructure settings: DB tiers, domains, scaling, custom overrides. This is a user-created file.', {}, annotations, handler);
}
|
|
161
|
+
// Default per-command timeout: 10 minutes.
const DEFAULT_TIMEOUT_MS = 600000;
/**
 * Runs `npx tsdevstack <args>` in the current working directory and resolves
 * (never rejects) with an MCP tool result: combined stdout+stderr as text
 * plus an isError flag reflecting the child process outcome.
 *
 * @param {string[]} args - Arguments passed to the tsdevstack CLI.
 * @param {number} [timeoutMs=DEFAULT_TIMEOUT_MS] - Kill the child after this many ms.
 * @returns {Promise<{content: {type: string, text: string}[], isError: boolean}>}
 */
function runCommand(args, timeoutMs = DEFAULT_TIMEOUT_MS) {
    return new Promise((resolve) => {
        execFile('npx', [
            'tsdevstack',
            ...args
        ], {
            cwd: process.cwd(),
            timeout: timeoutMs,
            maxBuffer: 10485760 // 10 MiB cap on captured output
        }, (error, stdout, stderr) => {
            const output = [stdout, stderr].filter(Boolean).join('\n');
            // Fix: the original fell back to 'Command completed.' even when
            // the command FAILED with no captured output (spawn error,
            // timeout kill) — misleading. Surface the error message instead.
            const fallback = error ? error.message : 'Command completed.';
            resolve({
                content: [
                    {
                        type: 'text',
                        text: output || fallback
                    }
                ],
                isError: error !== null
            });
        });
    });
}
|
|
188
|
+
/**
 * Registers the `get_service_status` tool.
 * Shells out to `tsdevstack infra:service-status <service> --env <env>`.
 */
function registerGetServiceStatusTool(server) {
    const schema = {
        service: z.string().describe('Service name to check status for'),
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Get Service Status',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ service, env }) => runCommand(['infra:service-status', service, '--env', env]);
    server.tool('get_service_status', 'Cloud resource status for a specific service (running, image tag, URL, health).', schema, annotations, handler);
}
|
|
205
|
+
/**
 * Registers the `list_deployed_services` tool.
 * Shells out to `tsdevstack infra:list-deployed --env <env>`.
 */
function registerListDeployedServicesTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'List Deployed Services',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['infra:list-deployed', '--env', env]);
    server.tool('list_deployed_services', 'All deployed services in a cloud environment with their current status.', schema, annotations, handler);
}
|
|
220
|
+
/**
 * Registers the `list_secrets` tool (names only, no values).
 * Shells out to `tsdevstack cloud-secrets:list --env <env>`.
 */
function registerListSecretsTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'List Secrets',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['cloud-secrets:list', '--env', env]);
    server.tool('list_secrets', "Secret names stored in a cloud environment's secret manager. Does NOT return values.", schema, annotations, handler);
}
|
|
235
|
+
/**
 * Registers the `diff_secrets` tool.
 * Shells out to `tsdevstack cloud-secrets:diff --env <env>`.
 */
function registerDiffSecretsTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Diff Secrets',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['cloud-secrets:diff', '--env', env]);
    server.tool('diff_secrets', "Compare local secret names vs cloud — shows what's missing or extra. Run before deploying to catch mismatches.", schema, annotations, handler);
}
|
|
250
|
+
/**
 * Registers the `get_secret` tool (returns the actual secret value).
 * Shells out to `tsdevstack cloud-secrets:get <key> --env <env>`.
 */
function registerGetSecretTool(server) {
    const schema = {
        key: z.string().describe('Secret key name (e.g., DOMAIN, RESEND_API_KEY)'),
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Get Secret',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ key, env }) => runCommand(['cloud-secrets:get', key, '--env', env]);
    server.tool('get_secret', 'Get a single secret value from cloud. Use to check if a secret is set (e.g., DOMAIN). Returns the value — use with care.', schema, annotations, handler);
}
|
|
267
|
+
/**
 * Registers the `list_schedulers` tool.
 * Shells out to `tsdevstack infra:list-schedulers --env <env>`.
 */
function registerListSchedulersTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'List Schedulers',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['infra:list-schedulers', '--env', env]);
    server.tool('list_schedulers', 'Scheduled jobs (cron tasks) and their deployment status.', schema, annotations, handler);
}
|
|
282
|
+
/**
 * Registers the `plan_db_migrate` tool (preview, no changes applied).
 * Shells out to `tsdevstack infra:plan-db-migrate --service <s> --env <e>`.
 */
function registerPlanDbMigrateTool(server) {
    const schema = {
        service: z.string().describe('Service name (must have a database)'),
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Plan DB Migrate',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ service, env }) => runCommand(['infra:plan-db-migrate', '--service', service, '--env', env]);
    server.tool('plan_db_migrate', 'Show pending Prisma database migrations for a service. Run before `run_db_migrate` to preview changes.', schema, annotations, handler);
}
|
|
300
|
+
/**
 * Registers the `infra_plan` tool (Terraform plan, read-only preview).
 * Shells out to `tsdevstack infra:plan --env <env>`.
 */
function registerInfraPlanTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Infra Plan',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['infra:plan', '--env', env]);
    server.tool('infra_plan', 'Terraform plan — preview infrastructure changes without applying. Always run before `infra_deploy`.', schema, annotations, handler);
}
|
|
315
|
+
/**
 * Registers the `infra_status` tool (no input parameters).
 * Shells out to `tsdevstack infra:status`.
 */
function registerInfraStatusTool(server) {
    const annotations = {
        title: 'Infra Status',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async () => runCommand(['infra:status']);
    server.tool('infra_status', 'Check if infrastructure configuration is in sync (Terraform state vs config files).', {}, annotations, handler);
}
|
|
326
|
+
/**
 * Registers every read-only (query) tool on the given MCP server.
 */
function registerQueryTools(server) {
    const registrars = [
        registerListServicesTool,
        registerListEnvironmentsTool,
        registerGetProjectConfigTool,
        registerGetInfrastructureConfigTool,
        registerGetServiceStatusTool,
        registerListDeployedServicesTool,
        registerListSecretsTool,
        registerDiffSecretsTool,
        registerGetSecretTool,
        registerListSchedulersTool,
        registerPlanDbMigrateTool,
        registerInfraPlanTool,
        registerInfraStatusTool
    ];
    for (const register of registrars) {
        register(server);
    }
}
|
|
341
|
+
/**
 * Registers the `sync` tool (regenerates local config; writes files).
 * Shells out to `tsdevstack sync`.
 */
function registerSyncTool(server) {
    const annotations = {
        title: 'Sync',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async () => runCommand(['sync']);
    server.tool('sync', 'Regenerate all local config: secrets, docker-compose, kong, migrations. Run after adding services or changing secrets.', {}, annotations, handler);
}
|
|
352
|
+
/**
 * Registers the `generate_secrets` tool (writes local secrets files).
 * Shells out to `tsdevstack generate-secrets`.
 */
function registerGenerateSecretsTool(server) {
    const annotations = {
        title: 'Generate Secrets',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async () => runCommand(['generate-secrets']);
    server.tool('generate_secrets', 'Regenerate local secrets files. Run after editing .secrets.user.json. Preserves existing JWT keys and passwords.', {}, annotations, handler);
}
|
|
363
|
+
/**
 * Registers the `generate_kong` tool (writes Kong gateway config).
 * Shells out to `tsdevstack generate-kong`.
 */
function registerGenerateKongTool(server) {
    const annotations = {
        title: 'Generate Kong',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async () => runCommand(['generate-kong']);
    server.tool('generate_kong', 'Regenerate Kong gateway config from OpenAPI specs. Run after adding/changing API endpoints or decorators.', {}, annotations, handler);
}
|
|
374
|
+
/**
 * Registers the `generate_docker_compose` tool (writes docker-compose.yml).
 * Shells out to `tsdevstack generate-docker-compose`.
 */
function registerGenerateDockerComposeTool(server) {
    const annotations = {
        title: 'Generate Docker Compose',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async () => runCommand(['generate-docker-compose']);
    server.tool('generate_docker_compose', 'Regenerate docker-compose.yml from current config. Run after adding services.', {}, annotations, handler);
}
|
|
385
|
+
/**
 * Registers the `add_service` tool (scaffolds a new service locally).
 * Shells out to `tsdevstack add-service --name <n> --type <t>`.
 */
function registerAddServiceTool(server) {
    const schema = {
        name: z.string().describe('Service name (kebab-case, e.g. "billing-service")'),
        type: z.enum(['nestjs', 'nextjs', 'spa']).describe('Service type')
    };
    const annotations = {
        title: 'Add Service',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: false,
        openWorldHint: false
    };
    const handler = async ({ name, type }) => runCommand(['add-service', '--name', name, '--type', type]);
    server.tool('add_service', 'Add a new service (nestjs, nextjs, or spa). After this, run sync to regenerate all config. Types: nestjs (backend API), nextjs (SSR frontend), spa (Rsbuild SPA).', schema, annotations, handler);
}
|
|
407
|
+
/**
 * Registers the `remove_service` tool (local removal only; cloud resources
 * are untouched — that is remove_service_cloud's job).
 * Shells out to `tsdevstack remove-service <service>`.
 */
function registerRemoveServiceTool(server) {
    const schema = {
        service: z.string().describe('Service name to remove')
    };
    const annotations = {
        title: 'Remove Service',
        readOnlyHint: false,
        destructiveHint: true,
        idempotentHint: false,
        openWorldHint: false
    };
    const handler = async ({ service }) => runCommand(['remove-service', service]);
    server.tool('remove_service', 'Remove a service from the local project (deletes files, updates config). Does NOT remove from cloud — use remove_service_cloud for that.', schema, annotations, handler);
}
|
|
421
|
+
/**
 * Registers the `generate_client` tool.
 * Shells out to `tsdevstack generate-client <service>`.
 */
function registerGenerateClientTool(server) {
    const schema = {
        service: z.string().describe('Service name to generate client for')
    };
    const annotations = {
        title: 'Generate Client',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ service }) => runCommand(['generate-client', service]);
    server.tool('generate_client', "Generate TypeScript HTTP client + DTOs from a service's OpenAPI spec. Other services import this for type-safe API calls.", schema, annotations, handler);
}
|
|
435
|
+
/**
 * Registers the `register_detached_worker` tool (config.json update only;
 * no file scaffolding).
 * Shells out to `tsdevstack register-detached-worker --name <n> --base-service <s>`.
 */
function registerRegisterDetachedWorkerTool(server) {
    const schema = {
        name: z.string().describe('Worker name (kebab-case)'),
        baseService: z.string().describe('Base NestJS service this worker belongs to')
    };
    const annotations = {
        title: 'Register Detached Worker',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: false,
        openWorldHint: false
    };
    const handler = async ({ name, baseService }) => runCommand(['register-detached-worker', '--name', name, '--base-service', baseService]);
    server.tool('register_detached_worker', "Register a detached worker in config.json. Only updates config — does NOT scaffold files. User must create worker.ts, worker.module.ts, and processor files manually using nest-common's startWorker(). After registering, run sync then infra_deploy.", schema, annotations, handler);
}
|
|
453
|
+
/**
 * Registers the `unregister_detached_worker` tool (config.json update only).
 * Shells out to `tsdevstack unregister-detached-worker --worker <w>`.
 */
function registerUnregisterDetachedWorkerTool(server) {
    const schema = {
        worker: z.string().describe('Worker name to unregister')
    };
    const annotations = {
        title: 'Unregister Detached Worker',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: false,
        openWorldHint: false
    };
    const handler = async ({ worker }) => runCommand(['unregister-detached-worker', '--worker', worker]);
    server.tool('unregister_detached_worker', 'Remove a detached worker entry from config.json. Does NOT remove from cloud — use remove_detached_worker for that.', schema, annotations, handler);
}
|
|
468
|
+
/**
 * Registers the `cloud_secrets_push` tool.
 * Shells out to `tsdevstack cloud-secrets:push --env <env>`.
 */
function registerCloudSecretsPushTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Cloud Secrets Push',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['cloud-secrets:push', '--env', env]);
    server.tool('cloud_secrets_push', 'Push secrets to cloud. Generates framework secrets, prompts for DOMAIN/RESEND_API_KEY/EMAIL_FROM, auto-derives the rest. Run once per environment during initial setup.', schema, annotations, handler);
}
|
|
483
|
+
/**
 * Registers the `cloud_secrets_set` tool. Always passes --overwrite, so an
 * existing key is updated in place; an optional service scope narrows the
 * secret from shared to per-service.
 * Shells out to `tsdevstack cloud-secrets:set <key> --value <v> --env <e> --overwrite [--service <s>]`.
 */
function registerCloudSecretsSetTool(server) {
    const schema = {
        key: z.string().describe('Secret key name (e.g. DOMAIN, STRIPE_KEY)'),
        value: z.string().describe('Secret value'),
        env: z.string().describe('Target environment (dev, staging, prod)'),
        service: z.string().optional().describe('Service scope (defaults to shared)')
    };
    const annotations = {
        title: 'Cloud Secrets Set',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ key, value, env, service }) => {
        const args = ['cloud-secrets:set', key, '--value', value, '--env', env, '--overwrite'];
        if (service) {
            args.push('--service', service);
        }
        return runCommand(args);
    };
    server.tool('cloud_secrets_set', 'Set or update a single secret in cloud. Use for overrides or adding new third-party API keys.', schema, annotations, handler);
}
|
|
509
|
+
/**
 * Registers the `cloud_secrets_remove` tool. Passes --force, so the CLI
 * does not prompt for confirmation; an optional service scope narrows the
 * secret from shared to per-service.
 * Shells out to `tsdevstack cloud-secrets:remove <key> --env <e> --force [--service <s>]`.
 */
function registerCloudSecretsRemoveTool(server) {
    const schema = {
        key: z.string().describe('Secret key name to remove'),
        env: z.string().describe('Target environment (dev, staging, prod)'),
        service: z.string().optional().describe('Service scope (defaults to shared)')
    };
    const annotations = {
        title: 'Cloud Secrets Remove',
        readOnlyHint: false,
        destructiveHint: true,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ key, env, service }) => {
        const args = ['cloud-secrets:remove', key, '--env', env, '--force'];
        if (service) {
            args.push('--service', service);
        }
        return runCommand(args);
    };
    server.tool('cloud_secrets_remove', 'Remove a secret from cloud secret manager. Verify the secret is unused before removing.', schema, annotations, handler);
}
|
|
532
|
+
/**
 * Registers the `infra_deploy` tool (full deploy; passes --auto-approve so
 * Terraform does not wait on an interactive prompt).
 * Shells out to `tsdevstack infra:deploy --env <env> --auto-approve`.
 */
function registerInfraDeployTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Infra Deploy',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['infra:deploy', '--env', env, '--auto-approve']);
    server.tool('infra_deploy', 'Full deployment: Terraform infra + build + push + deploy all services + Kong + LB. Required when adding new services. Long-running (30+ min) — advise the user to run `npx tsdevstack infra:deploy --env {env}` in their terminal instead.', schema, annotations, handler);
}
|
|
548
|
+
/**
 * Registers the `deploy_services` tool (code-only redeploy, optionally
 * filtered to specific services).
 * Shells out to `tsdevstack infra:deploy-services --env <e> [--service <s>]`.
 */
function registerDeployServicesTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)'),
        service: z.string().optional().describe('Optional: deploy only this service (or comma-separated list)')
    };
    const annotations = {
        title: 'Deploy Services',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env, service }) => {
        const args = ['infra:deploy-services', '--env', env];
        if (service) {
            args.push('--service', service);
        }
        return runCommand(args);
    };
    server.tool('deploy_services', 'Deploy code changes to existing services only. Faster than full deploy. Use for code updates when no infrastructure changes are needed. Supports optional service filter.', schema, annotations, handler);
}
|
|
568
|
+
/**
 * Registers the `deploy_kong` tool.
 * Shells out to `tsdevstack infra:deploy-kong --env <env>`.
 */
function registerDeployKongTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Deploy Kong',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['infra:deploy-kong', '--env', env]);
    server.tool('deploy_kong', 'Rebuild and deploy Kong gateway. Run after changing routes (adding endpoints, changing auth decorators).', schema, annotations, handler);
}
|
|
583
|
+
/**
 * Registers the `deploy_lb` tool (passes --auto-approve to skip the
 * interactive Terraform prompt).
 * Shells out to `tsdevstack infra:deploy-lb --env <env> --auto-approve`.
 */
function registerDeployLbTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Deploy Load Balancer',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['infra:deploy-lb', '--env', env, '--auto-approve']);
    server.tool('deploy_lb', 'Deploy/update the load balancer. Run after changing domains or adding frontend apps. Outputs DNS records and SSL validation info.', schema, annotations, handler);
}
|
|
599
|
+
/**
 * Registers the `run_db_migrate` tool (applies pending migrations).
 * Shells out to `tsdevstack infra:run-db-migrate --service <s> --env <e>`.
 */
function registerRunDbMigrateTool(server) {
    const schema = {
        service: z.string().describe('Service name with database'),
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Run DB Migrate',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ service, env }) => runCommand(['infra:run-db-migrate', '--service', service, '--env', env]);
    server.tool('run_db_migrate', 'Apply pending Prisma migrations for a service in cloud. Run plan_db_migrate first to preview changes.', schema, annotations, handler);
}
|
|
617
|
+
/**
 * Registers the `deploy_schedulers` tool (passes --auto-approve to skip the
 * interactive prompt).
 * Shells out to `tsdevstack infra:deploy-schedulers --env <env> --auto-approve`.
 */
function registerDeploySchedulersTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Deploy Schedulers',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['infra:deploy-schedulers', '--env', env, '--auto-approve']);
    server.tool('deploy_schedulers', 'Deploy all scheduled jobs (cron tasks) to cloud.', schema, annotations, handler);
}
|
|
633
|
+
/**
 * Registers the destructive `remove_service_cloud` tool (passes --confirm,
 * so the CLI deletes without an interactive prompt).
 * Shells out to `tsdevstack infra:remove-service <service> --env <e> --confirm`.
 */
function registerRemoveServiceCloudTool(server) {
    const schema = {
        service: z.string().describe('Service name to remove from cloud'),
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Remove Service (Cloud)',
        readOnlyHint: false,
        destructiveHint: true,
        idempotentHint: false,
        openWorldHint: false
    };
    const handler = async ({ service, env }) => runCommand(['infra:remove-service', service, '--env', env, '--confirm']);
    server.tool('remove_service_cloud', 'Remove a service from cloud (deletes container, secrets, database). Cannot be undone. Data is permanently lost.', schema, annotations, handler);
}
|
|
651
|
+
/**
 * Registers the destructive `remove_detached_worker` tool (passes --confirm,
 * so the CLI deletes without an interactive prompt).
 * Shells out to `tsdevstack infra:remove-detached-worker --worker <w> --env <e> --confirm`.
 */
function registerRemoveDetachedWorkerTool(server) {
    const schema = {
        worker: z.string().describe('Worker name to remove'),
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Remove Detached Worker (Cloud)',
        readOnlyHint: false,
        destructiveHint: true,
        idempotentHint: false,
        openWorldHint: false
    };
    const handler = async ({ worker, env }) => runCommand(['infra:remove-detached-worker', '--worker', worker, '--env', env, '--confirm']);
    server.tool('remove_detached_worker', 'Remove a detached worker from cloud. Cannot be undone.', schema, annotations, handler);
}
|
|
670
|
+
/**
 * Registers the highly destructive `infra_destroy` tool (passes
 * --auto-approve, so Terraform destroys without an interactive prompt).
 * Shells out to `tsdevstack infra:destroy --env <env> --auto-approve`.
 */
function registerInfraDestroyTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const annotations = {
        title: 'Infra Destroy',
        readOnlyHint: false,
        destructiveHint: true,
        idempotentHint: false,
        openWorldHint: false
    };
    const handler = async ({ env }) => runCommand(['infra:destroy', '--env', env, '--auto-approve']);
    server.tool('infra_destroy', 'Destroy ALL cloud infrastructure for an environment. Permanently deletes databases, services, and all data. Cannot be undone. Use with extreme caution.', schema, annotations, handler);
}
|
|
686
|
+
// Registers the `cloud_init` MCP action tool. Shells out to the CLI command
// `cloud:init --<provider>` via runCommand, where the provider name is passed
// as a flag (e.g. `--gcp`) to bypass the interactive provider prompt.
function registerCloudInitTool(server) {
    const schema = {
        provider: z.enum(['gcp', 'aws', 'azure']).describe('Cloud provider to initialize')
    };
    const hints = {
        title: 'Cloud Init',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ provider }) => {
        return runCommand(['cloud:init', `--${provider}`]);
    };
    server.tool('cloud_init', 'Initialize cloud provider credentials for a specific provider (gcp, aws, azure). One-time setup per provider. Provider must be passed as flag to skip interactive prompt.', schema, hints, handler);
}
|
|
704
|
+
// Registers the `infra_bootstrap` MCP action tool. Shells out to the CLI
// command `infra:bootstrap --env <env>` via runCommand.
function registerInfraBootstrapTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const hints = {
        title: 'Infra Bootstrap',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => {
        return runCommand(['infra:bootstrap', '--env', env]);
    };
    server.tool('infra_bootstrap', 'Bootstrap cloud project (enable APIs, add roles). One-time setup per environment.', schema, hints, handler);
}
|
|
719
|
+
// Registers the `infra_init` MCP action tool. Shells out to the CLI
// command `infra:init --env <env>` via runCommand.
function registerInfraInitTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const hints = {
        title: 'Infra Init',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => {
        return runCommand(['infra:init', '--env', env]);
    };
    server.tool('infra_init', 'Initialize infrastructure (creates Terraform state bucket). One-time setup per environment.', schema, hints, handler);
}
|
|
734
|
+
// Registers the `infra_generate` MCP action tool. Shells out to the CLI
// command `infra:generate --env <env>` via runCommand.
function registerInfraGenerateTool(server) {
    const schema = {
        env: z.string().describe('Target environment (dev, staging, prod)')
    };
    const hints = {
        title: 'Infra Generate',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => {
        return runCommand(['infra:generate', '--env', env]);
    };
    server.tool('infra_generate', 'Generate Terraform files from config. Usually called internally by deploy, but useful for previewing generated output.', schema, hints, handler);
}
|
|
749
|
+
// Registers the `infra_generate_docker` MCP action tool. Shells out to
// `infra:generate-docker` via runCommand, adding `--service <name>` only
// when a service filter was supplied.
function registerInfraGenerateDockerTool(server) {
    const schema = {
        service: z.string().optional().describe('Generate for specific service only (optional)')
    };
    const hints = {
        title: 'Generate Dockerfiles',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ service }) => {
        const args = ['infra:generate-docker'];
        if (service) {
            args.push('--service', service);
        }
        return runCommand(args);
    };
    server.tool('infra_generate_docker', 'Generate Dockerfiles for services. Usually called internally by deploy. Supports optional service filter.', schema, hints, handler);
}
|
|
766
|
+
// Registers the `infra_build_docker` MCP action tool. Shells out to
// `infra:build-docker` via runCommand, appending `--service`, `--env`,
// and `--tag` flags only for the arguments actually provided.
function registerInfraBuildDockerTool(server) {
    const schema = {
        service: z.string().optional().describe('Build specific service only (optional)'),
        env: z.string().optional().describe('Target environment (optional)'),
        tag: z.string().optional().describe('Image tag (defaults to git SHA)')
    };
    const hints = {
        title: 'Build Docker Images',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ service, env, tag }) => {
        const args = ['infra:build-docker'];
        if (service) {
            args.push('--service', service);
        }
        if (env) {
            args.push('--env', env);
        }
        if (tag) {
            args.push('--tag', tag);
        }
        return runCommand(args);
    };
    server.tool('infra_build_docker', 'Build Docker images with BuildKit. Usually called internally by deploy. Supports --service for single build, --tag for custom tag (defaults to git SHA).', schema, hints, handler);
}
|
|
787
|
+
// Registers the `infra_push_docker` MCP action tool. Shells out to
// `infra:push-docker` via runCommand, appending `--service`, `--env`,
// and `--tag` flags only for the arguments actually provided.
function registerInfraPushDockerTool(server) {
    const schema = {
        service: z.string().optional().describe('Push specific service only (optional)'),
        env: z.string().optional().describe('Target environment (optional)'),
        tag: z.string().optional().describe('Image tag (defaults to git SHA)')
    };
    const hints = {
        title: 'Push Docker Images',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ service, env, tag }) => {
        const args = ['infra:push-docker'];
        if (service) {
            args.push('--service', service);
        }
        if (env) {
            args.push('--env', env);
        }
        if (tag) {
            args.push('--tag', tag);
        }
        return runCommand(args);
    };
    server.tool('infra_push_docker', 'Push Docker images to registry. Usually called internally by deploy. Supports --service for single push.', schema, hints, handler);
}
|
|
808
|
+
// Registers the `infra_build_kong` MCP action tool. Shells out to
// `infra:build-kong` via runCommand, appending `--env` and `--tag`
// flags only when provided.
function registerInfraBuildKongTool(server) {
    const schema = {
        env: z.string().optional().describe('Target environment (optional)'),
        tag: z.string().optional().describe('Image tag (defaults to git SHA)')
    };
    const hints = {
        title: 'Build Kong Image',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env, tag }) => {
        const args = ['infra:build-kong'];
        if (env) {
            args.push('--env', env);
        }
        if (tag) {
            args.push('--tag', tag);
        }
        return runCommand(args);
    };
    server.tool('infra_build_kong', 'Build Kong Docker image. Usually called internally by deploy-kong.', schema, hints, handler);
}
|
|
827
|
+
// Registers the `infra_init_ci` MCP action tool. Takes no input; shells
// out to `infra:init-ci --github` via runCommand.
function registerInfraInitCiTool(server) {
    const hints = {
        title: 'Init CI/CD',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async () => {
        return runCommand(['infra:init-ci', '--github']);
    };
    server.tool('infra_init_ci', 'Initialize CI/CD workflows (GitHub Actions). One-time setup.', {}, hints, handler);
}
|
|
839
|
+
// Registers the `infra_generate_ci` MCP action tool. Shells out to
// `infra:generate-ci` via runCommand, appending `--env` only when provided.
function registerInfraGenerateCiTool(server) {
    const schema = {
        env: z.string().optional().describe('Target environment (optional)')
    };
    const hints = {
        title: 'Generate CI Workflows',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ env }) => {
        const args = ['infra:generate-ci'];
        if (env) {
            args.push('--env', env);
        }
        return runCommand(args);
    };
    server.tool('infra_generate_ci', 'Regenerate CI workflows from ci.json.', schema, hints, handler);
}
|
|
856
|
+
// Registers the `deploy_service` MCP action tool. Shells out to
// `infra:deploy-service <service>` via runCommand, with optional
// `--env` and `--tag` overrides.
function registerDeployServiceTool(server) {
    const schema = {
        service: z.string().describe('Service name to deploy'),
        env: z.string().optional().describe('Target environment (optional)'),
        tag: z.string().optional().describe('Image tag to deploy (defaults to current git SHA)')
    };
    const hints = {
        title: 'Deploy Service',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ service, env, tag }) => {
        const args = ['infra:deploy-service', service];
        if (env) {
            args.push('--env', env);
        }
        if (tag) {
            args.push('--tag', tag);
        }
        return runCommand(args);
    };
    server.tool('deploy_service', 'Build, push, deploy a single service (full workflow). Alternative to deploy_services --service. Supports optional tag override.', schema, hints, handler);
}
|
|
877
|
+
// Registers the `deploy_scheduler` MCP action tool. Shells out to
// `infra:deploy-scheduler` via runCommand; `--job` and `--env` are
// appended only when given, and `--auto-approve` is always appended last.
function registerDeploySchedulerTool(server) {
    const schema = {
        job: z.string().optional().describe('Scheduled job name to deploy (optional)'),
        env: z.string().optional().describe('Target environment (optional)')
    };
    const hints = {
        title: 'Deploy Scheduler',
        readOnlyHint: false,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ job, env }) => {
        const args = ['infra:deploy-scheduler'];
        if (job) {
            args.push('--job', job);
        }
        if (env) {
            args.push('--env', env);
        }
        args.push('--auto-approve');
        return runCommand(args);
    };
    server.tool('deploy_scheduler', 'Deploy a single scheduled job. Alternative to batch deploy_schedulers.', schema, hints, handler);
}
|
|
897
|
+
// Registers the `remove_scheduler` MCP action tool. Shells out to
// `infra:remove-scheduler` via runCommand; `--job` and `--env` are
// appended only when given, and `--confirm` is always appended last.
// Marked destructive/non-idempotent: the removal is permanent.
function registerRemoveSchedulerTool(server) {
    const schema = {
        job: z.string().optional().describe('Job name to remove (optional)'),
        env: z.string().optional().describe('Target environment (optional)')
    };
    const hints = {
        title: 'Remove Scheduler',
        readOnlyHint: false,
        destructiveHint: true,
        idempotentHint: false,
        openWorldHint: false
    };
    const handler = async ({ job, env }) => {
        const args = ['infra:remove-scheduler'];
        if (job) {
            args.push('--job', job);
        }
        if (env) {
            args.push('--env', env);
        }
        args.push('--confirm');
        return runCommand(args);
    };
    server.tool('remove_scheduler', 'Remove a single scheduled job from cloud. Cannot be undone.', schema, hints, handler);
}
|
|
917
|
+
// Registers the `validate_service` MCP tool (read-only). Shells out to
// `validate-service [service]` via runCommand; the service name is passed
// as a positional argument only when supplied.
function registerValidateServiceTool(server) {
    const schema = {
        service: z.string().optional().describe('Service name to validate (optional, auto-detects from cwd)')
    };
    const hints = {
        title: 'Validate Service',
        readOnlyHint: true,
        destructiveHint: false,
        idempotentHint: true,
        openWorldHint: false
    };
    const handler = async ({ service }) => {
        const args = ['validate-service'];
        if (service) {
            args.push(service);
        }
        return runCommand(args);
    };
    server.tool('validate_service', 'Validate a service follows naming conventions and structure.', schema, hints, handler);
}
|
|
934
|
+
// Registers every action (state-mutating) MCP tool on the server.
// Registration order is preserved from the original hand-written call list.
function registerActionTools(server) {
    const registrars = [
        registerSyncTool,
        registerGenerateSecretsTool,
        registerGenerateKongTool,
        registerGenerateDockerComposeTool,
        registerAddServiceTool,
        registerRemoveServiceTool,
        registerGenerateClientTool,
        registerRegisterDetachedWorkerTool,
        registerUnregisterDetachedWorkerTool,
        registerCloudSecretsPushTool,
        registerCloudSecretsSetTool,
        registerCloudSecretsRemoveTool,
        registerInfraDeployTool,
        registerDeployServicesTool,
        registerDeployKongTool,
        registerDeployLbTool,
        registerRunDbMigrateTool,
        registerDeploySchedulersTool,
        registerRemoveServiceCloudTool,
        registerRemoveDetachedWorkerTool,
        registerInfraDestroyTool,
        registerCloudInitTool,
        registerInfraBootstrapTool,
        registerInfraInitTool,
        registerInfraGenerateTool,
        registerInfraGenerateDockerTool,
        registerInfraBuildDockerTool,
        registerInfraPushDockerTool,
        registerInfraBuildKongTool,
        registerInfraInitCiTool,
        registerInfraGenerateCiTool,
        registerDeployServiceTool,
        registerDeploySchedulerTool,
        registerRemoveSchedulerTool,
        registerValidateServiceTool
    ];
    for (const register of registrars) {
        register(server);
    }
}
|
|
971
|
+
// Registers all MCP tools: read-only query tools first, then action tools.
function registerTools(server) {
    for (const register of [registerQueryTools, registerActionTools]) {
        register(server);
    }
}
|
|
975
|
+
// Reads a file relative to the current working directory as UTF-8 text.
// Returns the file contents, or null when the file is missing or unreadable.
function readProjectFile(relativePath) {
    const absolutePath = join(process.cwd(), relativePath);
    try {
        return readFileSync(absolutePath, 'utf-8');
    } catch {
        return null;
    }
}
|
|
982
|
+
// Registers read-only MCP resources exposing project-state JSON files from
// the .tsdevstack/ directory. Each resource serves the file's raw content,
// or a hint message when the file does not exist. Registration order and
// all user-visible strings match the original hand-written version.
function registerProjectStateResources(server) {
    // Local factory: registers one JSON-file-backed resource.
    const registerJsonFileResource = (name, resourceUri, description, relativePath, missingText) => {
        server.resource(name, resourceUri, { description }, async (uri) => {
            const content = readProjectFile(relativePath);
            return {
                contents: [
                    {
                        uri: uri.href,
                        text: content ?? missingText,
                        mimeType: 'application/json'
                    }
                ]
            };
        });
    };
    registerJsonFileResource(
        'config',
        'tsdevstack://config',
        'Project name, services, types, ports — the main project configuration.',
        '.tsdevstack/config.json',
        'config.json not found. Run `npx tsdevstack init` to create a project.'
    );
    registerJsonFileResource(
        'infrastructure',
        'tsdevstack://infrastructure',
        'Per-environment infrastructure settings (DB tiers, domains, scaling). User-created file.',
        '.tsdevstack/infrastructure.json',
        'infrastructure.json not found. This is a user-created file — see the guide/config resource for how to create it.'
    );
    registerJsonFileResource(
        'infrastructure-schema',
        'tsdevstack://infrastructure-schema',
        'Valid fields and values for infrastructure.json. Generated by `infra:init`. Only exists after initialization.',
        '.tsdevstack/infrastructure.schema.json',
        'infrastructure.schema.json not found. Run `npx tsdevstack infra:init --env <env>` to generate the schema.'
    );
    registerJsonFileResource(
        'ci',
        'tsdevstack://ci',
        'CI/CD configuration — environments, provider, workflow settings.',
        '.tsdevstack/ci.json',
        'ci.json not found. Run `npx tsdevstack infra:init-ci --github` to initialize CI/CD.'
    );
}
|
|
1040
|
+
// Reads a file relative to the current working directory as UTF-8 text.
// Returns the file contents, or null when the file is missing or unreadable.
// (Duplicate of readProjectFile — kept separate by the bundler, which
// renamed this copy to avoid a symbol collision.)
function secrets_context_readProjectFile(relativePath) {
    const absolutePath = join(process.cwd(), relativePath);
    try {
        return readFileSync(absolutePath, 'utf-8');
    } catch {
        return null;
    }
}
|
|
1047
|
+
// Registers MCP resources exposing the secrets configuration:
//  - secrets-map:   .tsdevstack/secret-map.json — service → secret assignments.
//  - secrets-names: .secrets.local.json — merged local secrets, full content.
//  - secrets-user:  .secrets.user.json — user secrets with values redacted
//    (under the "secrets" key only the key names are exposed).
function registerSecretsContextResources(server) {
    server.resource('secrets-map', 'tsdevstack://secrets/map', {
        description: 'Which secrets are assigned to which services. Critical for debugging "service X doesn\'t have secret Y" issues.'
    }, async (uri) => {
        const content = secrets_context_readProjectFile('.tsdevstack/secret-map.json');
        return {
            contents: [
                {
                    uri: uri.href,
                    text: content ?? 'secret-map.json not found. Run `npx tsdevstack generate-secrets` to generate it.',
                    mimeType: 'application/json'
                }
            ]
        };
    });
    server.resource('secrets-names', 'tsdevstack://secrets/names', {
        description: 'Secret names, scopes, and values (local dev only — no cloud credentials). Full content of the merged secrets file.'
    }, async (uri) => {
        const content = secrets_context_readProjectFile('.secrets.local.json');
        return {
            contents: [
                {
                    uri: uri.href,
                    text: content ?? '.secrets.local.json not found. Run `npx tsdevstack generate-secrets` to generate it.',
                    mimeType: 'application/json'
                }
            ]
        };
    });
    server.resource('secrets-user', 'tsdevstack://secrets/user', {
        description: 'User secret definitions and service assignments (keys only, no values). Shows which secrets the user has defined and which services they are assigned to.'
    }, async (uri) => {
        const raw = secrets_context_readProjectFile('.secrets.user.json');
        if (!raw) {
            return {
                contents: [
                    {
                        uri: uri.href,
                        text: '.secrets.user.json not found. This file is created when the user adds custom secrets.'
                    }
                ]
            };
        }
        // FIX: .secrets.user.json is user-edited; previously a malformed file
        // made JSON.parse throw an unhandled exception out of this handler.
        // Report the parse failure as resource text instead.
        let parsed;
        try {
            parsed = JSON.parse(raw);
        } catch (err) {
            return {
                contents: [
                    {
                        uri: uri.href,
                        text: `.secrets.user.json is not valid JSON: ${err instanceof Error ? err.message : String(err)}`
                    }
                ]
            };
        }
        // Redact values: for the "secrets" object expose only its key names;
        // every other top-level entry passes through unchanged.
        const keysOnly = {};
        for (const [key, value] of Object.entries(parsed)) {
            if (key === 'secrets' && typeof value === 'object' && value !== null) {
                keysOnly[key] = Object.keys(value);
            } else {
                keysOnly[key] = value;
            }
        }
        return {
            contents: [
                {
                    uri: uri.href,
                    text: JSON.stringify(keysOnly, null, 2),
                    mimeType: 'application/json'
                }
            ]
        };
    });
}
|
|
1103
|
+
// Registers the kong-routes MCP resource. Serves the Kong gateway config
// from the current working directory, preferring kong.tsdevstack.yml and
// falling back to kong.yml; emits a hint message when neither file exists.
function registerKongRoutesResource(server) {
    const candidateFiles = ['kong.tsdevstack.yml', 'kong.yml'];
    server.resource('kong-routes', 'tsdevstack://kong/routes', {
        description: 'Current Kong gateway routing config (services, routes, plugins). Shows which endpoints exist, their auth requirements, and which service handles each route.'
    }, async (uri) => {
        const cwd = process.cwd();
        let content = null;
        for (const filename of candidateFiles) {
            try {
                content = readFileSync(join(cwd, filename), 'utf-8');
                break;
            } catch {
                // File absent or unreadable — try the next candidate.
            }
        }
        return {
            contents: [
                {
                    uri: uri.href,
                    text: content ?? 'Kong config not found. Run `npx tsdevstack generate-kong` to generate it.',
                    mimeType: 'text/yaml'
                }
            ]
        };
    });
}
|
|
1127
|
+
const GUIDE_CONTENT = "# tsdevstack Framework Guide\n\n## Project Structure\n- `apps/` — Microservices and frontends (NestJS, Next.js, SPA)\n- `packages/` — Shared libraries and CLI packages (new packages go here, scaffold with rslib)\n- `.tsdevstack/` — Framework configuration (config.json, infrastructure.json, credentials, secret-map)\n- `docs/` — Documentation\n- `infrastructure/` — GENERATED Terraform and Kong files (never edit directly)\n\n## Service Types\n- `nestjs` — Backend API service. Has globalPrefix, optional database, OpenAPI spec, Kong routes.\n- `nextjs` — Server-rendered frontend (Next.js App Router).\n- `spa` — Single-page application (Rsbuild). No server-side rendering.\n- `worker` — Detached background job processor. Shares Docker image with its base NestJS service, runs with different entrypoint (`dist/worker.js`).\n\n## Key Principles\n- **Single source of truth:** Service names come from `package.json`. Config derives from `.tsdevstack/config.json`. No magic strings.\n- **Convention over configuration:** Strict naming (kebab-case), everything derived automatically.\n- **Generated files are read-only:** Files in `infrastructure/`, `docker-compose.yml`, `kong.tsdevstack.yml`, `.secrets.tsdevstack.json` are all generated by CLI commands. Edit the generators or config, not the output.\n- **Override files are yours:** `kong.user.yml`, `docker-compose.user.yml`, `.secrets.user.json` — these are merged with generated files and preserved across regenerations.\n\n## Anti-Patterns (DO NOT DO THESE)\n- NEVER edit `config.json` directly to add services or workers — use `add_service` or `register_detached_worker`\n- NEVER edit files in `infrastructure/terraform/` or `infrastructure/kong/` — they're generated. 
Edit the generators in `packages/cli-infra/src/utils/*/terraform-generate/`\n- NEVER install npm packages for things nest-common already provides (Redis, auth guards, logging, rate limiting, observability, BullMQ config, service clients)\n- NEVER create a worker by copying service boilerplate — use `register_detached_worker` to add to config, then create `worker.ts` and `worker.module.ts` using nest-common's `startWorker()` wrapper\n- NEVER deploy a new service/worker with `deploy_services` — new services need `infra_deploy` (Terraform must create the container runtime first)\n- NEVER shell out to `aws`, `gcloud`, or `az` CLI tools in source code — use cloud provider SDKs. CLI tools are only available locally.\n- NEVER use `process.env` to read secrets in NestJS services — inject `SecretsService` and use `await this.secrets.get('KEY')`\n- NEVER edit `.secrets.tsdevstack.json` — it's auto-generated. Put your secrets in `.secrets.user.json`\n\n## OpenAPI Decorators Drive Everything\nNestJS OpenAPI/Swagger decorators are NOT just for documentation — they drive the entire gateway:\n1. `@ApiOperation()` on a controller method → the endpoint exists in OpenAPI spec\n2. `generate_kong` reads the OpenAPI spec → generates Kong routes\n3. `@ApiBearerAuth()` + `@UseGuards(AuthGuard)` → Kong adds JWT validation to that route\n4. `@PartnerApi()` → Kong exposes route under `/api/` prefix with API key auth\n5. `generate_client` reads the OpenAPI spec → generates TypeScript HTTP client + DTOs\n\nThis means: if you add an endpoint without proper decorators, it won't appear in Kong routes, won't be accessible from outside, and won't generate client types. 
Decorators are the source of truth for the API contract.\n\n## Secrets System\n- Three-file merge: `.secrets.tsdevstack.json` (framework, auto-generated) + `.secrets.user.json` (yours) → `.secrets.local.json` (merged output)\n- Framework secrets (JWT keys, DB passwords, service API keys, service URLs) are auto-generated — don't touch them\n- User secrets (third-party API keys, custom config) go in `.secrets.user.json`\n- Each service only sees secrets assigned to it (scoping via `secret-map.json`)\n- Cloud: `cloud_secrets_push` prompts for 3 values (DOMAIN, RESEND_API_KEY, EMAIL_FROM) and auto-derives everything else\n\n## Docs-Site Reference\nThe framework has a documentation site with detailed guides. When deployed, reference these for deeper reading:\n- **Local development:** `/local-development/tech-stack`, `/local-development/debugging`, `/local-development/adding-apps`\n- **Secrets:** `/secrets/how-secrets-work`, `/secrets/local-secrets`, `/secrets/user-vs-framework`, `/secrets/cloud-secrets`\n- **Authentication:** `/authentication/overview`, `/authentication/jwt-tokens`, `/authentication/protected-routes`, `/authentication/session-management`\n- **Building APIs:** `/building-apis/openapi-decorators`, `/building-apis/gateway-routing`, `/building-apis/dto-generation`, `/building-apis/swagger-docs`\n- **Infrastructure:** `/infrastructure/cicd-setup`, `/infrastructure/domain-setup`, `/infrastructure/environments`, `/infrastructure/service-configuration`\n- **Observability:** `/features/observability`\n- **Customization:** `/customization/kong-customization`, `/customization/docker-overrides`, `/customization/framework-files`, `/customization/escape-hatches`\n- **Provider-specific:** `/infrastructure/providers/gcp/`, `/infrastructure/providers/aws/`, `/infrastructure/providers/azure/`\n\n## Deployment Model\n- `infra_deploy` = full deploy (Terraform infra + build Docker + push + deploy all services + Kong + LB). 
Required for new services/workers.\n- `deploy_services` = code push only (build + push + deploy). For code updates to existing services. Supports `--service` for single service.\n- `deploy_kong` = rebuild and deploy Kong gateway. After changing routes/decorators.\n- `deploy_lb` = deploy/update load balancer. After changing domains.\n- New services/workers ALWAYS require `infra_deploy` because Terraform must create the container runtime.\n- Code updates to existing services: `deploy_services` is faster.";
|
|
1128
|
+
// Registers the static framework guide (GUIDE_CONTENT) as a markdown
// MCP resource at tsdevstack://guide.
function registerGuideResource(server) {
    const metadata = {
        description: 'Core framework concepts: project structure, service types, key principles, anti-patterns, secrets system, deployment model.'
    };
    server.resource('guide', 'tsdevstack://guide', metadata, async (uri) => {
        return {
            contents: [
                {
                    uri: uri.href,
                    text: GUIDE_CONTENT,
                    mimeType: 'text/markdown'
                }
            ]
        };
    });
}
|
|
1141
|
+
// Markdown text served by the 'guide-workflows' MCP resource: step-by-step task recipes.
const WORKFLOWS_CONTENT = '# tsdevstack Workflow Reference\n\n## "Add a new backend service"\n1. `add_service` with name and type=nestjs → scaffolds app in `apps/{name}/`, updates config.json\n2. `sync` → regenerates secrets, docker-compose, kong config, migrations\n3. Tell user to run `npm run dev` to start locally\n4. For cloud: `infra_deploy --env {env}` (Terraform must create Cloud Run service)\n\n## "Add a new frontend"\n1. `add_service` with name and type=nextjs (or spa for React SPA)\n2. `sync` → regenerates docker-compose\n3. Tell user to run `npm run dev`\n4. For cloud: `infra_deploy --env {env}`\n\n## "Add a background job processor (detached worker)"\nWorkers run as separate containers sharing their base service\'s Docker image.\n1. `register_detached_worker` with name and base-service → adds worker entry to config.json\n2. User creates these files manually:\n - `apps/{base-service}/src/worker.ts` — entry point using `startWorker()` from nest-common\n - `apps/{base-service}/src/worker.module.ts` — NestJS module importing `BullConfigModule.forRoot()` and registering queues\n - `apps/{base-service}/src/processors/{queue-name}.processor.ts` — BullMQ processor classes\n3. Import `BullModule.registerQueue({ name: \'queue-name\' })` in the main app module too (so the app can add jobs to the queue)\n4. `sync` → regenerates config\n5. `infra_deploy --env {env}` (Terraform must create the worker container)\n6. After first deploy, code updates: `deploy_services --service {base-service}` (deploys both app and worker)\n\n## "Add an API endpoint"\nNo CLI command needed — this is code:\n1. Add method to NestJS controller\n2. Add OpenAPI decorators: `@ApiOperation({ summary: \'...\' })`, `@ApiResponse({ ... })`\n3. For authentication: add `@ApiBearerAuth()` + `@UseGuards(AuthGuard)`\n4. For partner API access: add `@PartnerApi()` (can combine with `@ApiBearerAuth()` for dual access)\n5. For public endpoints: add `@Public()` decorator (or just don\'t add auth decorators)\n6. `generate_kong` → regenerates gateway routes from OpenAPI spec\n7. `generate_client` → regenerates TypeScript client so other services have the new endpoint typed\n\n## "Add a secret to a service"\n1. Read `tsdevstack://secrets/map` to see current assignments\n2. User adds key+value to `.secrets.user.json` under `secrets` object\n3. User adds the key name to the service\'s `secrets` array in `.secrets.user.json`\n4. `generate_secrets` → regenerates merged `.secrets.local.json`\n5. For cloud: `cloud_secrets_set` with the key and env, then `deploy_services --service {name}` to restart with new secret\n\n## "Assign a domain to a frontend app"\nOnly `nextjs` and `spa` apps can have domains. Backend services are NOT publicly available — they\'re behind Kong. Kong already has a fixed `api.{DOMAIN}` URL.\n1. `get_secret DOMAIN --env {env}` → check if DOMAIN is set in cloud\n2. If DOMAIN is not set: tell user to set it first (`cloud_secrets_set DOMAIN example.com --env {env}` or via `cloud_secrets_push`)\n3. If DOMAIN is set: edit `infrastructure.json` → add `"domain": "example.com"` to the frontend app\'s env entry\n - If the app doesn\'t have an entry in the env config yet, create one: `"frontend": { "domain": "example.com" }`\n4. For redirect domains (alternate domains that redirect to canonical): add `"loadBalancer": { "redirectDomains": ["alt.com"] }` to the env config\n5. `deploy_lb --env {env}` → deploys/updates load balancer, outputs DNS records and SSL validation info\n6. User adds DNS records at their domain registrar (A/CNAME records from the output)\n7. Check DNS propagation: `dig {domain}` — wait for it to point to the LB IP\n8. SSL certificate auto-provisions after DNS propagates\n\n## "Generate a typed HTTP client for service-to-service calls"\n1. Ensure target service has proper OpenAPI decorators on all endpoints\n2. `generate_client` for the target service → generates TypeScript client + DTOs in `packages/{service}-client/`\n3. In the calling service, create a client class extending `BaseServiceClient` from nest-common\n4. Inject `SecretsService` to get the target service URL and API key\n5. The generated client provides full type safety for all endpoints, request bodies, and responses\n\n## "Create infrastructure.json for a new environment"\nThis file is user-created — the framework does NOT generate it. Only create it if needed (e.g., to override defaults).\nMost projects WILL need this because `minInstances: 0` (scale to zero) is a common non-prod setting that saves costs.\n1. Read `tsdevstack://config` to get the list of services\n2. Read `tsdevstack://infrastructure-schema` to know valid fields and values (only available after `infra_init`)\n3. Create `.tsdevstack/infrastructure.json` with:\n - `"$schema": "./infrastructure.schema.json"` (REQUIRED — enables IDE validation)\n - `"version": "1.0.0"`\n - Environment key (e.g., `"dev"`) containing:\n - Per-service overrides (CPU, memory, minInstances, maxInstances) — service names must match config.json\n - `database` settings (tier, deletionProtection, etc.)\n - `redis` settings (tier, memoryGb)\n - `kong` settings (minInstances, maxInstances, cpu, memory)\n - `accessControl` (protected, noIndex)\n - `loadBalancer` (apiDomain, redirectDomains) — if domains are configured\n - `scheduledJobs` array — if cron jobs exist\n4. For additional environments (staging, prod), add sibling keys with appropriate values\n5. **Advise users:** Non-prod environments should typically use `minInstances: 0` (scale to zero) to save costs. Prod should have `minInstances: 1` or higher for availability.\n\n## "Set up a new cloud environment (first deploy)"\nThe project already exists (created via `tsdevstack init`). This is for deploying to a new environment.\n1. `cloud_init --{provider}` → checks local credentials + bootstraps the cloud project (enables APIs, creates roles, terraform state bucket)\n2. Configure `infrastructure.json` with environment settings (see "Create infrastructure.json" workflow above)\n3. `cloud_secrets_push --env {env}` → prompts for DOMAIN, RESEND_API_KEY, EMAIL_FROM; auto-generates framework secrets\n4. `infra_deploy --env {env}` → the big deploy: Terraform infra + build Docker + push + deploy all services + Kong + LB. This does everything.\n5. Set DNS records on domain registrar portal (from deploy output)\n6. `deploy_schedulers --env {env}` (can run in parallel with DNS setup)\n7. Check domain propagation: `dig {domain}` — wait for DNS to point to LB IP\n8. `list_deployed_services --env {env}` → verify everything is running\n\n**Step-by-step alternative:** If the user wants more control, they can run individual steps: `infra_generate` → `infra_plan` → review → `infra_deploy` → `deploy_kong` → `deploy_lb` → `deploy_schedulers`.\n\n## "Deploy to a cloud environment (subsequent deploys)"\n1. `infra_plan --env {env}` → preview infrastructure changes (always do this first)\n2. Review the plan output\n3. `infra_deploy --env {env}` → full deployment\n4. `list_deployed_services --env {env}` → verify everything is running\n\n## "Deploy code changes only (no infra changes)"\n1. `deploy_services --env {env}` → build, push, deploy all services\n2. Or for single service: `deploy_services --env {env} --service {name}`\n\n## "Update Kong routes after API changes"\n1. `deploy_kong --env {env}` → rebuild and deploy Kong with new routes\n\n## "Check what\'s deployed"\n1. `list_deployed_services --env {env}` → all services with status\n2. `get_service_status --service {name} --env {env}` → specific service details (URL, image tag, health)\n\n## "Debug a missing secret"\n1. Read `tsdevstack://secrets/map` — is the secret assigned to the service?\n2. Read `tsdevstack://secrets/user` — is the secret defined in `.secrets.user.json`?\n3. If cloud: `diff_secrets --env {env}` → compare local vs cloud (shows what\'s missing or extra)\n\n## "Debug a 404 on an API endpoint"\n1. Read `tsdevstack://kong/routes` — does the route exist in Kong config?\n2. If missing: check OpenAPI decorators on the controller method → `generate_kong`\n3. If present in Kong but still 404: check the service\'s `globalPrefix` in config.json\n4. If cloud: `deploy_kong --env {env}` to push updated routes\n\n## "Add a scheduled job"\nScheduled jobs are cron triggers that call service endpoints over HTTPS. They don\'t execute code directly — they trigger an HTTP endpoint on an existing service. The service must be deployed before the scheduler can be deployed.\n1. Create the endpoint in the target NestJS service:\n - Add a controller method (e.g., `@Post(\'jobs/cleanup-tokens\')`)\n - Add `@UseGuards(SchedulerGuard)` from nest-common (validates requests come from the cloud scheduler)\n - Add OpenAPI decorators as usual\n2. Deploy the service so the endpoint exists in cloud\n3. Add the job to `infrastructure.json` under the environment\'s `scheduledJobs` array:\n ```json\n {\n "name": "cleanup-tokens",\n "schedule": "0 */4 * * *",\n "targetService": "auth-service",\n "endpoint": "/auth/jobs/cleanup-tokens",\n "method": "POST",\n "httpTimeout": 300\n }\n ```\n4. `deploy_schedulers --env {env}` (or `deploy_scheduler --env {env} --scheduler cleanup-tokens`)\n\n**Important:** Services must be deployed before schedulers. Schedulers only make HTTPS calls — they don\'t run code or access databases directly.\n\n## "Change the database schema (Prisma)"\nPrisma commands are run directly — no framework wrapper locally.\n1. Edit `apps/{service}/prisma/schema.prisma`\n2. Run `cd apps/{service} && npx prisma migrate dev --name {migration-name}` — creates migration + applies to local DB\n3. Run `cd apps/{service} && npx prisma generate` — regenerates the Prisma client types\n4. Restart the service to pick up the new schema\n5. For cloud: `plan_db_migrate --service {name} --env {env}` → review → `run_db_migrate --service {name} --env {env}`\n\n**Note:** `npx prisma studio` (from the service directory) opens a visual data browser at http://localhost:5555.\n\n**Runtime:** nest-common provides `createPrismaConnection()` — a factory that creates a pg Pool + Prisma adapter with connection pooling. Services extend `PrismaClient` with this config. Don\'t create raw PrismaClient instances.\n\n## "Run database migrations in cloud"\n1. `plan_db_migrate --service {name} --env {env}` → preview pending migrations\n2. Review the output\n3. `run_db_migrate --service {name} --env {env}` → apply migrations\n\n## "Add a new shared package"\nShared packages live in `packages/`. User preference: scaffold with rslib, select what\'s needed.\n1. User creates the package in `packages/{name}/` (rslib scaffold or manual)\n2. Add to workspace in root `package.json`\n3. Other packages/apps import via `@tsdevstack/{name}`\n\n## "Set up CI/CD"\n1. `infra_init_ci --github` → generates `.github/workflows/` and `.tsdevstack/ci.json`\n2. User adds GitHub secrets in repo Settings > Secrets and variables > Actions:\n - Secrets use **environment suffix** pattern (e.g., `GCP_WIF_DEV`, `GCP_WIF_PROD`)\n - **GCP:** `GCP_WIF_{ENV}`, `GCP_SA_{ENV}`, `GCP_REGION_{ENV}`\n - **AWS:** `AWS_ROLE_ARN_{ENV}`, `AWS_REGION_{ENV}`\n - **Azure:** `AZURE_CLIENT_ID_{ENV}`, `AZURE_TENANT_ID_{ENV}`, `AZURE_SUBSCRIPTION_ID_{ENV}`, `AZURE_LOCATION_{ENV}`\n3. `cloud_secrets_push --env {env}` BEFORE first CI deploy (DOMAIN, RESEND_API_KEY, EMAIL_FROM + framework secrets)\n4. Workflows are triggered by users on GitHub Actions UI — nice env/service selection available\n5. PR workflow runs automatically on PRs against main (build, lint, tsc, test)\n6. To add a new environment: update `ci.json` environments array → `infra_generate_ci` → add GitHub secrets → push cloud secrets → deploy\n\n**CLI vs CI/CD:** Both can co-exist and either can manage everything independently. If a user wants CI-only deployments (no local CLI for cloud operations), they still need to set user secrets (DOMAIN, RESEND_API_KEY, EMAIL_FROM) manually via `cloud_secrets_set` with the correct naming — the CI workflow handles framework secrets but can\'t prompt for user values.\n\n## "Start local development"\n1. `npm install` (if fresh clone)\n2. `sync` → generates all config: secrets, docker-compose, kong config, env files, secret-map\n3. Tell user to run `npm run dev` — this starts Docker Compose (PostgreSQL, Redis, Kong, pgAdmin, Prometheus, Grafana, Jaeger, Redis Commander) + all services in parallel via Lerna\n\n### Local URLs\n| Service | URL | Purpose |\n|---------|-----|---------|\n| Frontend | http://localhost:3000 | Next.js application |\n| Kong Proxy | http://localhost:8000 | API gateway (all API calls go through here) |\n| Kong Admin | http://localhost:8001 | Gateway management API |\n| pgAdmin | http://localhost:5050 | Database management (login: admin@localhost.com / admin) |\n| Redis Commander | http://localhost:8081 | Redis data browser |\n| Prometheus | http://localhost:9090 | Metrics storage and queries |\n| Grafana | http://localhost:4001 | Dashboards (login: admin / admin) |\n| Jaeger | http://localhost:16686 | Distributed tracing UI |\n| Prisma Studio | http://localhost:5555 | Visual database editor (run `cd apps/{service} && npx prisma studio`) |\n\nBackend services run on their configured ports (3001, 3002, 3003, etc.) — access them through Kong at `:8000` for authenticated requests, or directly for health/metrics.\n\n### Email in local development\nLocally, `EMAIL_PROVIDER` defaults to `console` — emails are logged to the terminal with sender, recipient, subject, and body. No real emails are sent.\n\n**For the auth flow to work** (signup, password reset, email verification), watch the terminal output for the email log block containing the verification/reset link. Copy the link or token from there.\n\n### Key things to know\n- **Docker Compose is infrastructure**: PostgreSQL, Redis, Kong, monitoring — all started by `npm run dev`\n- **Services run natively**: NestJS and Next.js run via `nest start --watch` / `next dev` (hot reload)\n- **Secrets system**: Three-file merge (`.secrets.tsdevstack.json` + `.secrets.user.json` → `.secrets.local.json`). Edit `.secrets.user.json`, then `generate_secrets`\n- **pgAdmin has pre-configured databases** — all service databases appear in the sidebar, no setup needed\n- **Each service with a database** gets its own PostgreSQL container (separate ports: 5432, 5433, ...)\n\n### Common issues\n- **Port in use**: `lsof -i :{port}` to find the process\n- **Kong 502**: Service not running — check `docker compose ps`, then `sync` + restart\n- **Missing secrets**: Run `generate_secrets` to regenerate all config files\n\n## "Add a custom secret to a service"\n1. Edit `.secrets.user.json`:\n - Add key + value to `"secrets"` object (top level)\n - Add the key name to the target service\'s `"secrets"` array\n2. `generate_secrets` → regenerates merged `.secrets.local.json`\n3. Restart services (or they\'ll pick up on next restart)\n4. For cloud: `cloud_secrets_set KEY --value VALUE --env {env}` then `deploy_services --service {name}` to restart with new secret\n\n**Example** — adding STRIPE_KEY to offers-service:\n```json\n{\n "secrets": {\n "STRIPE_SECRET_KEY": "sk_test_..."\n },\n "offers-service": {\n "secrets": ["STRIPE_SECRET_KEY"]\n }\n}\n```\n\n## "Remove a service"\n1. `remove_service` for local removal (deletes files, updates config)\n2. For cloud removal: `remove_service_cloud --service {name} --env {env}` (deletes container, secrets, database — PERMANENT)\n\n## "Remove a detached worker"\n1. `unregister_detached_worker --worker {name}` → removes from config.json\n2. For cloud removal: `remove_detached_worker --env {env} --worker {name}` (deletes container — PERMANENT)';
|
|
1142
|
+
/**
 * Registers the workflow playbook as a static MCP resource.
 * @param {object} server - MCP server exposing a `resource(name, uri, meta, handler)` method.
 */
function registerGuideWorkflowsResource(server) {
    const handler = async (uri) => {
        return {
            contents: [
                {
                    uri: uri.href,
                    text: WORKFLOWS_CONTENT,
                    mimeType: 'text/markdown'
                }
            ]
        };
    };
    server.resource('guide-workflows', 'tsdevstack://guide/workflows', {
        description: 'Step-by-step workflow chains for common tasks: adding services, deploying, debugging secrets, setting up CI/CD, local development, and more.'
    }, handler);
}
|
|
1155
|
+
// Markdown text served by the 'guide-nest-common' MCP resource: shared-library usage notes.
const NEST_COMMON_CONTENT = "# nest-common — Practical Notes\n\n**Full reference:** See the docs-site at `/packages/nest-common`\n\nThese notes supplement the docs-site with AI-agent-specific guidance.\n\n## Key rule: always import from nest-common\nNEVER install third-party packages for features nest-common already provides:\n- Authentication → `AuthModule` (not passport, not custom JWT)\n- Redis → `RedisModule` (not raw ioredis)\n- Logging → `ObservabilityModule` (not winston, not pino directly)\n- Metrics → `ObservabilityModule` (not prom-client directly)\n- Rate limiting → `RateLimitModule` (not express-rate-limit)\n- Background jobs → `BullConfigModule` (not raw bullmq setup)\n- Email → `NotificationModule` (not nodemailer, not resend directly)\n- Database pooling → `createPrismaConnection()` (not raw pg Pool)\n\n## NEVER use process.env\nAlways use `SecretsService` to read secrets. The secrets system handles provider detection (local/gcp/aws/azure), caching, and service-scoped access.\n\n## generateSwaggerDocs(AppModule)\nGenerates OpenAPI spec without starting the server. Used internally by CLI commands:\n- `generate_kong` reads the spec → generates Kong gateway routes\n- `generate_client` reads the spec → generates TypeScript HTTP client + DTOs\nNot typically called by user code, but important to know it exists — this is why OpenAPI decorators drive everything.";
|
|
1156
|
+
/**
 * Registers the nest-common library notes as a static MCP resource.
 * @param {object} server - MCP server exposing a `resource(name, uri, meta, handler)` method.
 */
function registerGuideNestCommonResource(server) {
    const metadata = {
        description: 'Shared library reference: all modules, decorators, bootstrap functions from nest-common, and when to use them.'
    };
    server.resource('guide-nest-common', 'tsdevstack://guide/nest-common', metadata, async (uri) => {
        const content = {
            uri: uri.href,
            text: NEST_COMMON_CONTENT,
            mimeType: 'text/markdown'
        };
        return { contents: [content] };
    });
}
|
|
1169
|
+
// Markdown text served by the 'guide-config' MCP resource: configuration file reference.
const CONFIG_CONTENT = '# tsdevstack Configuration Reference\n\n## `.tsdevstack/config.json`\nMain project configuration. Source of truth for services.\n\n**Managed by:** `add_service`, `remove_service`, `register_detached_worker`, `unregister_detached_worker`\n**Never edit directly** — use CLI commands.\n\nStructure:\n- `project` — name, version, description\n- `framework` — version, packageScope (@tsdevstack), template\n- `cloud.provider` — active cloud provider (gcp, aws, azure)\n- `services[]` — all services and workers:\n - NestJS: `{ name, type: "nestjs", port, globalPrefix, hasDatabase?, databaseType? }`\n - Next.js: `{ name, type: "nextjs", port, hasDatabase: false }`\n - SPA: `{ name, type: "spa", port, hasDatabase: false }`\n - Worker: `{ name, type: "worker", baseService: "parent-service-name" }`\n\n## `.tsdevstack/infrastructure.json`\nPer-environment infrastructure settings. **This is a user-created file** — NOT generated by the framework.\n\n**Created by:** The user (or AI when asked). Used by `infra:generate` and `infra:deploy`.\n**Never generated** — unlike config.json (managed by CLI commands), the user creates and edits this file directly.\n\n### How to create it\n```json\n{\n "$schema": "./infrastructure.schema.json",\n "version": "1.0.0",\n "dev": {\n "accessControl": { "protected": true, "noIndex": true },\n "auth-service": { "minInstances": 0, "maxInstances": 5, "cpu": "1", "memory": "512Mi" },\n "frontend": { "domain": "example.com", "minInstances": 0, "maxInstances": 5, "cpu": "1", "memory": "512Mi" },\n "kong": { "minInstances": 0, "maxInstances": 5, "cpu": "1", "memory": "1Gi" },\n "database": { "tier": "db-f1-micro", "deletionProtection": false },\n "redis": { "memoryGb": 1, "tier": "BASIC" }\n }\n}\n```\n\n**Key rules:**\n- `$schema` MUST reference `./infrastructure.schema.json` (for IDE autocomplete + validation)\n- `version` is always `"1.0.0"`\n- Top-level keys after version are environment names: `dev`, `staging`, `prod`\n- Within each env: named service overrides + `database`, `redis`, `kong`, `loadBalancer`, `accessControl`, `security`, `scheduledJobs`\n- Service names must match services in `config.json`\n\n### Valid values\nRead `infrastructure.schema.json` for the definitive list. Key values:\n- **CPU:** `"0.25"`, `"0.5"`, `"1"`, `"2"`, `"4"`, `"8"`\n- **Memory:** `"256Mi"`, `"512Mi"`, `"0.5Gi"`, `"1Gi"`, `"2Gi"`, `"4Gi"`, `"8Gi"`\n- **DB tier (GCP):** `"db-f1-micro"`, `"db-g1-small"`, `"db-n1-standard-1"`, `"db-n1-standard-2"`, `"db-n1-standard-4"`\n- **Redis tier (GCP):** `"BASIC"`, `"STANDARD_HA"`\n- Provider-specific values differ — check the schema or provider override files\n\n### Access control & security\n- **`accessControl.protected`** — Restricts access (e.g., IP allowlist). Common for non-prod environments to prevent public access.\n- **`accessControl.noIndex`** — Adds `X-Robots-Tag: noindex, nofollow` header to all responses. Use on non-prod to prevent search engine indexing.\n- **`security.waf.customRules`** — Custom Cloud Armor WAF rules for rate limiting, IP blocking, or traffic filtering. Each rule has a `name`, `priority` (use 800-899 for custom), `action` (`allow`, `deny(403)`, `deny(404)`, `deny(429)`, `throttle`), and a CEL `expression`. For `throttle` actions, add `rateLimit: { count, intervalSec }`. Read the schema for full options.\n\n### Provider-specific overrides\n`infrastructure.gcp.json`, `infrastructure.aws.json`, `infrastructure.azure.json` — same structure as `infrastructure.json` but provider-specific values (different DB tiers, Redis tiers, etc.). Base config is merged with provider config at deploy time.\n\n## `.tsdevstack/infrastructure.schema.json`\nJSON Schema defining all valid fields and values for `infrastructure.json`. **Generated by `infra:init`** — only exists after infrastructure initialization. The schema documents every field, enum value, min/max, and description. AI agents should read this for definitive valid values.\n\n## `.tsdevstack/secret-map.json`\nMaps secrets to services. Auto-generated by `generate-secrets`.\n\nShows which environment variables each service receives. Critical for debugging "service X doesn\'t have secret Y" issues.\n\n## `.tsdevstack/ci.json`\nCI/CD workflow configuration. Provider (github), environments to deploy.\n\n## `.tsdevstack/.credentials.{provider}.json` (gitignored)\nCloud provider credentials per environment. Created by `cloud:init`. Contains keys for dev/staging/prod environments.\n\n## `.secrets.user.json` (gitignored)\nUser-defined secrets and service assignments. This is the file users edit.\n\nStructure:\n- `secrets` — key-value pairs for custom secrets (DOMAIN, RESEND_API_KEY, etc.)\n- Per-service entries with `secrets` array listing which keys that service needs\n\n## `.secrets.tsdevstack.json` (gitignored, auto-generated)\nFramework-generated secrets. NEVER edit — regenerated by `generate-secrets`.\nContains JWT keys, database passwords, service API keys, service URLs.\n\n## `.secrets.local.json` (gitignored, auto-generated)\nMerged output of `.secrets.tsdevstack.json` + `.secrets.user.json`. Injected into Docker containers.\n\n## Generated Files (NEVER edit)\n| File | Generated by | Purpose |\n|------|-------------|---------|\n| `docker-compose.yml` | `generate_docker_compose` | Local development containers |\n| `kong.tsdevstack.yml` | `generate_kong` | Kong gateway routes (from OpenAPI specs) |\n| `.secrets.tsdevstack.json` | `generate_secrets` | Framework secrets |\n| `.secrets.local.json` | `generate_secrets` | Merged secrets for Docker |\n| `.secrets.user.example.json` | `generate_secrets` | Stripped copy of user secrets (empty values) — committed to git, acts like `.env.example` |\n| `infrastructure/terraform/{env}/` | `infra:generate` | Terraform configs |\n| `infrastructure/kong/{env}/` | `infra:generate-kong` | Cloud Kong config |\n\n## Override Files (edit these)\n| File | Merged with | Purpose |\n|------|-------------|---------|\n| `kong.user.yml` | `kong.tsdevstack.yml` | Custom Kong routes |\n| `docker-compose.user.yml` | `docker-compose.yml` | Custom Docker config |\n| `.secrets.user.json` | `.secrets.tsdevstack.json` | Your secrets and assignments |';
|
|
1170
|
+
/**
 * Registers the configuration-files reference as a static MCP resource.
 * @param {object} server - MCP server exposing a `resource(name, uri, meta, handler)` method.
 */
function registerGuideConfigResource(server) {
    // Handler returns the same markdown body regardless of caller; only the echoed URI varies.
    const serveConfigGuide = async (uri) => ({
        contents: [
            {
                uri: uri.href,
                text: CONFIG_CONTENT,
                mimeType: 'text/markdown'
            }
        ]
    });
    const meta = {
        description: 'Config files reference: every config file, its structure, how to extend it, and which CLI command manages it.'
    };
    server.resource('guide-config', 'tsdevstack://guide/config', meta, serveConfigGuide);
}
|
|
1183
|
+
/**
 * Attaches every MCP resource group to the server.
 * Registration order matches the original call sequence.
 * @param {object} server - MCP server instance.
 */
function registerResources(server) {
    const registrars = [
        registerProjectStateResources,
        registerSecretsContextResources,
        registerKongRoutesResource,
        registerGuideResource,
        registerGuideWorkflowsResource,
        registerGuideNestCommonResource,
        registerGuideConfigResource
    ];
    for (const register of registrars) {
        register(server);
    }
}
|
|
1192
|
+
/**
 * Builds a fully-populated MCP server: constructs the instance, then
 * wires in all tools and resources.
 * @returns {McpServer} the configured server, ready to connect to a transport.
 */
function createServer() {
    const mcpServer = new McpServer({
        name: 'tsdevstack',
        version: '1.0.0'
    });
    // Tools first, then resources — same order as before.
    for (const attach of [registerTools, registerResources]) {
        attach(mcpServer);
    }
    return mcpServer;
}
|
|
1201
|
+
/**
 * Boots the MCP server and binds it to a stdio transport so an AI agent
 * can drive it over stdin/stdout. Resolves once the connection is established.
 */
async function mcpServe() {
    // Build the server before the transport — preserves original side-effect order.
    const mcpServer = createServer();
    const stdioTransport = new StdioServerTransport();
    await mcpServer.connect(stdioTransport);
}
|
|
1206
|
+
/**
 * Adds the `mcp:serve` subcommand to the CLI program.
 * @param {object} program - commander-style program exposing `command()`.
 */
function registerMcpPlugin(program) {
    const serveCommand = program.command('mcp:serve');
    serveCommand
        .description('Start MCP server for AI agent integration (stdio transport)')
        .action(wrapCommand(async () => {
            await mcpServe();
        }));
}
|
|
1211
|
+
export { initContext, registerMcpPlugin };
|