@dypai-ai/mcp 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json ADDED
@@ -0,0 +1,19 @@
1
+ {
2
+ "name": "@dypai-ai/mcp",
3
+ "version": "1.0.0",
4
+ "description": "DYPAI MCP Server — AI agent toolkit for building and deploying full-stack apps",
5
+ "type": "module",
6
+ "bin": {
7
+ "dypai-mcp": "src/index.mjs"
8
+ },
9
+ "files": [
10
+ "src/"
11
+ ],
12
+ "keywords": ["dypai", "mcp", "ai", "deploy", "backend", "database", "api", "cloudflare-pages"],
13
+ "license": "MIT",
14
+ "repository": {
15
+ "type": "git",
16
+ "url": "https://github.com/DYPAI-SOLUTIONS/dypai-mcp"
17
+ },
18
+ "homepage": "https://dypai.ai"
19
+ }
package/src/api.mjs ADDED
@@ -0,0 +1,60 @@
1
+ /**
2
+ * DYPAI API Client — HTTP client for the DYPAI control plane.
3
+ * All MCP tools that need backend data go through here.
4
+ */
5
+
6
+ import https from "https"
7
+ import http from "http"
8
+
9
// Base URL of the DYPAI control plane; override via DYPAI_API_URL.
// NOTE(review): host reads "dyapi.dypai.dev" — confirm "dyapi" vs "dypai" spelling.
const DEFAULT_API_URL = "https://dyapi.dypai.dev"

/**
 * Reads client configuration from the environment.
 * @returns {{ token: string, apiUrl: string }} Bearer token (empty string when
 *   DYPAI_TOKEN is unset) and the API base URL.
 */
function getConfig() {
  const token = process.env.DYPAI_TOKEN || ""
  const apiUrl = process.env.DYPAI_API_URL || DEFAULT_API_URL
  return { token, apiUrl }
}
17
+
18
/**
 * Performs an HTTP(S) request against the DYPAI control plane.
 *
 * @param {string} method - HTTP verb ("GET", "POST", "PATCH", "DELETE", ...).
 * @param {string} path - Path (plus optional query string) appended to the API base URL.
 * @param {object} [body] - Optional JSON-serializable request body.
 * @returns {Promise<object|string>} Parsed JSON response, or the raw response
 *   text when it is not valid JSON. Rejects with an Error on network failure
 *   or a non-2xx status (message includes status code and up to 500 chars of body).
 */
export function request(method, path, body) {
  const { token, apiUrl } = getConfig()
  const url = `${apiUrl}${path}`

  return new Promise((resolve, reject) => {
    const parsed = new URL(url)
    const mod = parsed.protocol === "https:" ? https : http
    const data = body ? JSON.stringify(body) : null
    const headers = {
      Authorization: `Bearer ${token}`,
      "Content-Type": "application/json",
    }
    // Content-Length must count bytes, not JS string length.
    if (data) headers["Content-Length"] = Buffer.byteLength(data)

    const req = mod.request({
      hostname: parsed.hostname,
      port: parsed.port,
      path: parsed.pathname + (parsed.search || ""),
      method,
      headers,
    }, (res) => {
      // Fix: decode as UTF-8 up front. Concatenating raw Buffer chunks with
      // `+=` stringifies each chunk independently, corrupting multi-byte
      // characters that happen to be split across chunk boundaries.
      res.setEncoding("utf8")
      let buf = ""
      res.on("data", (c) => (buf += c))
      res.on("end", () => {
        if (res.statusCode >= 200 && res.statusCode < 300) {
          // Some endpoints may return non-JSON bodies; fall back to raw text.
          try { resolve(JSON.parse(buf)) } catch { resolve(buf) }
        } else {
          reject(new Error(`HTTP ${res.statusCode}: ${buf.slice(0, 500)}`))
        }
      })
    })
    req.on("error", reject)
    if (data) req.write(data)
    req.end()
  })
}
54
+
55
/**
 * Thin verb-oriented wrapper around request(). All MCP tools that need
 * backend data call these helpers rather than request() directly.
 */
export const api = {
  get(path) { return request("GET", path) },
  post(path, body) { return request("POST", path, body) },
  patch(path, body) { return request("PATCH", path, body) },
  delete(path) { return request("DELETE", path) },
}
package/src/index.mjs ADDED
@@ -0,0 +1,270 @@
1
+ #!/usr/bin/env node
2
+
3
+ /**
4
+ * @dypai-ai/mcp — Local MCP server for DYPAI.
5
+ *
6
+ * Runs as a stdio MCP server in the user's IDE.
7
+ * Local tools (deploy, scaffold) have filesystem access.
8
+ * Backend tools (SQL, endpoints, etc.) proxy to the remote MCP server.
9
+ *
10
+ * Config in IDE:
11
+ * {
12
+ * "mcpServers": {
13
+ * "dypai": {
14
+ * "command": "npx",
15
+ * "args": ["-y", "@dypai-ai/mcp@latest"],
16
+ * "env": { "DYPAI_TOKEN": "xxx" }
17
+ * }
18
+ * }
19
+ * }
20
+ */
21
+
22
+ import { createInterface } from "readline"
23
+ import { deployTool } from "./tools/deploy.mjs"
24
+ import { scaffoldTool } from "./tools/scaffold.mjs"
25
+ import { addDomainTool, listDomainsTool, removeDomainTool } from "./tools/domains.mjs"
26
+ import { frontendStatusTool, buildStatusTool, listDeploymentsTool, getDeploymentLogsTool } from "./tools/status.mjs"
27
+ import { bulkUpsertTool } from "./tools/bulk-upsert.mjs"
28
+ import { proxyToolCall } from "./tools/proxy.mjs"
29
+
30
// ── Local tools (filesystem access) ─────────────────────────────────────────

// Tools executed in-process because they need the user's local disk
// (reading source trees, data files, etc.) — as opposed to REMOTE_TOOLS,
// which are proxied to the hosted MCP server.
const LOCAL_TOOLS = [
  // ── Frontend & Deploy ─────────────────────────────────────────────────────
  deployTool,
  scaffoldTool,
  buildStatusTool,
  listDeploymentsTool,
  getDeploymentLogsTool,
  // ── Domains ───────────────────────────────────────────────────────────────
  addDomainTool,
  listDomainsTool,
  removeDomainTool,
  // ── Data ──────────────────────────────────────────────────────────────────
  bulkUpsertTool,
]

// Name → tool lookup used by the tools/call dispatcher in handleRequest.
// NOTE(review): frontendStatusTool is imported above but not registered
// here — confirm whether that is intentional.
const localToolMap = new Map(LOCAL_TOOLS.map(t => [t.name, t]))
48
+
49
// ── Remote tools (loaded from MCP server at startup) ────────────────────────

// Fallback definitions in case remote MCP is unreachable.
// NOTE(review): this constant is never referenced anywhere in this module —
// tools/list serves LOCAL_TOOLS + REMOTE_TOOLS only. Confirm whether the
// unreachable-remote fallback was meant to be wired in or can be removed.
const FALLBACK_REMOTE_TOOLS = [
  { name: "list_projects", description: "List all your projects.", inputSchema: { type: "object", properties: {}, required: [] } },
  { name: "get_project", description: "Get project details.", inputSchema: { type: "object", properties: { project_id: { type: "string" } }, required: ["project_id"] } },
  { name: "create_project", description: "Create a new project (free plan).", inputSchema: { type: "object", properties: { name: { type: "string" } }, required: ["name"] } },
  { name: "execute_sql", description: "Run SQL on the project's database.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, sql: { type: "string" } }, required: ["sql"] } },
  { name: "get_app_tables", description: "List database tables.", inputSchema: { type: "object", properties: { project_id: { type: "string" } }, required: [] } },
  { name: "create_endpoint", description: "Create an API endpoint.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, name: { type: "string" }, method: { type: "string" }, workflow_code: { type: "object" } }, required: ["name", "method", "workflow_code"] } },
  { name: "update_endpoint", description: "Update an endpoint.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, name: { type: "string" } }, required: ["name"] } },
  { name: "delete_endpoint", description: "Delete an endpoint.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, name: { type: "string" } }, required: ["name"] } },
  { name: "search_endpoints", description: "Search endpoints.", inputSchema: { type: "object", properties: { project_id: { type: "string" } }, required: [] } },
  { name: "test_workflow", description: "Test an endpoint.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, endpoint_name: { type: "string" } }, required: ["endpoint_name"] } },
  { name: "manage_users", description: "Manage auth users.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, operation: { type: "string" } }, required: ["operation"] } },
  { name: "manage_roles", description: "Manage roles.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, operation: { type: "string" } }, required: ["operation"] } },
  { name: "get_auth_users", description: "List users.", inputSchema: { type: "object", properties: { project_id: { type: "string" } }, required: [] } },
  { name: "list_buckets", description: "List storage buckets.", inputSchema: { type: "object", properties: { project_id: { type: "string" } }, required: [] } },
  { name: "search_docs", description: "Search documentation.", inputSchema: { type: "object", properties: { query: { type: "string" } }, required: ["query"] } },
  { name: "search_templates", description: "Search workflow templates.", inputSchema: { type: "object", properties: { query: { type: "string" } }, required: ["query"] } },
  { name: "search_nodes", description: "Search workflow nodes.", inputSchema: { type: "object", properties: { query: { type: "string" } }, required: ["query"] } },
]
71
+
72
// Descriptions match the remote MCP server (dypai-mcp) exactly.
// These tools are advertised locally via tools/list but executed remotely
// through proxyToolCall (see handleRequest) — keep the definitions in sync
// with the server, since the client never validates against it.
const REMOTE_TOOLS = [
  // ── Project ───────────────────────────────────────────────────────────────
  { name: "list_projects", description: "Lists all projects you have access to across your organizations. Returns project id, name, description, organization, subscription plan, and status. Use this as the first step to discover which projects are available, then pass project_id to other tools.", inputSchema: { type: "object", properties: { organization_id: { type: "string", description: "Optional. Filter projects by organization UUID." } }, required: [] } },
  { name: "get_project", description: "Gets detailed information about a specific project. Returns project name, description, organization, plan, status, engine URL, frontend slug, and timestamps.", inputSchema: { type: "object", properties: { project_id: { type: "string" } }, required: ["project_id"] } },
  { name: "create_project", description: "Create a new DYPAI project (free plan). Creates a full project with database, API engine, GitHub repo, and frontend hosting. Provisioning takes ~1 minute.", inputSchema: { type: "object", properties: { name: { type: "string", description: "Project name (e.g. 'My Veterinary App')" }, organization_id: { type: "string", description: "Optional. Uses default org if omitted." }, description: { type: "string" }, template_slug: { type: "string", description: "Optional. Start from a template (e.g. 'clinic', 'gym'). Use search_templates to browse." } }, required: ["name"] } },
  { name: "get_app_credentials", description: "Lists available credentials in the current application. Returns API keys, anon key, service role key, and engine URL needed for SDK configuration.", inputSchema: { type: "object", properties: { project_id: { type: "string" } }, required: [] } },

  // ── Database ──────────────────────────────────────────────────────────────
  { name: "execute_sql", description: "Executes any SQL query on the project database (PostgreSQL). Supports SELECT, INSERT, UPDATE, DELETE, CREATE TABLE, ALTER TABLE, DROP TABLE. Platform schemas (system, auth, storage) are read-only for security.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, sql: { type: "string", description: "SQL query to execute" } }, required: ["sql"] } },
  { name: "get_app_tables", description: "Query the project's database tables. Returns all tables with their columns, types, constraints, and indexes.", inputSchema: { type: "object", properties: { project_id: { type: "string" } }, required: [] } },

  // ── API Endpoints ─────────────────────────────────────────────────────────
  { name: "create_endpoint", description: "Creates an endpoint as a workflow (nodes + edges). The workflow defines what the endpoint does: database queries, HTTP calls, AI agents, logic, transformations, etc.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, name: { type: "string", description: "URL-friendly name (e.g. 'get-products')" }, method: { type: "string", description: "GET, POST, PUT, DELETE, PATCH" }, description: { type: "string" }, workflow_code: { type: "object", description: "Workflow definition with nodes and edges" } }, required: ["name", "method", "workflow_code"] } },
  { name: "update_endpoint", description: "Updates fields of an existing endpoint. Only fields in 'updates' are modified — everything else stays the same.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, name: { type: "string" }, workflow_code: { type: "object" }, description: { type: "string" }, method: { type: "string" } }, required: ["name"] } },
  { name: "delete_endpoint", description: "Permanently deletes an endpoint. Irreversible operation.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, name: { type: "string" } }, required: ["name"] } },
  { name: "search_endpoints", description: "Search endpoints or retrieve the full detail of one by ID. Returns name, method, description, group, workflow code.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, query: { type: "string" }, endpoint_id: { type: "string" } }, required: [] } },
  { name: "test_workflow", description: "Test an endpoint's workflow with sample input data. Returns the execution result including node outputs and errors. Great for debugging.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, endpoint_name: { type: "string" }, input: { type: "object", description: "Test input data" }, method: { type: "string" } }, required: ["endpoint_name"] } },
  { name: "edit_workflow", description: "Edit a workflow using find-and-replace on the JSON text. Best for quick fixes — SQL queries, parameters, strings. Works like a code editor. USE THIS FOR MOST EDITS instead of rewriting the entire workflow.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, endpoint_id: { type: "string" }, old_string: { type: "string", description: "Text to find in the workflow JSON" }, new_string: { type: "string", description: "Replacement text" } }, required: ["endpoint_id", "old_string", "new_string"] } },
  { name: "add_node", description: "Add a new node to an existing workflow and connect it. Specify the node type, parameters, and where to insert it.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, endpoint_id: { type: "string" }, node_type: { type: "string" }, parameters: { type: "object" }, after_node: { type: "string", description: "ID of the node to insert after" } }, required: ["endpoint_id", "node_type"] } },
  { name: "remove_node", description: "Remove a node from a workflow. By default, reconnects surrounding nodes to maintain the flow.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, endpoint_id: { type: "string" }, node_id: { type: "string" } }, required: ["endpoint_id", "node_id"] } },
  { name: "get_endpoint_versions", description: "List version history for an endpoint. Shows previous workflow snapshots that can be restored with rollback_endpoint.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, endpoint_name: { type: "string" } }, required: ["endpoint_name"] } },
  { name: "rollback_endpoint", description: "Rollback an endpoint's workflow to a previous version. Use get_endpoint_versions first to see available versions.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, endpoint_name: { type: "string" }, version_id: { type: "string" } }, required: ["endpoint_name", "version_id"] } },
  { name: "search_nodes", description: "Discover and query available nodes for building workflows. Shows what building blocks exist: HTTP requests, database, email, Stripe, Telegram, AI agent, etc.", inputSchema: { type: "object", properties: { query: { type: "string", description: "What you need (e.g. 'send email', 'stripe', 'database')" } }, required: ["query"] } },
  { name: "manage_endpoint_groups", description: "Organize endpoints into groups (folders). Operations: create, list, update, delete.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, operation: { type: "string", description: "create, list, update, delete" } }, required: ["operation"] } },

  // ── Auth & Users ──────────────────────────────────────────────────────────
  { name: "manage_users", description: "Manage authenticated users. Operations: create, delete, ban, unban, update_role, set_password.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, operation: { type: "string", description: "create, delete, ban, unban, update_role, set_password" } }, required: ["operation"] } },
  { name: "manage_roles", description: "Manage user roles. Operations: create, list, update, delete.", inputSchema: { type: "object", properties: { project_id: { type: "string" }, operation: { type: "string", description: "create, list, update, delete" } }, required: ["operation"] } },
  { name: "get_auth_users", description: "List authenticated users with their email, role, status, and last login.", inputSchema: { type: "object", properties: { project_id: { type: "string" } }, required: [] } },

  // ── Storage ───────────────────────────────────────────────────────────────
  { name: "list_buckets", description: "Manage storage buckets for the project. List buckets with their configuration (public/private, file size limits, allowed MIME types).", inputSchema: { type: "object", properties: { project_id: { type: "string" } }, required: [] } },

  // ── Knowledge ─────────────────────────────────────────────────────────────
  { name: "search_docs", description: "Search DYPAI documentation. Use this when unsure about SDK usage, auth patterns, workflow nodes, or platform features. Returns relevant documentation chunks.", inputSchema: { type: "object", properties: { query: { type: "string", description: "What you want to learn about" } }, required: ["query"] } },
  { name: "search_templates", description: "Search workflow templates by description. Returns ready-to-use workflow code for common patterns: CRUD operations, payment gateways, email sending, AI chatbots, data pipelines, etc.", inputSchema: { type: "object", properties: { query: { type: "string", description: "What the workflow should do (e.g. 'send email', 'stripe payment')" }, category: { type: "string", description: "Optional: AI, Database, Payments, Communication, Logic, Storage" } }, required: ["query"] } },
]
110
+
111
// ── Server Instructions ──────────────────────────────────────────────────────

// System prompt returned to the client in the `initialize` response
// (see handleRequest). This is runtime data consumed by the connecting AI,
// not developer documentation — edit with care.
// Note the escaped \${...} sequences below: they must reach the client
// literally as ${...} placeholder syntax, not be interpolated here.
const SERVER_INSTRUCTIONS = `You are building full-stack applications on the DYPAI platform. You handle backend (endpoints, database, auth) AND frontend (SDK integration, UI code).

## Getting Started
1. Call list_projects() to find your project_id
2. Use get_app_tables and search_endpoints to understand existing state
3. BEFORE implementing any feature, call search_docs with the topic (e.g. "auth", "upload files", "real-time") to get the correct SDK patterns
4. Build backend first (tables + endpoints), then frontend

## Build Backend
1. Create tables with execute_sql (check get_app_tables first to avoid duplicates)
2. Create endpoints with create_endpoint using workflow_code
3. Test with test_workflow immediately after creating/updating
4. Use search_templates to find ready-made workflow patterns
5. Use search_nodes to discover available node types
6. Use search_docs when unsure about patterns

## Build Frontend
- SDK client is already configured at src/lib/dypai.ts — just import it:
  import { dypai } from './lib/dypai'
- API calls: dypai.api.get(name), dypai.api.post(name, body), dypai.api.put(), dypai.api.delete()
- Auth: dypai.auth.signInWithPassword(), signUp(), signOut(), getSession()
- Files: dypai.api.upload(name, file) — always upload from browser
- Every method returns { data, error } — never throws
- Do NOT call the API directly with fetch() — always use the SDK
- Do NOT create auth endpoints — auth is built-in via SDK (dypai.auth.*)

## Deploy Frontend
- Use deploy_frontend tool with the project's source directory
- Cloudflare Pages builds automatically (Vite, Next.js, Astro, etc.)
- Site goes live at https://{slug}.dypai.app within ~1 minute
- Check build progress with get_build_status
- View build logs with get_deployment_logs if something fails

## Import Data
- Use bulk_upsert to import CSV or JSON files into database tables
- Great for seeding initial data (products, categories, config)

## Custom Domains
- Use add_domain to connect a user's own domain
- The user configures a CNAME record at their registrar
- SSL is automatic via Cloudflare

## Endpoint Rules
- Auth modes: jwt (default, for users), api_key (server-to-server), public (read-only public data)
- NEVER use public for endpoints that write data
- Placeholders: \${input.<field>}, \${nodes.<node_id>.<field>}, \${current_user_id}
- \${current_user_id} only works with jwt auth mode
- Always test endpoints after creating them

## Common Mistakes
- Do NOT create auth endpoints — auth is built-in via SDK (dypai.auth.*)
- Do NOT use fetch() directly — use the SDK (dypai.api.*)
- Do NOT assume endpoints exist — check with search_endpoints first
- Do NOT skip testing — always test_workflow after create/update
`
168
+
169
// ── MCP Protocol (JSON-RPC over stdio) ──────────────────────────────────────

// Everything advertised via tools/list. Local tools run in-process;
// remote tools are proxied (see handleRequest's tools/call dispatch).
const allTools = [...LOCAL_TOOLS, ...REMOTE_TOOLS]
172
+
173
/** Serializes a successful JSON-RPC 2.0 response envelope for the given id. */
function makeResponse(id, result) {
  const envelope = { jsonrpc: "2.0", id, result }
  return JSON.stringify(envelope)
}
176
+
177
/** Serializes a JSON-RPC 2.0 error envelope with the given code and message. */
function makeError(id, code, message) {
  const envelope = { jsonrpc: "2.0", id, error: { code, message } }
  return JSON.stringify(envelope)
}
180
+
181
/**
 * Dispatches a single incoming JSON-RPC message.
 *
 * @param {{ id?: any, method: string, params?: object }} msg - Parsed JSON-RPC message.
 * @returns {Promise<string|null>} Serialized response line, or null for
 *   notifications (which must not be answered).
 */
async function handleRequest(msg) {
  const { id, method, params } = msg

  switch (method) {
    case "initialize":
      return makeResponse(id, {
        protocolVersion: "2024-11-05",
        capabilities: { tools: {} },
        serverInfo: { name: "dypai", version: "1.0.0" },
        instructions: SERVER_INSTRUCTIONS,
      })

    case "tools/list": {
      // Advertise local + remote tools with only the MCP-relevant fields.
      const tools = allTools.map(({ name, description, inputSchema }) => ({ name, description, inputSchema }))
      return makeResponse(id, { tools })
    }

    case "tools/call": {
      const { name, arguments: args } = params || {}
      const input = args || {}
      try {
        let result
        if (localToolMap.has(name)) {
          // Local tool — execute directly (needs filesystem access).
          result = await localToolMap.get(name).execute(input)
        } else if (REMOTE_TOOLS.some(t => t.name === name)) {
          // Remote tool — proxy to the hosted MCP server.
          result = await proxyToolCall(name, input)
        } else {
          return makeError(id, -32601, `Unknown tool: ${name}`)
        }
        const text = typeof result === "string" ? result : JSON.stringify(result, null, 2)
        return makeResponse(id, { content: [{ type: "text", text }] })
      } catch (e) {
        // Tool failures are reported as tool results (isError), not
        // protocol-level JSON-RPC errors, per MCP convention.
        return makeResponse(id, {
          content: [{ type: "text", text: `Error: ${e.message}` }],
          isError: true,
        })
      }
    }

    case "notifications/initialized":
    case "notifications/cancelled":
      return null // notifications don't get responses

    default:
      return makeError(id, -32601, `Method not found: ${method}`)
  }
}
238
+
239
// ── Stdio Transport ─────────────────────────────────────────────────────────
//
// MCP stdio framing: newline-delimited JSON on stdin/stdout; diagnostics go
// to stderr so they never corrupt the protocol stream. Frames are parsed
// manually below. Fix: a `createInterface({ input: process.stdin })` readline
// instance was created here but never used — it attached a second consumer
// that read and discarded stdin lines redundantly; it has been removed.
// (Attaching the "data" listener resumes stdin on its own.)

// Accumulates partial stdin chunks until a full newline-terminated frame arrives.
let buffer = ""

process.stdin.on("data", (chunk) => {
  buffer += chunk.toString()

  // MCP uses newline-delimited JSON
  let newlineIdx
  while ((newlineIdx = buffer.indexOf("\n")) !== -1) {
    const line = buffer.slice(0, newlineIdx).trim()
    buffer = buffer.slice(newlineIdx + 1)

    if (!line) continue

    try {
      const msg = JSON.parse(line)
      handleRequest(msg).then((response) => {
        // Notifications resolve to null and must not be answered.
        if (response) {
          process.stdout.write(response + "\n")
        }
      }).catch((e) => {
        process.stderr.write(`MCP error: ${e.message}\n`)
      })
    } catch (e) {
      process.stderr.write(`JSON parse error: ${e.message}\n`)
    }
  }
})

process.stderr.write("DYPAI MCP server started (stdio)\n")
@@ -0,0 +1,155 @@
1
+ /**
2
+ * bulk_upsert — Import data from a local CSV or JSON file into a database table.
3
+ *
4
+ * Reads the file from disk (like deploy_frontend), sends to the API.
5
+ * Supports upsert: if a row with the same unique key exists, it updates instead of inserting.
6
+ *
7
+ * Great for seeding initial data (products, categories, users, etc.).
8
+ */
9
+
10
+ import { readFileSync, existsSync } from "fs"
11
+ import { basename } from "path"
12
+ import { proxyToolCall } from "./proxy.mjs"
13
+
14
/**
 * bulk_upsert MCP tool — reads a local CSV/JSON file and bulk inserts (or
 * upserts) its rows into a project table by generating INSERT statements
 * that are executed through the remote execute_sql tool.
 *
 * Fixes vs. previous revision:
 * - Removed dead code: an unused parameterized ($1, $2, ...) SQL build that
 *   was computed and then discarded (execute_sql takes literal SQL only).
 * - CSV lines split on \r?\n so Windows files don't leak \r into values.
 * - Table/column names validated as plain identifiers (they are interpolated
 *   into SQL text, so this blocks SQL injection via metadata).
 * - Numeric-literal detection tightened: only strict decimals are unquoted,
 *   so "007" and " " are no longer silently mangled into numbers.
 * - Object values are serialized as JSON text instead of "[object Object]".
 */
export const bulkUpsertTool = {
  name: "bulk_upsert",
  description: `Import data from a local CSV or JSON file into a database table.

Reads the file from disk and bulk inserts/updates rows. Supports:
- CSV files (comma-separated, with header row)
- JSON files (array of objects)

If upsert_key is provided, existing rows with matching key are updated instead of duplicated.
NOTE: upsert_key requires a UNIQUE constraint on that column. Create one first with execute_sql if needed:
ALTER TABLE products ADD CONSTRAINT products_name_unique UNIQUE (name);

Great for seeding initial data: products, categories, config, etc.`,

  inputSchema: {
    type: "object",
    properties: {
      project_id: {
        type: "string",
        description: "Project UUID.",
      },
      file_path: {
        type: "string",
        description: "Absolute path to CSV or JSON file (e.g. /Users/me/data/products.csv)",
      },
      table: {
        type: "string",
        description: "Target table name (e.g. 'products', 'categories')",
      },
      upsert_key: {
        type: "string",
        description: "Optional. Column name for upsert (e.g. 'email', 'slug'). If a row with this value exists, it updates instead of inserting.",
      },
    },
    required: ["project_id", "file_path", "table"],
  },

  /**
   * @param {{ project_id: string, file_path: string, table: string, upsert_key?: string }} args
   * @returns {Promise<object>} `{ error }` on validation/parse failure, else a
   *   summary: { success, table, rows_total, rows_inserted, rows_failed, upsert_key, errors?, message }.
   */
  async execute({ project_id, file_path, table, upsert_key }) {
    if (!existsSync(file_path)) {
      return { error: `File not found: ${file_path}` }
    }

    const fileName = basename(file_path)
    const ext = fileName.split(".").pop()?.toLowerCase()

    if (!["csv", "json"].includes(ext)) {
      return { error: "File must be .csv or .json" }
    }

    // Identifiers are interpolated into SQL text below; restrict them to
    // plain identifiers so file contents/arguments cannot inject SQL.
    const IDENT = /^[A-Za-z_][A-Za-z0-9_]*$/
    if (!IDENT.test(table)) {
      return { error: `Invalid table name: ${table}` }
    }

    // Read and parse data
    let rows
    try {
      const raw = readFileSync(file_path, "utf-8")

      if (ext === "json") {
        const parsed = JSON.parse(raw)
        rows = Array.isArray(parsed) ? parsed : [parsed]
      } else {
        // Naive CSV parsing: no support for quoted commas or embedded newlines.
        // Split on \r?\n so Windows (CRLF) files don't leave \r in values.
        const lines = raw.trim().split(/\r?\n/)
        if (lines.length < 2) return { error: "CSV must have a header row and at least one data row" }

        const headers = lines[0].split(",").map(h => h.trim().replace(/^"|"$/g, ""))
        rows = lines.slice(1).map(line => {
          const values = line.split(",").map(v => v.trim().replace(/^"|"$/g, ""))
          const obj = {}
          headers.forEach((h, i) => { obj[h] = values[i] || null })
          return obj
        })
      }
    } catch (e) {
      return { error: `Failed to parse file: ${e.message}` }
    }

    if (!rows || !rows.length) {
      return { error: "No data rows found in file" }
    }

    // Render one JS value as a SQL literal (execute_sql has no bind params).
    const sqlLiteral = (v) => {
      if (v === null || v === undefined || v === "") return "NULL"
      if (typeof v === "number" || typeof v === "boolean") return String(v)
      // Only strict decimal literals go in unquoted; "007", "1e5", " " etc.
      // stay quoted and Postgres casts them against the column type.
      if (typeof v === "string" && /^-?(0|[1-9]\d*)(\.\d+)?$/.test(v)) return v
      const text = typeof v === "object" ? JSON.stringify(v) : String(v)
      return `'${text.replace(/'/g, "''")}'`
    }

    let inserted = 0
    const errors = []

    // Batch in chunks of 50 rows; each batch is one INSERT (optionally with
    // ON CONFLICT ... DO UPDATE when upsert_key is present in the columns).
    const BATCH_SIZE = 50
    for (let i = 0; i < rows.length; i += BATCH_SIZE) {
      const batch = rows.slice(i, i + BATCH_SIZE)
      const batchCols = Object.keys(batch[0])
      const batchNo = Math.floor(i / BATCH_SIZE) + 1

      const badCol = batchCols.find(c => !IDENT.test(c))
      if (badCol) {
        errors.push(`Batch ${batchNo}: invalid column name "${badCol}"`)
        continue
      }

      const batchValues = batch.map(row =>
        `(${batchCols.map(col => sqlLiteral(row[col])).join(", ")})`
      ).join(",\n")

      let batchSql
      if (upsert_key && batchCols.includes(upsert_key)) {
        const updateCols = batchCols.filter(c => c !== upsert_key).map(c => `${c} = EXCLUDED.${c}`).join(", ")
        batchSql = `INSERT INTO ${table} (${batchCols.join(", ")}) VALUES\n${batchValues}\nON CONFLICT (${upsert_key}) DO UPDATE SET ${updateCols}`
      } else {
        batchSql = `INSERT INTO ${table} (${batchCols.join(", ")}) VALUES\n${batchValues}`
      }

      try {
        const result = await proxyToolCall("execute_sql", { project_id, sql: batchSql })
        if (result?.error) throw new Error(result.error)
        inserted += batch.length
      } catch (e) {
        errors.push(`Batch ${batchNo}: ${e.message.slice(0, 100)}`)
      }
    }

    return {
      success: errors.length === 0,
      table,
      rows_total: rows.length,
      rows_inserted: inserted,
      rows_failed: rows.length - inserted,
      upsert_key: upsert_key || null,
      errors: errors.length > 0 ? errors : undefined,
      message: `${inserted} of ${rows.length} rows imported into "${table}"${upsert_key ? ` (upsert on ${upsert_key})` : ""}.`,
    }
  },
}
@@ -0,0 +1,133 @@
1
+ /**
2
+ * deploy_frontend — Reads source files from disk and deploys to DYPAI.
3
+ *
4
+ * This is the key tool that requires local filesystem access.
5
+ * The AI passes sourceDirectory, this tool reads all source files,
6
+ * sends them to the API, which commits to GitHub → CF Pages builds.
7
+ */
8
+
9
+ import { readFileSync, readdirSync, statSync, existsSync } from "fs"
10
+ import { join, relative } from "path"
11
+ import { api } from "../api.mjs"
12
+
13
// Hard cap on total bytes collected for one deploy payload (50 MB).
const MAX_SOURCE_SIZE = 50 * 1024 * 1024
// Directory names that are never walked: build output, caches, VCS metadata.
const IGNORE_DIRS = new Set(["node_modules", "dist", "build", "out", ".git", ".next", ".cache", ".turbo", ".vercel", ".output", "coverage"])
// File extensions treated as deployable source text.
const SOURCE_EXTS = new Set([".ts", ".tsx", ".js", ".jsx", ".mjs", ".cjs", ".css", ".scss", ".less", ".html", ".json", ".toml", ".yaml", ".yml", ".md", ".mdx", ".txt", ".svg", ".ico", ".webmanifest"])
// Image extensions allowed when the file is at most MAX_IMAGE bytes.
const IMAGE_EXTS = new Set([".png", ".jpg", ".jpeg", ".gif", ".webp", ".avif", ".ico", ".svg"])
// File names that must never be uploaded (secrets and OS litter).
const BLOCKED = new Set([".env", ".env.local", ".env.production", ".env.development", ".DS_Store", "Thumbs.db"])
// Per-image size ceiling (2 MB).
const MAX_IMAGE = 2 * 1024 * 1024
19
+
20
/**
 * Recursively collect deployable source files under `dir`.
 *
 * Walks the tree (skipping IGNORE_DIRS, hidden directories, and BLOCKED
 * files), keeps known source extensions plus root-level config manifests
 * and small images, and returns them base64-encoded along with a running
 * byte total. Collection stops once MAX_SOURCE_SIZE is exceeded.
 *
 * @param {string} dir - Absolute path of the project root to walk.
 * @returns {{files: {path: string, content: string}[], total: number}}
 */
function collectSource(dir) {
  const files = []
  let total = 0

  function walk(d, rel) {
    if (total > MAX_SOURCE_SIZE) return
    for (const entry of readdirSync(d)) {
      if (IGNORE_DIRS.has(entry)) continue
      if (BLOCKED.has(entry)) continue
      const full = join(d, entry)
      const path = rel ? `${rel}/${entry}` : entry
      try {
        // Single stat per entry, inside the try: the original stat'ed hidden
        // entries outside the try, so a dangling symlink or permission error
        // aborted the entire walk instead of skipping one entry.
        const stat = statSync(full)
        if (stat.isDirectory()) {
          // Hidden directories (.github, .vscode, ...) are excluded.
          if (entry.startsWith(".")) continue
          walk(full, path)
          continue
        }
        if (!stat.isFile()) continue
        const ext = entry.includes(".") ? `.${entry.split(".").pop().toLowerCase()}` : ""
        let ok = SOURCE_EXTS.has(ext)
        // Root-level build/config manifests are always included.
        if (!rel && /^(package\.json|package-lock\.json|bun\.lockb|vite\.config|tsconfig|tailwind\.config|postcss\.config|next\.config|astro\.config|nuxt\.config)/.test(entry)) ok = true
        if (IMAGE_EXTS.has(ext) && stat.size <= MAX_IMAGE) ok = true
        if (ok) {
          const content = readFileSync(full)
          if (total + content.length > MAX_SOURCE_SIZE) return
          total += content.length
          files.push({ path, content: content.toString("base64") })
        }
      } catch {
        // Unreadable entry — skip it, keep walking.
      }
    }
  }

  walk(dir, "")
  return { files, total }
}
53
+
54
/**
 * Infer the frontend framework from a project's package.json dependencies.
 *
 * @param {string} dir - Project directory containing package.json.
 * @returns {string|null} Display name ("Next.js", "Vite", ...), "Node.js"
 *   when no known framework dependency is present, or null when
 *   package.json is missing or unparseable.
 */
function detectFramework(dir) {
  const manifest = join(dir, "package.json")
  if (!existsSync(manifest)) return null
  try {
    const pkg = JSON.parse(readFileSync(manifest, "utf-8"))
    const deps = { ...pkg.dependencies, ...pkg.devDependencies }
    // Checked in priority order: meta-frameworks first, bare React last.
    const known = [
      ["next", "Next.js"],
      ["astro", "Astro"],
      ["nuxt", "Nuxt"],
      ["@sveltejs/kit", "SvelteKit"],
      ["vite", "Vite"],
      ["react-scripts", "Create React App"],
      ["react", "React"],
    ]
    const hit = known.find(([dep]) => deps[dep])
    return hit ? hit[1] : "Node.js"
  } catch {
    return null
  }
}
70
+
71
export const deployTool = {
  name: "deploy_frontend",
  description: `Deploy frontend source code from a local directory.

Reads all source files from the specified directory, uploads them to DYPAI,
and Cloudflare Pages automatically builds and deploys the project.

Supports: React, Vite, Next.js, Astro, SvelteKit, Nuxt, CRA, and more.
The build runs in the cloud — no local Node.js or npm required.

After deploying, the site is live at https://{slug}.dypai.app within ~1 minute.`,

  inputSchema: {
    type: "object",
    properties: {
      sourceDirectory: {
        type: "string",
        description: "Absolute path to the project source directory (e.g. /Users/me/my-app). Must contain a package.json.",
      },
      project_id: {
        type: "string",
        description: "Project UUID. Required.",
      },
    },
    required: ["sourceDirectory", "project_id"],
  },

  /**
   * Collect source files from `sourceDirectory` and POST them to the
   * deploy endpoint. All failure modes return an { error } object
   * rather than throwing.
   */
  async execute({ sourceDirectory, project_id }) {
    // Guard clauses: the directory must exist and look like a project.
    if (!existsSync(sourceDirectory)) {
      return { error: `Directory not found: ${sourceDirectory}` }
    }
    if (!existsSync(join(sourceDirectory, "package.json"))) {
      return { error: `No package.json found in ${sourceDirectory}. Is this a frontend project?` }
    }

    const framework = detectFramework(sourceDirectory)
    const { files, total } = collectSource(sourceDirectory)
    if (files.length === 0) {
      return { error: "No source files found." }
    }

    try {
      const result = await api.post(
        `/api/engine/${project_id}/frontend/deploy/source`,
        { files }
      )
      const sizeKb = Math.round(total / 1024)
      return {
        success: true,
        url: result.url,
        files_pushed: files.length,
        size_bytes: total,
        framework: framework,
        build: "cloudflare_pages",
        message: `Deployed ${files.length} files (${sizeKb} KB). ${framework || "Project"} building in the cloud. Live in ~1 minute at ${result.url}`,
      }
    } catch (e) {
      return { error: `Deploy failed: ${e.message}` }
    }
  },
}
@@ -0,0 +1,73 @@
1
+ /**
2
+ * Domain management tools — add, list, remove custom domains.
3
+ */
4
+
5
+ import { api } from "../api.mjs"
6
+
7
export const addDomainTool = {
  name: "add_domain",
  description: `Add a custom domain to your project's frontend.

Returns DNS instructions (CNAME record) that the user needs to configure
at their domain registrar. SSL is automatic once DNS propagates.`,

  inputSchema: {
    type: "object",
    properties: {
      project_id: { type: "string", description: "Project UUID." },
      domain: { type: "string", description: "Domain to add (e.g. www.example.com)" },
    },
    required: ["project_id", "domain"],
  },

  // POSTs the domain to the control plane; failures surface as { error }.
  async execute({ project_id, domain }) {
    const endpoint = `/api/engine/${project_id}/frontend/domains`
    try {
      const result = await api.post(endpoint, { domain })
      return result
    } catch (err) {
      return { error: err.message }
    }
  },
}
31
+
32
export const listDomainsTool = {
  name: "list_domains",
  description: "List all custom domains configured for the project's frontend.",

  inputSchema: {
    type: "object",
    properties: {
      project_id: { type: "string", description: "Project UUID." },
    },
    required: ["project_id"],
  },

  // Fetches the domain list; failures surface as { error } objects.
  async execute({ project_id }) {
    const endpoint = `/api/engine/${project_id}/frontend/domains`
    try {
      const domains = await api.get(endpoint)
      return domains
    } catch (err) {
      return { error: err.message }
    }
  },
}
52
+
53
export const removeDomainTool = {
  name: "remove_domain",
  description: "Remove a custom domain from the project's frontend.",

  inputSchema: {
    type: "object",
    properties: {
      project_id: { type: "string", description: "Project UUID." },
      domain: { type: "string", description: "Domain to remove." },
    },
    required: ["project_id", "domain"],
  },

  /**
   * Remove `domain` from the project's frontend.
   *
   * `domain` is user input used as a URL path segment, so it is
   * percent-encoded — characters like "/", "?" or "#" would otherwise
   * corrupt the request path. Failures surface as { error }.
   */
  async execute({ project_id, domain }) {
    try {
      return await api.delete(`/api/engine/${project_id}/frontend/domains/${encodeURIComponent(domain)}`)
    } catch (e) {
      return { error: e.message }
    }
  },
}
@@ -0,0 +1,134 @@
1
+ /**
2
+ * Proxy to remote DYPAI MCP server.
3
+ *
4
+ * Connects to mcp.dypai.dev as an MCP client (HTTP streamable transport).
5
+ * Forwards tool calls and returns results.
6
+ *
7
+ * This preserves all the server-side logic (SQL validation, DB connections,
8
+ * security checks, tracking) without reimplementing it locally.
9
+ */
10
+
11
+ import https from "https"
12
+ import http from "http"
13
+
14
// Base URL of the remote DYPAI MCP server (overridable for self-hosting/tests).
const MCP_BASE = process.env.DYPAI_MCP_URL || "https://mcp.dypai.dev"
// Streamable-HTTP MCP endpoint all JSON-RPC messages are POSTed to.
const MCP_ENDPOINT = `${MCP_BASE}/mcp`

// Session ID issued by the remote server on initialize; echoed back on
// every subsequent request via the Mcp-Session-Id header.
let sessionId = null
18
+
19
/**
 * Send one JSON-RPC message to the remote MCP endpoint.
 *
 * Handles both plain JSON and SSE (text/event-stream) response bodies,
 * captures the Mcp-Session-Id header for session continuity, and rejects
 * on transport errors or non-2xx status codes.
 *
 * @param {object} body - JSON-RPC request or notification payload.
 * @returns {Promise<object>} Parsed response, or { result: rawBody } when
 *   the body is not valid JSON.
 */
function mcpRequest(body) {
  const token = process.env.DYPAI_TOKEN || ""

  return new Promise((resolve, reject) => {
    const parsed = new URL(MCP_ENDPOINT)
    const mod = parsed.protocol === "https:" ? https : http
    const data = JSON.stringify(body)

    const headers = {
      "Content-Type": "application/json",
      "Content-Length": Buffer.byteLength(data),
      Authorization: `Bearer ${token}`,
      Accept: "application/json, text/event-stream",
    }
    if (sessionId) {
      headers["Mcp-Session-Id"] = sessionId
    }

    const req = mod.request({
      hostname: parsed.hostname,
      port: parsed.port,
      path: parsed.pathname,
      method: "POST",
      headers,
    }, (res) => {
      // Capture session ID from response
      const newSession = res.headers["mcp-session-id"]
      if (newSession) sessionId = newSession

      let buf = ""
      res.on("data", (c) => (buf += c))
      res.on("end", () => {
        if (res.statusCode >= 200 && res.statusCode < 300) {
          // Decide SSE vs JSON from the Content-Type header instead of
          // sniffing the body: a plain JSON payload may legitimately
          // contain the substring "data: " and would be mis-parsed.
          const contentType = String(res.headers["content-type"] || "")
          const isSse = contentType.includes("text/event-stream") ||
            (!contentType && buf.startsWith("data: "))
          if (isSse) {
            // Last "data: " line carries the final JSON-RPC message.
            const lines = buf.split("\n").filter((l) => l.startsWith("data: "))
            const lastData = lines[lines.length - 1]
            if (lastData) {
              try {
                resolve(JSON.parse(lastData.slice("data: ".length)))
              } catch {
                resolve({ result: buf })
              }
            } else {
              resolve({ result: buf })
            }
          } else {
            try { resolve(JSON.parse(buf)) } catch { resolve({ result: buf }) }
          }
        } else {
          reject(new Error(`MCP remote error ${res.statusCode}: ${buf.slice(0, 300)}`))
        }
      })
    })
    req.on("error", reject)
    req.write(data)
    req.end()
  })
}
78
+
79
/**
 * Lazily perform the MCP handshake with the remote server.
 *
 * No-op once a session ID has been captured. Failures are logged to
 * stderr and swallowed on purpose — some servers accept tool calls
 * without a prior initialize.
 */
async function ensureInitialized() {
  if (sessionId) return
  const initRequest = {
    jsonrpc: "2.0",
    id: "init-proxy",
    method: "initialize",
    params: {
      protocolVersion: "2024-11-05",
      capabilities: {},
      clientInfo: { name: "dypai-mcp-local", version: "1.0.0" },
    },
  }
  try {
    await mcpRequest(initRequest)
    // The handshake completes with the initialized notification.
    await mcpRequest({ jsonrpc: "2.0", method: "notifications/initialized" })
  } catch (e) {
    process.stderr.write(`MCP proxy init warning: ${e.message}\n`)
  }
}
102
+
103
/**
 * Forward a tool call to the remote DYPAI MCP server.
 *
 * Ensures the session is initialized, issues a JSON-RPC tools/call, and
 * unwraps the result: text content is JSON-parsed when possible,
 * otherwise returned as-is.
 *
 * @param {string} toolName - Remote tool to invoke.
 * @param {object} [args] - Tool arguments (defaults to {}).
 * @returns {Promise<any>} Parsed tool result.
 * @throws {Error} When the remote responds with a JSON-RPC error.
 */
export async function proxyToolCall(toolName, args) {
  await ensureInitialized()

  const response = await mcpRequest({
    jsonrpc: "2.0",
    id: `proxy-${Date.now()}`,
    method: "tools/call",
    params: {
      name: toolName,
      arguments: args || {},
    },
  })

  // Per JSON-RPC 2.0 a response carries either `error` or `result`.
  // Check the error first so a malformed response containing both is
  // surfaced as a failure instead of being misread as success.
  if (response.error) {
    throw new Error(response.error.message || "Remote tool call failed")
  }

  if (response.result) {
    const content = response.result.content
    if (Array.isArray(content) && content[0]?.text) {
      try {
        return JSON.parse(content[0].text)
      } catch {
        return content[0].text
      }
    }
    return response.result
  }

  return response
}
@@ -0,0 +1,114 @@
1
+ /**
2
+ * download_template — Creates a new project from a DYPAI template.
3
+ *
4
+ * Creates a new project directory on disk with
5
+ * SDK pre-configured, MCP ready, and .env set up.
6
+ */
7
+
8
+ import { writeFileSync, mkdirSync, existsSync } from "fs"
9
+ import { join } from "path"
10
+ import { api } from "../api.mjs"
11
+
12
export const scaffoldTool = {
  name: "download_template",
  description: `Create a new frontend project from a DYPAI template.

Scaffolds a project directory with:
- React + Vite + TypeScript
- DYPAI SDK pre-configured
- MCP config for your IDE
- .env with engine URL

Use search_templates first to find available templates, then pass the template slug here.
Or use "blank" for an empty starter project.`,

  inputSchema: {
    type: "object",
    properties: {
      directory: {
        type: "string",
        description: "Absolute path where the project should be created (e.g. /Users/me/my-new-app)",
      },
      project_id: {
        type: "string",
        description: "Project UUID to connect the template to.",
      },
      template: {
        type: "string",
        description: 'Template slug (e.g. "clinic", "gym", "blank"). Use search_templates to find available templates.',
        default: "blank",
      },
    },
    required: ["directory", "project_id"],
  },

  /**
   * Scaffold a project at `directory`: prefer a template downloaded from
   * the API, fall back to a minimal Vite + React starter, then write the
   * files to disk and best-effort `npm install`.
   * Expected failures are returned as { error }, never thrown.
   */
  async execute({ directory, project_id, template = "blank" }) {
    if (existsSync(join(directory, "package.json"))) {
      return { error: `Directory already has a package.json. Pick an empty directory or a new name.` }
    }

    const engineUrl = `https://${project_id}.dypai.dev`

    // Try to download the template from the API (best-effort; any failure
    // falls through to the local starter below).
    let files = []
    try {
      const res = await api.get(`/api/engine/${project_id}/frontend/template?slug=${encodeURIComponent(template)}`)
      if (res.files) files = res.files
    } catch {}

    // Fallback: create basic Vite starter
    if (!files.length) {
      files = [
        { path: "package.json", content: JSON.stringify({
          // Derive the project name from the last path segment; split on
          // both separators so Windows paths work too.
          name: directory.split(/[\\/]/).pop() || "my-app",
          private: true, version: "0.0.1", type: "module",
          scripts: { dev: "vite", build: "vite build", preview: "vite preview" },
          dependencies: { "@dypai-ai/client-sdk": "latest", react: "^19.0.0", "react-dom": "^19.0.0" },
          devDependencies: { "@vitejs/plugin-react": "^4.3.0", vite: "^6.0.0", typescript: "^5.6.0", "@types/react": "^19.0.0", "@types/react-dom": "^19.0.0" },
        }, null, 2) },
        { path: "vite.config.ts", content: `import { defineConfig } from 'vite'\nimport react from '@vitejs/plugin-react'\n\nexport default defineConfig({ plugins: [react()] })\n` },
        { path: "tsconfig.json", content: JSON.stringify({ compilerOptions: { target: "ES2020", lib: ["ES2020","DOM","DOM.Iterable"], module: "ESNext", skipLibCheck: true, moduleResolution: "bundler", allowImportingTsExtensions: true, isolatedModules: true, noEmit: true, jsx: "react-jsx", strict: true }, include: ["src"] }, null, 2) },
        { path: "index.html", content: `<!DOCTYPE html><html lang="en"><head><meta charset="UTF-8"/><meta name="viewport" content="width=device-width,initial-scale=1.0"/><title>My App</title></head><body><div id="root"></div><script type="module" src="/src/main.tsx"></script></body></html>` },
        { path: "src/main.tsx", content: `import { StrictMode } from 'react'\nimport { createRoot } from 'react-dom/client'\nimport App from './App'\n\ncreateRoot(document.getElementById('root')!).render(<StrictMode><App /></StrictMode>)\n` },
        { path: "src/App.tsx", content: `export default function App() {\n  return (\n    <div style={{ fontFamily: 'system-ui', maxWidth: 600, margin: '4rem auto', padding: '0 1rem' }}>\n      <h1>My DYPAI App</h1>\n      <p>Start building with your AI agent!</p>\n    </div>\n  )\n}\n` },
      ]
    }

    // .env with engine URL
    files.push({ path: ".env", content: `VITE_DYPAI_URL=${engineUrl}\nVITE_PROJECT_ID=${project_id}\n` })

    // SDK client helper (lib/dypai.ts)
    files.push({ path: "src/lib/dypai.ts", content: `import { createClient } from "@dypai-ai/client-sdk";\n\nexport const dypai = createClient(import.meta.env.VITE_DYPAI_URL);\n` })

    // Write files to disk. Parent dirs are derived from file.path (which
    // always uses "/" separators) instead of slicing the OS-joined path —
    // the old lastIndexOf("/") approach broke on Windows.
    mkdirSync(directory, { recursive: true })
    let created = 0
    for (const file of files) {
      const segments = file.path.split("/")
      segments.pop()
      if (segments.length) mkdirSync(join(directory, ...segments), { recursive: true })
      writeFileSync(join(directory, file.path), file.content)
      created++
    }

    // Try to run npm install (best-effort, works if Node is available)
    let installed = false
    try {
      const { execSync } = await import("child_process")
      execSync("npm install", { cwd: directory, stdio: "pipe", timeout: 60000 })
      installed = true
    } catch {}

    return {
      success: true,
      directory,
      files_created: created,
      dependencies_installed: installed,
      template: template || "blank",
      engine_url: engineUrl,
      sdk_client: "src/lib/dypai.ts",
      message: `Project created at ${directory} with ${created} files.${installed ? " Dependencies installed." : " Run 'npm install' to install dependencies."} SDK client ready at src/lib/dypai.ts — import { dypai } from './lib/dypai' to use.`,
    }
  },
}
@@ -0,0 +1,94 @@
1
+ /**
2
+ * Frontend status + build status tools.
3
+ */
4
+
5
+ import { api } from "../api.mjs"
6
+
7
export const listDeploymentsTool = {
  name: "list_deployments",
  description: `List deployment history for the project's frontend. Shows recent deploys with status, commit, duration, and URL.`,

  inputSchema: {
    type: "object",
    properties: {
      project_id: { type: "string", description: "Project UUID." },
      limit: { type: "number", description: "Max deployments to return (default 10, max 20)." },
    },
    required: ["project_id"],
  },

  /**
   * List recent deployments.
   * `limit` is clamped to the documented 1–20 range (default 10) so the
   * query matches the schema's contract instead of passing arbitrary
   * values through to the API. Failures surface as { error }.
   */
  async execute({ project_id, limit }) {
    const capped = Math.min(Math.max(Math.trunc(limit) || 10, 1), 20)
    try {
      return await api.get(`/api/engine/${project_id}/frontend/deployments?limit=${capped}`)
    } catch (e) {
      return { error: e.message }
    }
  },
}
28
+
29
export const getDeploymentLogsTool = {
  name: "get_deployment_logs",
  description: `Get build logs for a specific deployment. Use list_deployments first to get the deployment ID. Useful for debugging failed builds.`,

  inputSchema: {
    type: "object",
    properties: {
      project_id: { type: "string", description: "Project UUID." },
      deployment_id: { type: "string", description: "Deployment UUID from list_deployments." },
    },
    required: ["project_id", "deployment_id"],
  },

  // Fetches logs for one deployment; failures surface as { error }.
  async execute({ project_id, deployment_id }) {
    const endpoint = `/api/engine/${project_id}/frontend/deployments/${deployment_id}/logs`
    try {
      const logs = await api.get(endpoint)
      return logs
    } catch (err) {
      return { error: err.message }
    }
  },
}
50
+
51
export const frontendStatusTool = {
  name: "get_frontend_status",
  description: "Get the current frontend deployment status — URL, status, last deploy time, size.",

  inputSchema: {
    type: "object",
    properties: {
      project_id: { type: "string", description: "Project UUID." },
    },
    required: ["project_id"],
  },

  // Single GET against the frontend resource; failures surface as { error }.
  async execute({ project_id }) {
    const endpoint = `/api/engine/${project_id}/frontend`
    try {
      const status = await api.get(endpoint)
      return status
    } catch (err) {
      return { error: err.message }
    }
  },
}
71
+
72
export const buildStatusTool = {
  name: "get_build_status",
  description: `Get the latest build status from Cloudflare Pages.

Returns: status (queued/building/success/failure), current stage, progress %, URL.
Useful to check if a deploy has finished building.`,

  inputSchema: {
    type: "object",
    properties: {
      project_id: { type: "string", description: "Project UUID." },
    },
    required: ["project_id"],
  },

  // Polls the build-status endpoint; failures surface as { error }.
  async execute({ project_id }) {
    const endpoint = `/api/engine/${project_id}/frontend/build-status`
    try {
      const status = await api.get(endpoint)
      return status
    } catch (err) {
      return { error: err.message }
    }
  },
}