@datanimbus/dnio-mcp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Dockerfile +20 -0
- package/docs/README.md +35 -0
- package/docs/architecture.md +171 -0
- package/docs/authentication.md +74 -0
- package/docs/tools/apps.md +59 -0
- package/docs/tools/connectors.md +76 -0
- package/docs/tools/data-pipes.md +286 -0
- package/docs/tools/data-services.md +105 -0
- package/docs/tools/deployment-groups.md +152 -0
- package/docs/tools/plugins.md +94 -0
- package/docs/tools/records.md +97 -0
- package/docs/workflows.md +195 -0
- package/env.example +16 -0
- package/package.json +43 -0
- package/readme.md +144 -0
- package/src/clients/api-keys.js +10 -0
- package/src/clients/apps.js +13 -0
- package/src/clients/base-client.js +78 -0
- package/src/clients/bots.js +10 -0
- package/src/clients/connectors.js +30 -0
- package/src/clients/data-formats.js +40 -0
- package/src/clients/data-pipes.js +33 -0
- package/src/clients/deployment-groups.js +59 -0
- package/src/clients/formulas.js +10 -0
- package/src/clients/functions.js +10 -0
- package/src/clients/plugins.js +39 -0
- package/src/clients/records.js +51 -0
- package/src/clients/services.js +63 -0
- package/src/clients/user-groups.js +10 -0
- package/src/clients/users.js +10 -0
- package/src/examples/ai-sdk-client.js +165 -0
- package/src/examples/claude_desktop_config.json +34 -0
- package/src/examples/express-integration.js +181 -0
- package/src/index.js +283 -0
- package/src/schemas/schema-converter.js +179 -0
- package/src/services/auth-manager.js +277 -0
- package/src/services/dnio-client.js +40 -0
- package/src/services/service-registry.js +150 -0
- package/src/services/session-manager.js +161 -0
- package/src/stdio-bridge.js +185 -0
- package/src/tools/_helpers.js +32 -0
- package/src/tools/api-keys.js +5 -0
- package/src/tools/apps.js +185 -0
- package/src/tools/bots.js +5 -0
- package/src/tools/connectors.js +165 -0
- package/src/tools/data-formats.js +806 -0
- package/src/tools/data-pipes.js +1305 -0
- package/src/tools/data-service-registry.js +500 -0
- package/src/tools/deployment-groups.js +511 -0
- package/src/tools/formulas.js +5 -0
- package/src/tools/functions.js +5 -0
- package/src/tools/mcp-tools-registry.js +38 -0
- package/src/tools/plugins.js +250 -0
- package/src/tools/records.js +217 -0
- package/src/tools/services.js +476 -0
- package/src/tools/user-groups.js +5 -0
- package/src/tools/users.js +5 -0
- package/src/utils/constants.js +135 -0
- package/src/utils/logger.js +63 -0
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
# Records (CRUD)
|
|
2
|
+
|
|
3
|
+
Two layers of tools for working with the records inside data services:
|
|
4
|
+
|
|
5
|
+
- **Static, generic** — registered by `src/tools/records.js`. Take a `serviceName` arg and resolve it via the registry.
|
|
6
|
+
- **Dynamic, typed** — registered per-service by `src/tools/data-service-registry.js` after `select_app` succeeds. Each service gets six tools named `<toolPrefix>_list/_get/_create/_update/_delete/_count` with input schemas converted from the service's `definition`.
|
|
7
|
+
|
|
8
|
+
Both layers hit the same backing endpoints via `RecordsClient` (`src/clients/records.js`). All require an app to be selected first.
|
|
9
|
+
|
|
10
|
+
Endpoint pattern: `/api/c/${app}/${servicePath}[/${recordId}][/utils/count]`.
|
|
11
|
+
|
|
12
|
+
## Static, generic tools (`src/tools/records.js`)
|
|
13
|
+
|
|
14
|
+
### `list_records`
|
|
15
|
+
|
|
16
|
+
| | |
|
|
17
|
+
|---|---|
|
|
18
|
+
| **Purpose** | List/search records, with MongoDB-style filter, sort, select, pagination. |
|
|
19
|
+
| **Inputs** | `serviceName` (required), `filter` (string, JSON), `sort` (string, prefix `-` for desc), `select` (CSV), `page` (int, default 1), `count` (int 1–100, default 20). |
|
|
20
|
+
| **Endpoint** | `GET /api/c/${app}/${servicePath}?filter=…&sort=…&select=…&page=…&count=…` |
|
|
21
|
+
| **Returns** | `{ service, count, page, records }` |
|
|
22
|
+
|
|
23
|
+
### `get_record`
|
|
24
|
+
|
|
25
|
+
| | |
|
|
26
|
+
|---|---|
|
|
27
|
+
| **Purpose** | Fetch a single record by ID. |
|
|
28
|
+
| **Inputs** | `serviceName` (required), `recordId` (required), `expand` (boolean, default false). |
|
|
29
|
+
| **Endpoint** | `GET /api/c/${app}/${servicePath}/${recordId}[?expand=true]` |
|
|
30
|
+
| **Returns** | `{ service, record }`. 404 from the platform is converted to a friendly "not found" message instead of an error. |
|
|
31
|
+
|
|
32
|
+
### `create_record`
|
|
33
|
+
|
|
34
|
+
| | |
|
|
35
|
+
|---|---|
|
|
36
|
+
| **Purpose** | Create a record. Use `get_service_schema` first if you don't already know the field shape. |
|
|
37
|
+
| **Inputs** | `serviceName` (required), `data` (required, JSON string). |
|
|
38
|
+
| **Endpoint** | `POST /api/c/${app}/${servicePath}` |
|
|
39
|
+
| **Returns** | `{ service, action: 'created', record }` |
|
|
40
|
+
|
|
41
|
+
### `update_record`
|
|
42
|
+
|
|
43
|
+
| | |
|
|
44
|
+
|---|---|
|
|
45
|
+
| **Purpose** | Patch fields on an existing record. Partial updates are supported. |
|
|
46
|
+
| **Inputs** | `serviceName` (required), `recordId` (required), `data` (required, JSON string). |
|
|
47
|
+
| **Endpoint** | `PUT /api/c/${app}/${servicePath}/${recordId}` |
|
|
48
|
+
| **Returns** | `{ service, action: 'updated', recordId, record }` |
|
|
49
|
+
|
|
50
|
+
### `delete_record`
|
|
51
|
+
|
|
52
|
+
| | |
|
|
53
|
+
|---|---|
|
|
54
|
+
| **Purpose** | Permanently delete a record. Marked as a destructive tool. |
|
|
55
|
+
| **Inputs** | `serviceName` (required), `recordId` (required). |
|
|
56
|
+
| **Endpoint** | `DELETE /api/c/${app}/${servicePath}/${recordId}` |
|
|
57
|
+
| **Returns** | `{ service, action: 'deleted', recordId, result }` |
|
|
58
|
+
|
|
59
|
+
### `count_records`
|
|
60
|
+
|
|
61
|
+
| | |
|
|
62
|
+
|---|---|
|
|
63
|
+
| **Purpose** | Count records, with optional filter. Has a 1-second client timeout because the count endpoint is expected to be fast and is also used for pod liveness checks. |
|
|
64
|
+
| **Inputs** | `serviceName` (required), `filter` (optional JSON string). |
|
|
65
|
+
| **Endpoint** | `GET /api/c/${app}/${servicePath}/utils/count[?filter=…]` |
|
|
66
|
+
| **Returns** | `{ service, count }` |
|
|
67
|
+
|
|
68
|
+
## Dynamic, typed tools (per-service)
|
|
69
|
+
|
|
70
|
+
After `select_app` lands, `data-service-registry.js` walks each loaded service and registers six tools using the slugified service name as the prefix:
|
|
71
|
+
|
|
72
|
+
| Tool | Verb | Notes |
|
|
73
|
+
|---|---|---|
|
|
74
|
+
| `<prefix>_list` | GET | Same args/behaviour as `list_records` but pre-bound to that service. |
|
|
75
|
+
| `<prefix>_get` | GET | Single record by ID. |
|
|
76
|
+
| `<prefix>_create` | POST | `data` argument is a typed object (Zod schema generated from the service's `definition` via `src/schemas/schema-converter.js`). |
|
|
77
|
+
| `<prefix>_update` | PUT | Same as above; partial updates allowed. |
|
|
78
|
+
| `<prefix>_delete` | DELETE | Destructive. |
|
|
79
|
+
| `<prefix>_count` | GET | Optional filter. |
|
|
80
|
+
|
|
81
|
+
The `toolPrefix` is computed as `name.toLowerCase().replace(/[^a-z0-9]+/g, '_').replace(/^_|_$/g, '')`. So a service named `userAccount` becomes `useraccount_list`, `useraccount_create`, etc.
|
|
82
|
+
|
|
83
|
+
The typed `_create` and `_update` tools accept `data` as a real object (matched against the converted Zod schema), unlike the generic tools which accept a JSON string.
|
|
84
|
+
|
|
85
|
+
## Service resolution
|
|
86
|
+
|
|
87
|
+
The static tools accept any of these as `serviceName`:
|
|
88
|
+
- The exact `_id` (e.g. `SRVC13120`).
|
|
89
|
+
- The exact name (case-insensitive).
|
|
90
|
+
- The slugified `toolPrefix`.
|
|
91
|
+
|
|
92
|
+
Resolution lives in `ServiceRegistry.resolveService(identifier)`. If no match, the tool returns an error that includes the available service names.
|
|
93
|
+
|
|
94
|
+
## Why both layers exist
|
|
95
|
+
|
|
96
|
+
- The **dynamic** tools are the ergonomic path: typed inputs, no need to remember service names, no JSON-string serialization for create/update.
|
|
97
|
+
- The **static** tools are the fallback: they work even when the agent doesn't yet know the schema, and they're listed in MCP's tool catalog before any app is selected.
|
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
# End-to-End Workflows
|
|
2
|
+
|
|
3
|
+
Three reference walkthroughs that combine multiple domains. Each describes the tool calls in order — what the agent runs, in what sequence — and what gets auto-injected by the server.
|
|
4
|
+
|
|
5
|
+
## 1. Create a data service from scratch
|
|
6
|
+
|
|
7
|
+
Goal: define a new `student-record` data service in app `dx-mcp` and deploy it.
|
|
8
|
+
|
|
9
|
+
```
|
|
10
|
+
list_apps # discover apps
|
|
11
|
+
select_app(appName: 'dx-mcp') # required first
|
|
12
|
+
list_connectors # optional: see existing connectors
|
|
13
|
+
# (default DB + STORAGE auto-attached if omitted)
|
|
14
|
+
get_data_service_spec # mandatory: read the spec the LLM should follow
|
|
15
|
+
create_data_service(data: '{
|
|
16
|
+
"name": "student-record",
|
|
17
|
+
"description": "Stores student records",
|
|
18
|
+
"definition": [
|
|
19
|
+
{ "key": "_id", "type": "String", "prefix": "STU", "counter": 1001,
|
|
20
|
+
"properties": {"name":"ID","fieldLength":10,"_typeChanged":"id"} },
|
|
21
|
+
{ "key": "fullName", "type": "String",
|
|
22
|
+
"properties": {"name":"fullName","required":true,"_typeChanged":"String"} },
|
|
23
|
+
{ "key": "grade", "type": "Number",
|
|
24
|
+
"properties": {"name":"grade","precision":0,"_typeChanged":"Number"} }
|
|
25
|
+
]
|
|
26
|
+
}')
|
|
27
|
+
deploy_data_service(serviceId: 'SRVC<id>')
|
|
28
|
+
refresh_services # so the new typed CRUD tools appear
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
**Auto-injected by `create_data_service`:**
|
|
32
|
+
- `payload.app = registry.selectedApp` — required by the platform.
|
|
33
|
+
- `payload.connectors` — defaults if missing (default DB + default STORAGE).
|
|
34
|
+
- `payload.role` — `{ roles, fields }` with three standard roles (No Access / Manage / View) and a permission map mirroring the `definition`.
|
|
35
|
+
|
|
36
|
+
After `refresh_services`, six typed tools (`student_record_list`, `student_record_create`, …) appear automatically.
|
|
37
|
+
|
|
38
|
+
## 2. Build, configure, and publish a flow
|
|
39
|
+
|
|
40
|
+
Goal: build an HTTP-triggered flow that receives a JSON file, parses it, saves to S3, and returns a response.
|
|
41
|
+
|
|
42
|
+
```
|
|
43
|
+
list_apps
|
|
44
|
+
select_app(appName: 'dx-mcp')
|
|
45
|
+
|
|
46
|
+
# Plugin discovery: find or install the trigger
|
|
47
|
+
list_trigger_plugins # check installed
|
|
48
|
+
list_marketplace_plugins(search: 'HTTP') # if not installed
|
|
49
|
+
install_plugins(marketIds: [<V1_HTTP_SERVER marketId>])
|
|
50
|
+
list_trigger_plugins # confirm + grab full plugin object
|
|
51
|
+
|
|
52
|
+
# And process plugins
|
|
53
|
+
list_process_plugins(search: 'Parse') # → V1_PARSE_JSON
|
|
54
|
+
list_process_plugins(search: 'Save File') # → V1_SAVE_FILE
|
|
55
|
+
list_process_plugins(search: 'Response') # → V1_RESPONSE
|
|
56
|
+
|
|
57
|
+
# Need a connector for V1_SAVE_FILE
|
|
58
|
+
list_connectors(category: 'STORAGE') # check existing
|
|
59
|
+
list_connector_types(category: 'STORAGE') # if creating a new one
|
|
60
|
+
create_connector(name: 'My S3', marketItemId: '<id>',
|
|
61
|
+
values: '{"bucket":"…","accessKeyId":"…","secretAccessKey":"…","region":"…"}')
|
|
62
|
+
|
|
63
|
+
# Create the flow with the trigger
|
|
64
|
+
create_flow(
|
|
65
|
+
name: 'upload_to_s3',
|
|
66
|
+
triggerPlugin: <V1_HTTP_SERVER plugin>,
|
|
67
|
+
triggerOptions: { method: 'POST', path: '/upload' }
|
|
68
|
+
)
|
|
69
|
+
# → returns { flowId: 'FLOW…', status: 'Draft' }
|
|
70
|
+
|
|
71
|
+
# Wire process nodes
|
|
72
|
+
add_node_to_flow(
|
|
73
|
+
flowId,
|
|
74
|
+
pluginObject: <V1_PARSE_JSON plugin>,
|
|
75
|
+
name: 'parse_body',
|
|
76
|
+
afterNodeId: 'http_server',
|
|
77
|
+
mappings: [{ key: 'data', target: {…}, source: [{nodeId:'http_server', dataPath:'data', …}],
|
|
78
|
+
expression: { type: 'simple', value: "{{http_server['data']}}" } }]
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
add_node_to_flow(
|
|
82
|
+
flowId,
|
|
83
|
+
pluginObject: <V1_SAVE_FILE plugin>,
|
|
84
|
+
name: 'save_file',
|
|
85
|
+
afterNodeId: 'parse_body',
|
|
86
|
+
connector: { _id: 'CON<id>' },
|
|
87
|
+
mappings: [/* map content from parse_body */]
|
|
88
|
+
)
|
|
89
|
+
|
|
90
|
+
add_node_to_flow(
|
|
91
|
+
flowId,
|
|
92
|
+
pluginObject: <V1_RESPONSE plugin>,
|
|
93
|
+
name: 'send_response',
|
|
94
|
+
afterNodeId: 'save_file',
|
|
95
|
+
mappings: [/* map status, body */]
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
# Move out of Draft
|
|
99
|
+
publish_flow(flowId)
|
|
100
|
+
|
|
101
|
+
get_flow(flowId) # verify status moved past Draft
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
### Branching example
|
|
105
|
+
|
|
106
|
+
To split a path based on whether content is empty:
|
|
107
|
+
|
|
108
|
+
```
|
|
109
|
+
add_node_to_flow(
|
|
110
|
+
flowId,
|
|
111
|
+
pluginObject: <V1_RESPONSE>,
|
|
112
|
+
name: 'no_content_response',
|
|
113
|
+
afterNodeId: 'parse_body',
|
|
114
|
+
branchCondition: { condition: '_.isEmpty({{parse_body["data"]}})', name: 'No Content', color: 'FF9800' }
|
|
115
|
+
)
|
|
116
|
+
|
|
117
|
+
add_node_to_flow(
|
|
118
|
+
flowId,
|
|
119
|
+
pluginObject: <V1_SAVE_FILE>,
|
|
120
|
+
name: 'save_file',
|
|
121
|
+
afterNodeId: 'parse_body',
|
|
122
|
+
branchCondition: { condition: '!_.isEmpty({{parse_body["data"]}})', name: 'Has Content', color: '4CAF50' }
|
|
123
|
+
)
|
|
124
|
+
```
|
|
125
|
+
|
|
126
|
+
`parse_body.onSuccess` now has two entries with conditions; the runtime evaluates them and routes accordingly.
|
|
127
|
+
|
|
128
|
+
### CODEBLOCK fallback
|
|
129
|
+
|
|
130
|
+
For trivial logic with no first-class plugin, use `V1_CODEBLOCK`:
|
|
131
|
+
|
|
132
|
+
```
|
|
133
|
+
add_node_to_flow(
|
|
134
|
+
flowId,
|
|
135
|
+
pluginObject: <V1_CODEBLOCK plugin>,
|
|
136
|
+
name: 'enrich_payload',
|
|
137
|
+
afterNodeId: 'parse_body',
|
|
138
|
+
options: {
|
|
139
|
+
code: `async function executeCode(inputData, node, connectorConfig) {
|
|
140
|
+
try {
|
|
141
|
+
const enriched = { ...inputData, processedAt: new Date().toISOString() };
|
|
142
|
+
return { data: enriched };
|
|
143
|
+
} catch (err) {
|
|
144
|
+
logger.error(err);
|
|
145
|
+
throw err;
|
|
146
|
+
}
|
|
147
|
+
}`
|
|
148
|
+
}
|
|
149
|
+
)
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
The function MUST return `{ data: <whatever> }` — see [`tools/data-pipes.md`](tools/data-pipes.md) for the full contract and runtime globals.
|
|
153
|
+
|
|
154
|
+
## 3. Deploy a flow to Kubernetes
|
|
155
|
+
|
|
156
|
+
Goal: take a published flow and turn it into a running K8s deployment.
|
|
157
|
+
|
|
158
|
+
```
|
|
159
|
+
list_apps
|
|
160
|
+
select_app(appName: 'dx-mcp')
|
|
161
|
+
|
|
162
|
+
list_available_flows # what's eligible to deploy
|
|
163
|
+
create_deployment_group(name: 'Apple', flowIds: ['FLOW6156'])
|
|
164
|
+
# → { groupId: 'DG2943', status: 'Draft', group: {…} }
|
|
165
|
+
|
|
166
|
+
start_deployment_group(groupId: 'DG2943')
|
|
167
|
+
# → returns immediately; K8s settles in ~10s
|
|
168
|
+
|
|
169
|
+
# Verify
|
|
170
|
+
get_deployment_group(groupId: 'DG2943') # check status
|
|
171
|
+
get_deployment_group_yamls(groupId: 'DG2943') # see generated K8s YAMLs
|
|
172
|
+
|
|
173
|
+
# Later, after editing + republishing the flow:
|
|
174
|
+
publish_flow(flowId: 'FLOW6156') # update the published version
|
|
175
|
+
sync_deployment_group(groupId: 'DG2943') # roll forward to the new version
|
|
176
|
+
|
|
177
|
+
# Tear down (preserving the group):
|
|
178
|
+
stop_deployment_group(groupId: 'DG2943')
|
|
179
|
+
|
|
180
|
+
# Or delete entirely (frees the bound flows):
|
|
181
|
+
delete_deployment_group(groupId: 'DG2943', confirm: false) # dry-run: shows what would be freed
|
|
182
|
+
delete_deployment_group(groupId: 'DG2943', confirm: true) # actually delete
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
### Rule of thumb: where a flow is
|
|
186
|
+
|
|
187
|
+
A flow can be in one of three states from the deployment-group perspective:
|
|
188
|
+
|
|
189
|
+
| State | Visible in | Action |
|
|
190
|
+
|---|---|---|
|
|
191
|
+
| Draft | `list_flows({status:'Draft'})` | Edit further, then `publish_flow`. |
|
|
192
|
+
| Published & not bound to a group | `list_available_flows` | Add to a group via `create_deployment_group` or `add_flows_to_deployment_group`. |
|
|
193
|
+
| Published & bound to a group | `list_deployment_groups` (under `deployments[]`) | Already deployable — `start`/`stop`/`sync`/remove from group. |
|
|
194
|
+
|
|
195
|
+
If you need to add a flow to a different group, first `remove_flows_from_deployment_group` from its current group — there's no move operation, but remove + add achieves it.
|
package/env.example
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# ─── DNIO Admin Credentials (server-side only, used for management APIs) ─────
|
|
2
|
+
DNIO_USERNAME=<DNIO Admin UserName>
|
|
3
|
+
DNIO_PASSWORD=<DNIO Admin User Password>
|
|
4
|
+
DNIO_NAMESPACE=<DNIO Namespace>
|
|
5
|
+
DNIO_TOKEN_TTL=1800
|
|
6
|
+
DNIO_BASE_URL="<DNIO Host URL>"
|
|
7
|
+
MCP_USER_EMAIL=<DNIO MCP User ID>
|
|
8
|
+
MCP_USER_PASSWORD=<DNIO MCP User Password>
|
|
9
|
+
|
|
10
|
+
# ─── MCP Server Settings ─────────────────────────────────────────────────────
|
|
11
|
+
TRANSPORT=http # 'http' for multi-tenant remote | 'stdio' for local dev
|
|
12
|
+
MCP_PORT=3100 # HTTP port
|
|
13
|
+
LOG_LEVEL="info"
|
|
14
|
+
|
|
15
|
+
# ─── Session Settings ────────────────────────────────────────────────────────
|
|
16
|
+
SESSION_TTL=7200 # Session timeout in seconds (default: 2 hours)
|
package/package.json
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@datanimbus/dnio-mcp",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "MCP Server for DataNimbus BaaS platform - dynamically exposes CRUD tools for all Data Services",
|
|
5
|
+
"main": "src/index.js",
|
|
6
|
+
"bin": {
|
|
7
|
+
"dnio-mcp": "src/index.js"
|
|
8
|
+
},
|
|
9
|
+
"files": [
|
|
10
|
+
"src/",
|
|
11
|
+
"docs/README.md",
|
|
12
|
+
"docs/architecture.md",
|
|
13
|
+
"docs/authentication.md",
|
|
14
|
+
"docs/workflows.md",
|
|
15
|
+
"docs/tools/",
|
|
16
|
+
"readme.md",
|
|
17
|
+
"env.example",
|
|
18
|
+
"Dockerfile"
|
|
19
|
+
],
|
|
20
|
+
"scripts": {
|
|
21
|
+
"start": "node src/index.js",
|
|
22
|
+
"start:http": "TRANSPORT=http node src/index.js",
|
|
23
|
+
"start:stdio": "TRANSPORT=stdio node src/index.js",
|
|
24
|
+
"dev": "nodemon src/index.js",
|
|
25
|
+
"inspect": "npx @modelcontextprotocol/inspector node src/index.js"
|
|
26
|
+
},
|
|
27
|
+
"dependencies": {
|
|
28
|
+
"@modelcontextprotocol/sdk": "^1.12.0",
|
|
29
|
+
"express": "^4.21.0",
|
|
30
|
+
"got": "^11.8.6",
|
|
31
|
+
"log4js": "^6.9.1",
|
|
32
|
+
"winston": "^3.17.0",
|
|
33
|
+
"zod": "^3.25.0"
|
|
34
|
+
},
|
|
35
|
+
"devDependencies": {
|
|
36
|
+
"@modelcontextprotocol/inspector": "0.16.6",
|
|
37
|
+
"nodemon": "^3.1.0",
|
|
38
|
+
"dotenv": "^17.3.1"
|
|
39
|
+
},
|
|
40
|
+
"engines": {
|
|
41
|
+
"node": ">=18.0.0"
|
|
42
|
+
}
|
|
43
|
+
}
|
package/readme.md
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
# @datanimbus/dnio-mcp
|
|
2
|
+
|
|
3
|
+
[npm](https://www.npmjs.com/package/@datanimbus/dnio-mcp)
|
|
4
|
+
|
|
5
|
+
**Model Context Protocol (MCP) server for the [DataNimbus](https://datanimbus.io) BaaS platform.** Exposes Data Services, Connectors, Plugins, Data Pipes (flows), Deployment Groups, and Data Formats as MCP tools — drive the platform from Claude Desktop, Cursor, or any MCP client through natural language.
|
|
6
|
+
|
|
7
|
+
CRUD tools for every Data Service in the selected app are **registered dynamically** at runtime, so the agent gets typed `list/get/create/update/delete/count` per service the moment you `select_app`.
|
|
8
|
+
|
|
9
|
+
---
|
|
10
|
+
|
|
11
|
+
## Prerequisites
|
|
12
|
+
|
|
13
|
+
- **Node.js ≥ 18** (for native `fetch`).
|
|
14
|
+
- A running **DataNimbus instance** reachable over HTTPS.
|
|
15
|
+
- **Admin credentials** (`DNIO_USERNAME` / `DNIO_PASSWORD`) for management APIs.
|
|
16
|
+
- A pre-provisioned **MCP service account** on the platform (`MCP_USER_EMAIL` / `MCP_USER_PASSWORD`). Server logs in as the service account for data-plane operations and adds it to all apps on first boot. See [`docs/authentication.md`](docs/authentication.md).
|
|
17
|
+
|
|
18
|
+
---
|
|
19
|
+
|
|
20
|
+
## Quick Start (Claude Desktop)
|
|
21
|
+
|
|
22
|
+
Add to `claude_desktop_config.json`:
|
|
23
|
+
|
|
24
|
+
```json
|
|
25
|
+
{
|
|
26
|
+
"mcpServers": {
|
|
27
|
+
"@datanimbus/dnio-mcp": {
|
|
28
|
+
"command": "npx",
|
|
29
|
+
"args": [
|
|
30
|
+
"-y",
|
|
31
|
+
"@datanimbus/dnio-mcp@latest"
|
|
32
|
+
],
|
|
33
|
+
"env": {
|
|
34
|
+
"DNIO_BASE_URL": "https://your-datanimbus-instance.com",
|
|
35
|
+
"DNIO_USERNAME": "<admin-username>",
|
|
36
|
+
"DNIO_PASSWORD": "<admin-password>",
|
|
37
|
+
"MCP_USER_EMAIL": "<mcp-service-account-email>",
|
|
38
|
+
"MCP_USER_PASSWORD": "<mcp-service-account-password>",
|
|
39
|
+
"TRANSPORT": "stdio"
|
|
40
|
+
}
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
Restart Claude Desktop. The agent now has access to every DNIO domain tool. Same shape works in Cursor (`.cursor/mcp.json`) or any MCP-compliant client.
|
|
47
|
+
|
|
48
|
+
For HTTP transport (multi-tenant / remote deployment):
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
TRANSPORT=http MCP_PORT=3100 \
|
|
52
|
+
DNIO_BASE_URL=https://your-datanimbus-instance.com \
|
|
53
|
+
DNIO_USERNAME=<admin-username> \
|
|
54
|
+
DNIO_PASSWORD=<admin-password> \
|
|
55
|
+
MCP_USER_EMAIL=<mcp-service-account-email> \
|
|
56
|
+
MCP_USER_PASSWORD=<mcp-service-account-password> \
|
|
57
|
+
npx -y @datanimbus/dnio-mcp@latest
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
Server listens at `http://localhost:3100/mcp` (authless — protect at the network layer).
|
|
61
|
+
|
|
62
|
+
---
|
|
63
|
+
|
|
64
|
+
## Environment Variables
|
|
65
|
+
|
|
66
|
+
| Variable | Required | Default | Description |
|
|
67
|
+
|---|---|---|---|
|
|
68
|
+
| `DNIO_BASE_URL` | ✅ | — | Platform base URL (e.g. `https://qa.datanimbus.io`). |
|
|
69
|
+
| `DNIO_USERNAME` | ✅ | — | Admin username for management APIs (list apps, fetch schemas, deploy services). |
|
|
70
|
+
| `DNIO_PASSWORD` | ✅ | — | Admin password. |
|
|
71
|
+
| `MCP_USER_EMAIL` | ✅ | — | Service-account email/ID the server logs in as for data-plane operations. |
|
|
72
|
+
| `MCP_USER_PASSWORD` | ✅ | — | Service-account password. |
|
|
73
|
+
| `TRANSPORT` | — | `stdio` | `stdio` for local clients (Claude Desktop / Cursor); `http` for deployed mode. |
|
|
74
|
+
| `MCP_PORT` | — | `3100` | HTTP port (only when `TRANSPORT=http`). |
|
|
75
|
+
| `DNIO_NAMESPACE` | — | `DNIO` | DNIO namespace used for service-account provisioning. |
|
|
76
|
+
| `DNIO_TOKEN_TTL` | — | `1800` | Token TTL in seconds — both admin + MCP-user tokens auto-refresh before expiry. |
|
|
77
|
+
| `LOG_LEVEL` | — | `info` | `debug` / `info` / `warn` / `error`. |
|
|
78
|
+
| `SESSION_TTL` | — | `7200` | HTTP-session lifetime in seconds (HTTP transport only). |
|
|
79
|
+
|
|
80
|
+
Server fails fast with `[FATAL] Missing required env var: <name>` if any required variable is unset.
|
|
81
|
+
|
|
82
|
+
**Two-token model:** admin token for management APIs, MCP-user token for data plane. Both managed in memory, never written to disk, never logged. Tool handlers switch tokens explicitly per operation. Full lifecycle in [`docs/authentication.md`](docs/authentication.md).
|
|
83
|
+
|
|
84
|
+
---
|
|
85
|
+
|
|
86
|
+
## Available Tools
|
|
87
|
+
|
|
88
|
+
Tools are organized into product domains. After `select_app`, **per-service typed CRUD tools** are also registered dynamically (`<servicePrefix>_list / _get / _create / _update / _delete / _count`).
|
|
89
|
+
|
|
90
|
+
### App context
|
|
91
|
+
`list_apps`, `select_app`, `list_services`, `refresh_services`, `get_service_schema`
|
|
92
|
+
|
|
93
|
+
### Records (CRUD)
|
|
94
|
+
`list_records`, `get_record`, `create_record`, `update_record`, `delete_record`, `count_records` — generic, takes `serviceName` arg. Plus the per-service typed tools mentioned above.
|
|
95
|
+
|
|
96
|
+
### Data Services (definitions)
|
|
97
|
+
`get_data_service_spec`, `create_data_service`, `get_data_service`, `list_data_services`, `update_data_service`, `deploy_data_service`, `start_stop_data_service`. Auto-injects `app`, default `connectors`, and `role` (3 standard roles + per-field permission map).
|
|
98
|
+
|
|
99
|
+
### Connectors
|
|
100
|
+
`list_connectors`, `list_connector_types` (marketplace), `create_connector`. Common types: `MONGODB`, `S3`, `SFTP`, `ACTIVEMQ`, `SMTP`.
|
|
101
|
+
|
|
102
|
+
### Plugins (workflow nodes)
|
|
103
|
+
`list_marketplace_plugins`, `install_plugins`, `list_installed_plugins`, `update_plugins`, `uninstall_plugins`. **Note:** `V1_CODEBLOCK` is a platform built-in — pass directly to flow tools without installing.
|
|
104
|
+
|
|
105
|
+
### Data Pipes (flows)
|
|
106
|
+
`list_trigger_plugins`, `list_process_plugins`, `list_flows`, `get_flow`, `create_flow`, `add_node_to_flow`, `update_node_in_flow`, `connect_nodes`, `remove_node_from_flow`, `publish_flow`. Auto-maps fields by name match; surfaces unmapped inputs; returns the public invocation URL for HTTP triggers.
|
|
107
|
+
|
|
108
|
+
### Deployment Groups (Kubernetes)
|
|
109
|
+
`list_available_flows`, `list_deployment_groups`, `get_deployment_group`, `create_deployment_group`, `add_flows_to_deployment_group`, `remove_flows_from_deployment_group`, `rename_deployment_group`, `start_deployment_group`, `stop_deployment_group`, `sync_deployment_group`, `delete_deployment_group`, `get_deployment_group_yamls`. Enforces the one-flow-one-group rule atomically.
|
|
110
|
+
|
|
111
|
+
### Data Formats
|
|
112
|
+
`list_data_formats`, `get_data_format`, `create_data_format`, `update_data_format`, `delete_data_format`, `add_attribute`, `update_attribute`, `remove_attribute`. Handles the rigid HRSF skeleton (Header/Records/Footer) for FLATFILE/TXT/CSV/DELIMITER.
|
|
113
|
+
|
|
114
|
+
---
|
|
115
|
+
|
|
116
|
+
## Docker
|
|
117
|
+
|
|
118
|
+
```bash
|
|
119
|
+
docker run -d --name dnio-mcp -p 3100:3100 \
|
|
120
|
+
-e DNIO_BASE_URL=https://your-datanimbus-instance.com \
|
|
121
|
+
-e DNIO_USERNAME=<admin-username> \
|
|
122
|
+
-e DNIO_PASSWORD=<admin-password> \
|
|
123
|
+
-e MCP_USER_EMAIL=<mcp-service-account-email> \
|
|
124
|
+
-e MCP_USER_PASSWORD=<mcp-service-account-password> \
|
|
125
|
+
-e TRANSPORT=http \
|
|
126
|
+
ghcr.io/datanimbus/dnio-mcp:latest
|
|
127
|
+
```
|
|
128
|
+
|
|
129
|
+
Or build from source — `Dockerfile` in repo root, K8s manifests in `yamls/`.
|
|
130
|
+
|
|
131
|
+
---
|
|
132
|
+
|
|
133
|
+
## Documentation
|
|
134
|
+
|
|
135
|
+
- [`docs/architecture.md`](docs/architecture.md) — codebase layout, master client + per-domain pattern, transports.
|
|
136
|
+
- [`docs/authentication.md`](docs/authentication.md) — token model, refresh lifecycle, app provisioning.
|
|
137
|
+
- [`docs/workflows.md`](docs/workflows.md) — end-to-end recipes (build a data service, design a flow, deploy to K8s).
|
|
138
|
+
- [`docs/tools/`](docs/tools/) — per-domain tool reference with endpoints + auto-injection rules.
|
|
139
|
+
|
|
140
|
+
---
|
|
141
|
+
|
|
142
|
+
## License
|
|
143
|
+
|
|
144
|
+
ISC.
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
const got = require('got');
|
|
4
|
+
const logger = require('../utils/logger');
|
|
5
|
+
|
|
6
|
+
class BaseClient {
	/**
	 * Thin HTTP wrapper around `got` for the DNIO platform API.
	 *
	 * @param {Object} opts
	 * @param {string} opts.baseUrl - Platform base URL; a trailing slash is stripped
	 *   so it can be used as got's `prefixUrl`.
	 * @param {string} [opts.token] - JWT for the Authorization header (optional).
	 * @param {number} [opts.timeout=30000] - Per-request timeout in milliseconds.
	 */
	constructor({baseUrl, token, timeout = 30000}) {
		this.baseUrl = baseUrl.replace(/\/$/, '');
		this.token = token || null;
		this.timeout = timeout;
		this._buildClient();
	}

	/**
	 * (Re)creates the underlying got instance. Called from the constructor and
	 * from setToken(), because headers are fixed at got.extend() time.
	 */
	_buildClient() {
		const headers = {'Content-Type': 'application/json'};
		// The platform expects "JWT <token>", not the more common "Bearer <token>".
		if (this.token) headers.Authorization = `JWT ${this.token}`;
		this.client = got.extend({
			prefixUrl: this.baseUrl,
			timeout: {request: this.timeout},
			headers,
			responseType: 'json',
			// HTTP errors are surfaced manually in _request so the platform's
			// error body can be attached to the thrown Error.
			throwHttpErrors: false
		});
	}

	/**
	 * Swap the auth token and rebuild the got instance so subsequent requests
	 * carry the new Authorization header.
	 * @param {string} token
	 */
	setToken(token) {
		this.token = token;
		this._buildClient();
	}

	/**
	 * Debug-log an outgoing request: method, path, parsed query params, and the
	 * serialized JSON body (when present).
	 * NOTE(review): the JSON body is logged verbatim at debug level — avoid
	 * enabling debug logging where payloads may contain secrets.
	 */
	_logRequest(method, url, options) {
		const [pathPart, queryString] = url.split('?');
		const entry = {method, path: '/' + pathPart};
		if (queryString) entry.query = Object.fromEntries(new URLSearchParams(queryString));
		if (options.json !== undefined) entry.body = JSON.stringify(options.json);
		logger.debug('DNIO API request', entry);
	}

	/**
	 * Perform a request and return the parsed response body.
	 *
	 * @param {string} method - HTTP verb.
	 * @param {string} path - Path relative to baseUrl; a leading slash is stripped
	 *   because got's `prefixUrl` rejects absolute paths.
	 * @param {Object} [options] - Extra got options (e.g. a `json` payload).
	 * @returns {Promise<*>} Parsed JSON response body.
	 * @throws {Error} With `status` (HTTP code, or 502 for transport failures)
	 *   and, for HTTP errors, `body` (the platform's error payload). Transport
	 *   errors are wrapped with the original error preserved on `cause`.
	 */
	async _request(method, path, options = {}) {
		const url = path.replace(/^\//, '');
		this._logRequest(method, url, options);
		try {
			const response = await this.client(url, {method, ...options});
			if (response.statusCode >= 400) {
				const errMsg = response.body?.message || response.body?.error || `HTTP ${response.statusCode}`;
				const err = new Error(errMsg);
				err.status = response.statusCode;
				err.body = response.body;
				throw err;
			}
			return response.body;
		} catch (error) {
			// Errors thrown above already carry a status — re-throw untouched.
			if (error.status) throw error;
			logger.error(`DNIO API error: ${method} ${path}`, {error: error.message});
			const wrapped = new Error(error.message || 'DNIO API request failed');
			// ?? (not ||): fall back only when statusCode is absent, and keep the
			// original error as the cause so its stack is not lost.
			wrapped.status = error.response?.statusCode ?? 502;
			wrapped.cause = error;
			throw wrapped;
		}
	}

	/** GET `path`; resolves with the parsed body. */
	get(path, options) {
		return this._request('GET', path, options);
	}

	/** POST `body` as JSON to `path`. */
	post(path, body, options = {}) {
		return this._request('POST', path, {...options, json: body});
	}

	/** PUT `body` as JSON to `path`. */
	put(path, body, options = {}) {
		return this._request('PUT', path, {...options, json: body});
	}

	/** DELETE `path`. */
	delete(path, options) {
		return this._request('DELETE', path, options);
	}
}
|
|
77
|
+
|
|
78
|
+
module.exports = BaseClient;
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
class ConnectorsClient {
	/**
	 * API client for connector management within a DNIO app.
	 * @param {BaseClient} http - Shared HTTP client used for all platform calls.
	 */
	constructor(http) {
		this.http = http;
	}

	/**
	 * List connectors configured in an app, optionally restricted to a category.
	 *
	 * @param {string} appName - App whose connectors to list.
	 * @param {Object} [opts]
	 * @param {string} [opts.category] - Filter by category (e.g. STORAGE, DB).
	 * @param {number} [opts.count=-1] - Max results; -1 means all.
	 * @param {string} [opts.select] - CSV of fields; a default projection is used when omitted.
	 * @returns {Promise<*>} Platform response for the connector list.
	 */
	list(appName, {category, count = -1, select} = {}) {
		const filter = category ? {app: appName, category} : {app: appName};
		const query = new URLSearchParams([
			['filter', JSON.stringify(filter)],
			['count', String(count)],
			['select', select || '_id,name,category,subCategory,type,options,_metadata']
		]);
		return this.http.get(`api/a/rbac/${appName}/connector?${query}`);
	}

	/**
	 * List connector types available in the marketplace.
	 *
	 * @param {string} appName - App context for the marketplace lookup.
	 * @param {Object} [opts]
	 * @param {number} [opts.count=1000] - Max results.
	 * @param {string} [opts.select] - CSV of fields; a default projection is used when omitted.
	 * @returns {Promise<*>} Platform response for the marketplace connector types.
	 */
	listMarketTypes(appName, {count = 1000, select} = {}) {
		const query = new URLSearchParams([
			['count', String(count)],
			['select', select || 'label,thumbnail,type,fields,category,tags,version']
		]);
		return this.http.get(`api/a/bm/${appName}/marketplace/connector?${query}`);
	}

	/**
	 * Create a connector in an app.
	 *
	 * @param {string} appName - App to create the connector in.
	 * @param {Object} payload - Connector definition to send as JSON.
	 * @returns {Promise<*>} Platform response for the created connector.
	 */
	create(appName, payload) {
		return this.http.post(`api/a/bm/${appName}/connector`, payload);
	}
}
|
|
29
|
+
|
|
30
|
+
module.exports = ConnectorsClient;
|