@2kw/ai-mcp-server 4.0.0-dev.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +19 -0
- package/README.md +123 -0
- package/dist/client.d.ts +14 -0
- package/dist/client.js +46 -0
- package/dist/errors.d.ts +15 -0
- package/dist/errors.js +22 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +116 -0
- package/dist/mime.d.ts +2 -0
- package/dist/mime.js +56 -0
- package/dist/schema-compat.d.ts +11 -0
- package/dist/schema-compat.js +51 -0
- package/dist/tools/ai-gateway.d.ts +4 -0
- package/dist/tools/ai-gateway.js +107 -0
- package/dist/tools/analytics.d.ts +4 -0
- package/dist/tools/analytics.js +163 -0
- package/dist/tools/billing.d.ts +4 -0
- package/dist/tools/billing.js +76 -0
- package/dist/tools/conversion.d.ts +4 -0
- package/dist/tools/conversion.js +281 -0
- package/dist/tools/datasets.d.ts +4 -0
- package/dist/tools/datasets.js +200 -0
- package/dist/tools/docs.d.ts +4 -0
- package/dist/tools/docs.js +171 -0
- package/dist/tools/evaluators.d.ts +4 -0
- package/dist/tools/evaluators.js +140 -0
- package/dist/tools/experiments.d.ts +4 -0
- package/dist/tools/experiments.js +231 -0
- package/dist/tools/extraction.d.ts +4 -0
- package/dist/tools/extraction.js +245 -0
- package/dist/tools/prompts.d.ts +4 -0
- package/dist/tools/prompts.js +373 -0
- package/dist/tools/providers.d.ts +4 -0
- package/dist/tools/providers.js +148 -0
- package/dist/tools/schema-labels.d.ts +4 -0
- package/dist/tools/schema-labels.js +88 -0
- package/dist/tools/schema-testing.d.ts +4 -0
- package/dist/tools/schema-testing.js +96 -0
- package/dist/tools/schema-versions.d.ts +4 -0
- package/dist/tools/schema-versions.js +127 -0
- package/dist/tools/schemas.d.ts +4 -0
- package/dist/tools/schemas.js +136 -0
- package/dist/tools/scores.d.ts +4 -0
- package/dist/tools/scores.js +43 -0
- package/dist/tools/tracing.d.ts +4 -0
- package/dist/tools/tracing.js +124 -0
- package/dist/tools/transcription.d.ts +4 -0
- package/dist/tools/transcription.js +76 -0
- package/package.json +45 -0
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { formatErrorForMcp } from "../errors.js";
|
|
3
|
+
// Shared Zod field set for the optional date-range filter accepted by every
// analytics tool below. Dates are plain YYYY-MM-DD strings; when omitted the
// API applies its own default window (the summary tool's description states a
// trailing-30-day default — presumably the same for the other endpoints; the
// server is authoritative).
const dateRange = {
    startDate: z.string().optional().describe("Start date (YYYY-MM-DD)"),
    endDate: z.string().optional().describe("End date (YYYY-MM-DD)"),
};
|
|
7
|
+
/**
 * Register the 2kw analytics MCP tools on the server.
 *
 * Every analytics tool shares one call shape: GET a single analytics endpoint
 * with date-range (plus optional grouping/limit) query parameters, return the
 * JSON payload as pretty-printed text, and surface thrown errors as MCP error
 * results. The shared `run` wrapper and schema fragments below centralize that
 * boilerplate so each tool only states its endpoint and query parameters.
 *
 * @param server - MCP server instance (provides server.tool)
 * @param client - API client (provides client.GET)
 */
export function register(server, client) {
    // Wrap a data-fetching function in the common success/error envelope.
    const run = (fetchData) => async (params) => {
        try {
            const data = await fetchData(params);
            return {
                content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    };
    // Build the date-range query object shared by every endpoint. undefined
    // values are dropped by JSON serialization, matching the previous behavior.
    const rangeQuery = (params) => ({ startDate: params.startDate, endDate: params.endDate });
    // Shared schema fragments for the recurring optional parameters.
    const groupByField = {
        groupBy: z
            .enum(["day", "week", "month"])
            .optional()
            .describe("Bucket size (default: day)"),
    };
    const limitField = {
        limit: z.number().int().min(1).max(100).optional().describe("Max rows (default 10)"),
    };
    server.tool("2kw_analytics_summary", "Cross-surface summary for the org: total operations, spend, p95 duration, and per-surface breakdown (extractions, conversions, chat, transcription, experiments). Default range is the trailing 30 days.", dateRange, run(async (params) => {
        const { data } = await client.GET("/v1/analytics/summary", {
            params: { query: rangeQuery(params) },
        });
        return data;
    }));
    server.tool("2kw_analytics_time_series", "Operations count grouped by date bucket and surface. Useful for charting activity over time.", {
        ...dateRange,
        ...groupByField,
    }, run(async (params) => {
        const { data } = await client.GET("/v1/analytics/time-series", {
            params: { query: { ...rangeQuery(params), groupBy: params.groupBy ?? "day" } },
        });
        return data;
    }));
    server.tool("2kw_analytics_schemas", "Top schemas by extraction count with success rate, token usage, and cost (extraction surface only).", {
        ...dateRange,
        ...limitField,
    }, run(async (params) => {
        const { data } = await client.GET("/v1/analytics/schemas", {
            params: { query: { ...rangeQuery(params), limit: params.limit ?? 10 } },
        });
        return data;
    }));
    server.tool("2kw_analytics_providers", "Provider usage breakdown (extraction surface only): counts, success rate, tokens, cost, avg duration.", dateRange, run(async (params) => {
        const { data } = await client.GET("/v1/analytics/providers", {
            params: { query: rangeQuery(params) },
        });
        return data;
    }));
    server.tool("2kw_analytics_errors", "Top failures across all surfaces, grouped by (surface, error message), with count and last occurrence.", {
        ...dateRange,
        ...limitField,
    }, run(async (params) => {
        const { data } = await client.GET("/v1/analytics/errors", {
            params: { query: { ...rangeQuery(params), limit: params.limit ?? 10 } },
        });
        return data;
    }));
    server.tool("2kw_analytics_quality", "Total experiment runs in range + average evaluator score. Snapshot of overall quality signal.", dateRange, run(async (params) => {
        const { data } = await client.GET("/v1/analytics/quality", {
            params: { query: rangeQuery(params) },
        });
        return data;
    }));
    server.tool("2kw_analytics_quality_trend", "Average evaluator score over time, bucketed by day/week/month.", {
        ...dateRange,
        ...groupByField,
    }, run(async (params) => {
        const { data } = await client.GET("/v1/analytics/quality/trend", {
            params: { query: { ...rangeQuery(params), groupBy: params.groupBy ?? "day" } },
        });
        return data;
    }));
}
|
|
163
|
+
//# sourceMappingURL=analytics.js.map
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { formatErrorForMcp } from "../errors.js";
|
|
3
|
+
/**
 * Register the 2kw billing MCP tools on the server.
 *
 * All four tools are thin GET wrappers; the shared `run` helper centralizes
 * the success envelope (pretty-printed JSON text) and the error envelope
 * (formatErrorForMcp + isError) that each tool previously duplicated.
 *
 * @param server - MCP server instance (provides server.tool)
 * @param client - API client (provides client.GET)
 */
export function register(server, client) {
    // Wrap a data-fetching function in the common success/error envelope.
    const run = (fetchData) => async (params) => {
        try {
            const data = await fetchData(params);
            return {
                content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    };
    server.tool("2kw_get_billing_tier", "Show the org's current subscription tier and its feature flags.", {}, run(async () => {
        const { data } = await client.GET("/v1/billing/tier");
        return data;
    }));
    server.tool("2kw_list_billing_tiers", "List every available subscription tier with its limits (for comparison or upgrade guidance).", {}, run(async () => {
        const { data } = await client.GET("/v1/billing/tiers");
        return data;
    }));
    server.tool("2kw_get_billing_limits", "Current usage vs. tier limits across resources (schemas, prompts, team members, etc.) plus period window and feature flags.", {}, run(async () => {
        const { data } = await client.GET("/v1/billing/limits");
        return data;
    }));
    // Map each checkable resource type to its capability endpoint; the zod
    // enum below guarantees params.resource is one of these keys.
    const CHECK_ENDPOINTS = {
        schema: "/v1/billing/can-create-schema",
        prompt: "/v1/billing/can-create-prompt",
        "team-member": "/v1/billing/can-add-team-member",
    };
    server.tool("2kw_billing_check", "Check whether the org can create another resource of the given type before attempting it. Returns a single boolean answer.", {
        resource: z
            .enum(["schema", "prompt", "team-member"])
            .describe("Resource type to check"),
    }, run(async (params) => {
        const { data } = await client.GET(CHECK_ENDPOINTS[params.resource]);
        return data;
    }));
}
|
|
76
|
+
//# sourceMappingURL=billing.js.map
|
|
@@ -0,0 +1,281 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { readFile } from "node:fs/promises";
|
|
3
|
+
import { basename } from "node:path";
|
|
4
|
+
import { BackboneApiError, formatErrorForMcp } from "../errors.js";
|
|
5
|
+
import { getMimeType } from "../mime.js";
|
|
6
|
+
/**
 * Map user-friendly format names to backend OutputFormat enum values.
 *
 * Unknown formats fall back to a simple uppercase so new backend enum values
 * pass through without a code change.
 *
 * Uses Object.hasOwn for the lookup: a plain `map[key] ?? fallback` would let
 * inherited Object.prototype properties leak through (e.g. "constructor"
 * would return the Object constructor function instead of "CONSTRUCTOR").
 *
 * @param format - User-supplied format name (any case)
 * @returns Backend OutputFormat enum string
 */
function mapOutputFormat(format) {
    const map = {
        markdown: "MD", md: "MD",
        text: "TEXT", txt: "TEXT",
        json: "JSON",
        html: "HTML",
    };
    const key = format.toLowerCase();
    return Object.hasOwn(map, key) ? map[key] : format.toUpperCase();
}
|
|
18
|
+
/**
 * Extract content from an ExportDocument that uses per-format content fields.
 *
 * Emits one MCP text part per populated wrapped format (markdown, text, html,
 * in that order), each tagged with the source filename, then a raw
 * pretty-printed part for jsonContent. Empty/missing fields produce no part.
 */
function formatExportDocument(doc) {
    // Fields whose content gets wrapped in a <document> envelope, in emit order.
    const wrappedFields = [
        ["mdContent", "markdown"],
        ["textContent", "text"],
        ["htmlContent", "html"],
    ];
    const parts = [];
    for (const [field, label] of wrappedFields) {
        const content = doc[field];
        if (content) {
            parts.push({
                type: "text",
                text: `<document filename="${doc.filename}" format="${label}">\n${content}\n</document>`,
            });
        }
    }
    // JSON content is emitted as-is (pretty-printed), without the envelope.
    if (doc.jsonContent) {
        parts.push({ type: "text", text: JSON.stringify(doc.jsonContent, null, 2) });
    }
    return parts;
}
|
|
33
|
+
// Statuses indicating an async conversion task has finished (success or failure).
const TERMINAL_STATUSES = new Set(["SUCCESS", "PARTIAL_SUCCESS", "FAILURE", "COMPLETED", "FAILED"]);
/**
 * Poll an async task until terminal status, then fetch and return the result.
 *
 * Each status poll long-polls server-side for up to pollSeconds, so the loop
 * itself does not busy-wait. The new maxAttempts parameter is a safety cap on
 * the number of polls; its default of Infinity preserves the original
 * poll-forever behavior for existing callers.
 *
 * @param client - API client (provides client.GET)
 * @param taskId - Async task ID returned by the convert endpoints
 * @param pollSeconds - Long-poll timeout (seconds) passed to the status endpoint
 * @param maxAttempts - Maximum number of status polls before giving up
 * @returns The ConvertResponse payload from the task result endpoint
 * @throws Error when maxAttempts polls elapse without a terminal status
 */
async function waitForTask(client, taskId, pollSeconds = 30, maxAttempts = Infinity) {
    for (let attempt = 0; attempt < maxAttempts; attempt++) {
        const { data } = await client.GET("/v1/convert/tasks/{taskId}", {
            params: { path: { taskId }, query: { wait: pollSeconds } },
        });
        const status = data?.taskStatus;
        if (status && TERMINAL_STATUSES.has(status.toUpperCase())) {
            const { data: result } = await client.GET("/v1/convert/tasks/{taskId}/result", {
                params: { path: { taskId } },
            });
            return result;
        }
    }
    throw new Error(`Task ${taskId} did not reach a terminal status after ${maxAttempts} polls`);
}
|
|
52
|
+
/**
 * Format a ConvertResponse into MCP text content parts.
 *
 * Produces one part per exported document format, plus an aggregated error
 * list when the response carried per-file errors. When neither documents nor
 * errors are present, falls back to a single status line so the tool always
 * returns some content.
 */
function formatConvertResponse(result) {
    const parts = (result.documents ?? []).flatMap((doc) => formatExportDocument(doc));
    if (result.errors?.length) {
        const errorLines = result.errors.map((e) => `- ${e.filename ?? "unknown"}: ${e.errorMessage}`);
        parts.push({
            type: "text",
            text: `Errors:\n${errorLines.join("\n")}`,
        });
    }
    return parts.length > 0
        ? parts
        : [{ type: "text", text: `Conversion completed with status: ${result.status}` }];
}
|
|
73
|
+
/**
 * Register the 2kw document-conversion MCP tools (convert_document,
 * get_task_status, get_task_result) on the server.
 *
 * @param server - MCP server instance (provides server.tool)
 * @param client - API client (GET/POST); also read for its private _config
 *                 when doing the raw multipart upload — see NOTE below
 */
export function register(server, client) {
    // ── convert_document ────────────────────────────────────────────────────
    server.tool("2kw_convert_document", "Convert documents (PDF, DOCX, XLSX, images, etc.) to Markdown, text, HTML, or JSON. Accepts URLs, base64 data, or local file paths. Local files are automatically read and base64-encoded. Use this to convert binary files before passing them to 2kw_create_extraction. Pipeline options: 'fast' (default, fast text extraction), 'ocr' (OCR with layout analysis), or 'vlm' (vision language model for image-heavy/complex layouts). Set async=true with waitForCompletion=true for long-running pipelines like VLM — this submits asynchronously and automatically polls until the result is ready.", {
        sources: z
            .array(z.object({
            type: z.enum(["url", "base64", "file"]).describe("Source type: 'url' for HTTP URLs, 'base64' for base64-encoded data, 'file' for local file paths"),
            url: z.string().optional().describe("URL of the document (when type='url')"),
            data: z.string().optional().describe("Base64-encoded document data (when type='base64')"),
            filename: z.string().optional().describe("Filename (required for base64/file types)"),
            path: z.string().optional().describe("Local file path (when type='file')"),
        }))
            .describe("List of document sources to convert"),
        pipeline: z
            .string()
            .optional()
            .describe("Processing pipeline: 'fast' (default, fast text extraction), 'ocr' (OCR with layout analysis), or 'vlm' (vision language model for image-heavy documents)"),
        options: z
            .object({
            outputFormats: z.array(z.string()).optional().describe("Output formats, e.g. ['md', 'text', 'html', 'json']. Only relevant for ocr/vlm pipelines."),
            ocrEngine: z.string().optional().describe("OCR engine (e.g. 'easyocr', 'tesseract')"),
            ocrLanguages: z.array(z.string()).optional().describe("OCR languages (e.g. ['en', 'de'])"),
            pdfBackend: z.string().optional().describe("PDF backend (e.g. 'dlparser', 'pypdfium2')"),
            tableStructure: z.boolean().optional().describe("Enable table structure detection"),
            maxPages: z.number().optional().describe("Maximum number of pages to process"),
            imageExportMode: z.string().optional().describe("Image export mode (e.g. 'placeholder', 'embedded')"),
            imagesScale: z.number().optional().describe("Scale factor for exported images"),
            pictureClassification: z.boolean().optional().describe("Enable picture classification"),
        })
            .optional()
            .describe("Options for ocr/vlm pipelines. Ignored when pipeline is 'fast'."),
        async: z
            .boolean()
            .optional()
            .default(false)
            .describe("If true, submit as async task. Combine with waitForCompletion=true to get the final result in a single call."),
        waitForCompletion: z
            .boolean()
            .optional()
            .default(false)
            .describe("When async=true, automatically poll until done and return the final result instead of a task ID. Recommended for VLM pipeline."),
    }, async ({ sources, pipeline, options: pipelineOptions, async: isAsync, waitForCompletion }) => {
        try {
            // Separate file sources (use multipart) from url/base64 sources (use JSON)
            const fileSources = [];
            const apiSources = [];
            for (const src of sources) {
                if (src.type === "url") {
                    if (!src.url)
                        throw new Error("url is required for type='url'");
                    apiSources.push({ kind: "http", url: src.url });
                }
                else if (src.type === "base64") {
                    if (!src.data || !src.filename)
                        throw new Error("data and filename are required for type='base64'");
                    apiSources.push({ kind: "base64", content: src.data, filename: src.filename, mimeType: getMimeType(src.filename) });
                }
                else if (src.type === "file") {
                    // Accept filename as a path fallback when path is omitted.
                    const filePath = src.path ?? src.filename;
                    if (!filePath)
                        throw new Error("path or filename is required for type='file'");
                    const name = src.filename ?? basename(filePath);
                    fileSources.push({ path: filePath, filename: name });
                }
            }
            // Build pipeline options
            // Copy only explicitly-set fields so server-side defaults still apply.
            const nestedOpts = {};
            if (pipelineOptions) {
                if (pipelineOptions.outputFormats)
                    nestedOpts.outputFormats = pipelineOptions.outputFormats.map(mapOutputFormat);
                if (pipelineOptions.ocrEngine)
                    nestedOpts.ocrEngine = pipelineOptions.ocrEngine;
                if (pipelineOptions.ocrLanguages)
                    nestedOpts.ocrLanguages = pipelineOptions.ocrLanguages;
                if (pipelineOptions.pdfBackend)
                    nestedOpts.pdfBackend = pipelineOptions.pdfBackend;
                if (pipelineOptions.tableStructure !== undefined)
                    nestedOpts.tableStructure = pipelineOptions.tableStructure;
                if (pipelineOptions.maxPages !== undefined)
                    nestedOpts.maxPages = pipelineOptions.maxPages;
                if (pipelineOptions.imageExportMode)
                    nestedOpts.imageExportMode = pipelineOptions.imageExportMode;
                if (pipelineOptions.imagesScale !== undefined)
                    nestedOpts.imagesScale = pipelineOptions.imagesScale;
                if (pipelineOptions.pictureClassification !== undefined)
                    nestedOpts.pictureClassification = pipelineOptions.pictureClassification;
            }
            // Use multipart upload for file sources (avoids base64 33% size overhead)
            if (fileSources.length > 0 && apiSources.length === 0) {
                const formData = new FormData();
                for (const f of fileSources) {
                    const buffer = await readFile(f.path);
                    const blob = new Blob([buffer], { type: getMimeType(f.filename) });
                    formData.append("files", blob, f.filename);
                }
                if (Object.keys(nestedOpts).length > 0) {
                    formData.append("options", new Blob([JSON.stringify(nestedOpts)], { type: "application/json" }));
                }
                // NOTE(review): reaches into the client's private _config for
                // baseUrl/apiKey to issue a raw multipart fetch — presumably
                // the generated client has no multipart helper; confirm
                // _config stays part of client.js.
                const { baseUrl, apiKey } = client._config;
                const endpoint = isAsync ? "/v1/convert/file/async" : "/v1/convert/file";
                const url = pipeline
                    ? `${baseUrl}${endpoint}?pipeline=${encodeURIComponent(pipeline)}`
                    : `${baseUrl}${endpoint}`;
                const res = await fetch(url, {
                    method: "POST",
                    headers: { Authorization: `Bearer ${apiKey}` },
                    body: formData,
                });
                if (!res.ok) {
                    // Normalize non-JSON error bodies into the shape
                    // BackboneApiError expects.
                    let body;
                    try {
                        body = await res.json();
                    }
                    catch {
                        body = { error: res.statusText, message: `HTTP ${res.status}: ${res.statusText}`, status: res.status, timestamp: new Date().toISOString() };
                    }
                    throw new BackboneApiError(body);
                }
                const data = await res.json();
                if (isAsync) {
                    const taskId = data?.taskId;
                    if (waitForCompletion && taskId) {
                        // Block until the task finishes, then return the documents.
                        const result = await waitForTask(client, taskId);
                        return { content: formatConvertResponse(result) };
                    }
                    // Caller did not ask to wait: return the submission payload (task ID).
                    return { content: [{ type: "text", text: JSON.stringify(data, null, 2) }] };
                }
                return { content: formatConvertResponse(data) };
            }
            // Fall back to JSON body for url/base64 sources (or mixed)
            // For mixed sources with files, base64-encode as before (rare case)
            for (const f of fileSources) {
                const fileBuffer = await readFile(f.path);
                apiSources.push({ kind: "base64", content: fileBuffer.toString("base64"), filename: f.filename, mimeType: getMimeType(f.filename) });
            }
            const opts = {};
            if (pipeline)
                opts.pipeline = pipeline;
            if (Object.keys(nestedOpts).length > 0)
                opts.options = nestedOpts;
            const body = {
                sources: apiSources,
                options: Object.keys(opts).length > 0 ? opts : undefined,
            };
            if (isAsync) {
                const { data } = await client.POST("/v1/convert/source/async", { body });
                const taskId = data?.taskId;
                if (waitForCompletion && taskId) {
                    const result = await waitForTask(client, taskId);
                    return { content: formatConvertResponse(result) };
                }
                return {
                    content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
                };
            }
            const { data } = await client.POST("/v1/convert/source", { body });
            return { content: formatConvertResponse(data) };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── get_task_status ─────────────────────────────────────────────────────
    server.tool("2kw_get_task_status", "Check the status of an async conversion task. Supports long polling with the wait parameter.", {
        taskId: z.string().describe("The async task ID returned by convert_document"),
        wait: z
            .number()
            .optional()
            .describe("Long-poll timeout in seconds (max 60)"),
    }, async ({ taskId, wait }) => {
        try {
            const { data } = await client.GET("/v1/convert/tasks/{taskId}", {
                params: {
                    path: { taskId },
                    query: { wait },
                },
            });
            return {
                content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── get_task_result ─────────────────────────────────────────────────────
    server.tool("2kw_get_task_result", "Get the result of a completed async conversion task.", {
        taskId: z.string().describe("The async task ID"),
    }, async ({ taskId }) => {
        try {
            const { data } = await client.GET("/v1/convert/tasks/{taskId}/result", {
                params: { path: { taskId } },
            });
            return { content: formatConvertResponse(data) };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
}
|
|
281
|
+
//# sourceMappingURL=conversion.js.map
|