@2kw/ai-mcp-server 4.0.0-dev.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +19 -0
- package/README.md +123 -0
- package/dist/client.d.ts +14 -0
- package/dist/client.js +46 -0
- package/dist/errors.d.ts +15 -0
- package/dist/errors.js +22 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +116 -0
- package/dist/mime.d.ts +2 -0
- package/dist/mime.js +56 -0
- package/dist/schema-compat.d.ts +11 -0
- package/dist/schema-compat.js +51 -0
- package/dist/tools/ai-gateway.d.ts +4 -0
- package/dist/tools/ai-gateway.js +107 -0
- package/dist/tools/analytics.d.ts +4 -0
- package/dist/tools/analytics.js +163 -0
- package/dist/tools/billing.d.ts +4 -0
- package/dist/tools/billing.js +76 -0
- package/dist/tools/conversion.d.ts +4 -0
- package/dist/tools/conversion.js +281 -0
- package/dist/tools/datasets.d.ts +4 -0
- package/dist/tools/datasets.js +200 -0
- package/dist/tools/docs.d.ts +4 -0
- package/dist/tools/docs.js +171 -0
- package/dist/tools/evaluators.d.ts +4 -0
- package/dist/tools/evaluators.js +140 -0
- package/dist/tools/experiments.d.ts +4 -0
- package/dist/tools/experiments.js +231 -0
- package/dist/tools/extraction.d.ts +4 -0
- package/dist/tools/extraction.js +245 -0
- package/dist/tools/prompts.d.ts +4 -0
- package/dist/tools/prompts.js +373 -0
- package/dist/tools/providers.d.ts +4 -0
- package/dist/tools/providers.js +148 -0
- package/dist/tools/schema-labels.d.ts +4 -0
- package/dist/tools/schema-labels.js +88 -0
- package/dist/tools/schema-testing.d.ts +4 -0
- package/dist/tools/schema-testing.js +96 -0
- package/dist/tools/schema-versions.d.ts +4 -0
- package/dist/tools/schema-versions.js +127 -0
- package/dist/tools/schemas.d.ts +4 -0
- package/dist/tools/schemas.js +136 -0
- package/dist/tools/scores.d.ts +4 -0
- package/dist/tools/scores.js +43 -0
- package/dist/tools/tracing.d.ts +4 -0
- package/dist/tools/tracing.js +124 -0
- package/dist/tools/transcription.d.ts +4 -0
- package/dist/tools/transcription.js +76 -0
- package/package.json +45 -0
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { formatErrorForMcp } from "../errors.js";
|
|
3
|
+
/**
 * Registers experiment-related MCP tools on the given server.
 *
 * @param {object} server - MCP server exposing `tool(name, description, zodShape, handler)`.
 * @param {object} client - openapi-fetch style client exposing `GET`/`POST`.
 */
export function register(server, client) {
    // ── list_experiments ────────────────────────────────────────────
    server.tool("2kw_list_experiments", "List experiments in the organization with optional search, status filter, and pagination.", {
        search: z.string().optional().describe("Filter experiments by name"),
        status: z
            .enum(["DRAFT", "RUNNING", "COMPLETED", "FAILED"])
            .optional()
            .describe("Filter by experiment status"),
        page: z.number().optional().default(0).describe("Page number (0-based)"),
        size: z.number().optional().default(20).describe("Page size"),
        sort: z.string().optional().describe("Sort field and direction"),
    }, async ({ search, status, page, size, sort }) => {
        try {
            const { data } = await client.GET("/v1/experiments", {
                params: {
                    query: {
                        search,
                        status,
                        pageable: {
                            page,
                            size,
                            // API expects sort as an array of "field,direction" strings.
                            sort: sort ? [sort] : undefined,
                        },
                    },
                },
            });
            const result = data;
            // Guard with optional chaining: `data` may be undefined on an empty
            // response body (same convention as the extraction tools).
            const lines = (result?.content ?? []).map((e) => `- ${e.name} (id: ${e.id}, status: ${e.status ?? "DRAFT"}, type: ${e.type ?? "—"})${e.description ? ` — ${e.description}` : ""}`);
            return {
                content: [
                    {
                        type: "text",
                        text: `Experiments (${result?.totalElements} total, page ${(result?.number ?? 0) + 1}/${result?.totalPages}):\n${lines.join("\n") || "(none)"}`,
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── get_experiment ──────────────────────────────────────────────
    server.tool("2kw_get_experiment", "Get an experiment by its ID, including status and dataset version reference.", {
        experimentId: z.string().describe("The experiment ID"),
    }, async ({ experimentId }) => {
        try {
            const { data } = await client.GET("/v1/experiments/{id}", {
                params: { path: { id: experimentId } },
            });
            return {
                content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── create_experiment ───────────────────────────────────────────
    server.tool("2kw_create_experiment", "Create a new experiment, optionally linked to a dataset version.", {
        name: z.string().min(1).describe("Experiment name"),
        description: z.string().optional().describe("Experiment description"),
        type: z.string().optional().describe("Experiment type"),
        datasetVersionId: z
            .string()
            .optional()
            .describe("Dataset version ID to evaluate against"),
        metadata: z.unknown().optional().describe("Arbitrary metadata (JSON object)"),
    }, async ({ name, description, type, datasetVersionId, metadata }) => {
        try {
            // Only send optional fields the caller explicitly provided, so the
            // API can apply its own defaults for absent keys.
            const body = { name };
            if (description !== undefined)
                body.description = description;
            if (type !== undefined)
                body.type = type;
            if (datasetVersionId !== undefined)
                body.datasetVersionId = datasetVersionId;
            if (metadata !== undefined)
                body.metadata = metadata;
            const { data } = await client.POST("/v1/experiments", {
                body: body,
            });
            return {
                content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── add_variant ─────────────────────────────────────────────────
    server.tool("2kw_add_variant", "Add a variant to an experiment with a task type and configuration.", {
        experimentId: z.string().describe("The experiment ID"),
        name: z.string().min(1).describe("Variant name"),
        taskType: z.string().min(1).describe("Task type (e.g., EXTRACTION, CLASSIFICATION)"),
        configuration: z.unknown().describe("Variant configuration (JSON object)"),
        description: z.string().optional().describe("Variant description"),
        sortOrder: z.number().optional().describe("Sort order for display"),
    }, async ({ experimentId, name, taskType, configuration, description, sortOrder }) => {
        try {
            // Only send optional fields the caller explicitly provided.
            const body = { name, taskType, configuration };
            if (description !== undefined)
                body.description = description;
            if (sortOrder !== undefined)
                body.sortOrder = sortOrder;
            const { data } = await client.POST("/v1/experiments/{id}/variants", {
                params: { path: { id: experimentId } },
                body: body,
            });
            return {
                content: [{ type: "text", text: JSON.stringify(data, null, 2) }],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── run_experiment ──────────────────────────────────────────────
    server.tool("2kw_run_experiment", "Start an experiment run. Returns immediately with the run ID — use 2kw_get_experiment_runs to poll for completion.", {
        experimentId: z.string().describe("The experiment ID"),
    }, async ({ experimentId }) => {
        try {
            const { data } = await client.POST("/v1/experiments/{id}/runs", {
                params: { path: { id: experimentId } },
            });
            const run = data;
            return {
                content: [
                    {
                        type: "text",
                        text: `Run started (id: ${run?.id}, status: ${run?.status ?? "PENDING"}).\n\nUse 2kw_get_experiment_runs to check progress, then 2kw_get_run_results to retrieve results once completed.\n\n${JSON.stringify(run, null, 2)}`,
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── get_experiment_runs ─────────────────────────────────────────
    server.tool("2kw_get_experiment_runs", "List runs for an experiment with status, progress, and timing information.", {
        experimentId: z.string().describe("The experiment ID"),
        page: z.number().optional().default(0).describe("Page number (0-based)"),
        size: z.number().optional().default(20).describe("Page size"),
    }, async ({ experimentId, page, size }) => {
        try {
            const { data } = await client.GET("/v1/experiments/{id}/runs", {
                params: {
                    path: { id: experimentId },
                    query: { pageable: { page, size } },
                },
            });
            const result = data;
            // Guard with optional chaining: `data` may be undefined on an empty response.
            const lines = (result?.content ?? []).map((r) => {
                // Show "completed/total (failed)" when totals are known, else a dash.
                const progress = r.itemsTotal
                    ? `${r.itemsCompleted ?? 0}/${r.itemsTotal}${r.itemsFailed ? ` (${r.itemsFailed} failed)` : ""}`
                    : "—";
                return `- Run ${r.id} [${r.status}] variant=${r.variantName ?? r.variantId} progress=${progress}${r.completedAt ? ` completed=${r.completedAt}` : ""}`;
            });
            return {
                content: [
                    {
                        type: "text",
                        text: `Runs (${result?.totalElements} total, page ${(result?.number ?? 0) + 1}/${result?.totalPages}):\n${lines.join("\n") || "(none)"}`,
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── get_run_results ─────────────────────────────────────────────
    server.tool("2kw_get_run_results", "Get detailed results for an experiment run, including output, token usage, cost, and errors per dataset item.", {
        experimentId: z.string().describe("The experiment ID"),
        runId: z.string().describe("The run ID"),
        page: z.number().optional().default(0).describe("Page number (0-based)"),
        size: z.number().optional().default(20).describe("Page size"),
    }, async ({ experimentId, runId, page, size }) => {
        try {
            const { data } = await client.GET("/v1/experiments/{id}/runs/{runId}/results", {
                params: {
                    path: { id: experimentId, runId },
                    query: { pageable: { page, size } },
                },
            });
            const result = data;
            // Guard with optional chaining: `data` may be undefined on an empty response.
            const lines = (result?.content ?? []).map((r) => {
                const cost = r.estimatedCost != null ? `$${r.estimatedCost}` : "—";
                const tokens = `${r.inputTokens ?? 0}in/${r.outputTokens ?? 0}out`;
                const duration = r.durationMs != null ? `${r.durationMs}ms` : "—";
                const error = r.error ? ` ERROR: ${JSON.stringify(r.error)}` : "";
                return `- Item ${r.datasetItemId}: ${duration}, ${tokens}, ${cost}${error}`;
            });
            return {
                content: [
                    {
                        type: "text",
                        text: `Results (${result?.totalElements} total, page ${(result?.number ?? 0) + 1}/${result?.totalPages}):\n${lines.join("\n") || "(none)"}`,
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
}
|
|
231
|
+
//# sourceMappingURL=experiments.js.map
|
|
@@ -0,0 +1,245 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { readFile } from "node:fs/promises";
|
|
3
|
+
import { basename, extname } from "node:path";
|
|
4
|
+
import { formatErrorForMcp } from "../errors.js";
|
|
5
|
+
// Supported image file extensions for vision extraction, mapped to MIME types.
// Frozen: this is a shared module-level lookup table and must not be mutated
// at runtime (it is also interpolated into user-facing error messages).
const IMAGE_MIME_TYPES = Object.freeze({
    ".png": "image/png",
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg",
    ".gif": "image/gif",
    ".webp": "image/webp",
});
|
|
12
|
+
/**
 * Registers extraction-related MCP tools on the given server.
 *
 * @param {object} server - MCP server exposing `tool(name, description, zodShape, handler)`.
 * @param {object} client - openapi-fetch style client exposing `GET`/`POST`.
 */
export function register(server, client) {
    // ── create_extraction ───────────────────────────────────────────────────
    server.tool("2kw_create_extraction", "Extract structured data from text and/or images using a schema and AI model. Supports text-only, vision (images), or combined extraction. Use a vision-capable model (e.g. openai/gpt-4o) when providing images. IMPORTANT: inputText accepts only plain text/markdown strings. For binary files (PDF, DOCX, XLSX, etc.), first convert them using 2kw_convert_document to get markdown, then pass the mdContent here as inputText. Use async=true for large documents to avoid gateway timeouts.", {
        schemaId: z.string().describe("The schema ID to extract with"),
        schemaVersionId: z
            .string()
            .optional()
            .describe("Specific schema version ID (uses latest active version if omitted)"),
        inputText: z
            .string()
            .optional()
            .describe("The text to extract structured data from (required unless inputImages provided)"),
        inputImages: z
            .array(z.object({
            type: z.enum(["base64", "file"]).describe("Source type: 'base64' for pre-encoded data, 'file' for local file path"),
            data: z.string().optional().describe("Base64-encoded image data (when type='base64')"),
            path: z.string().optional().describe("Local file path to image (when type='file')"),
            mimeType: z.string().optional().describe("MIME type (auto-detected for files, required for base64)"),
        }))
            // Enforce the documented limit instead of relying on a server-side rejection.
            .max(10)
            .optional()
            .describe("Images for vision extraction (max 10). Requires a vision-capable model."),
        model: z.string().describe("AI model in 'provider/model' format"),
        async: z
            .boolean()
            .optional()
            .default(false)
            .describe("If true, submit as async extraction and return immediately (not supported with images)"),
    }, async ({ schemaId, schemaVersionId, inputText, inputImages, model, async: isAsync }) => {
        try {
            // The async endpoint does not support images (see tool description);
            // fail fast with a clear message instead of forwarding a bad request.
            if (isAsync && inputImages?.length) {
                throw new Error("async=true is not supported with inputImages; use synchronous extraction for vision requests");
            }
            // Resolve images: read local files and convert to base64
            const resolvedImages = [];
            if (inputImages?.length) {
                for (const img of inputImages) {
                    if (img.type === "file") {
                        if (!img.path)
                            throw new Error("path is required for type='file'");
                        const fileBuffer = await readFile(img.path);
                        const ext = extname(img.path).toLowerCase();
                        // Explicit mimeType wins; otherwise detect from the extension.
                        const mimeType = img.mimeType ?? IMAGE_MIME_TYPES[ext];
                        if (!mimeType)
                            throw new Error(`Cannot determine MIME type for ${basename(img.path)}. Supported: ${Object.keys(IMAGE_MIME_TYPES).join(", ")}`);
                        resolvedImages.push({ data: fileBuffer.toString("base64"), mimeType });
                    }
                    else {
                        if (!img.data)
                            throw new Error("data is required for type='base64'");
                        if (!img.mimeType)
                            throw new Error("mimeType is required for type='base64'");
                        resolvedImages.push({ data: img.data, mimeType: img.mimeType });
                    }
                }
            }
            // Omit keys whose values are absent so the API applies its defaults.
            const body = {
                schemaId,
                model,
                ...(inputText && { inputText }),
                ...(resolvedImages.length > 0 && { inputImages: resolvedImages }),
                ...(schemaVersionId && { schemaVersionId }),
            };
            if (isAsync) {
                const { data } = await client.POST("/v1/extractions/async", { body });
                const result = data;
                return {
                    content: [
                        {
                            type: "text",
                            // Optional chaining: `data` may be undefined on an empty response.
                            text: `Extraction submitted (id: ${result?.id}, status: ${result?.status}). Poll with 2kw_get_extraction to check progress.`,
                        },
                    ],
                };
            }
            const { data } = await client.POST("/v1/extractions", { body });
            const result = data;
            // Some synchronous submissions still come back unfinished; report them
            // the same way as async submissions so the caller knows to poll.
            if (result?.status === "PENDING" || result?.status === "PROCESSING") {
                return {
                    content: [
                        {
                            type: "text",
                            text: `Extraction submitted (id: ${result.id}, status: ${result.status}). Poll with 2kw_get_extraction to check progress.`,
                        },
                    ],
                };
            }
            return { content: formatExtractionResult(result) };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── get_extraction ──────────────────────────────────────────────────────
    server.tool("2kw_get_extraction", "Get an extraction by its ID. Use this to check the status of async extractions or retrieve results.", {
        extractionId: z.string().describe("The extraction ID"),
    }, async ({ extractionId }) => {
        try {
            const { data } = await client.GET("/v1/extractions/{id}", {
                params: { path: { id: extractionId } },
            });
            return { content: formatExtractionResult(data) };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── list_extractions ────────────────────────────────────────────────────
    server.tool("2kw_list_extractions", "List extractions in the organization with optional filtering by status, schema version, or search term.", {
        status: z
            .enum(["PENDING", "PROCESSING", "COMPLETED", "FAILED"])
            .optional()
            .describe("Filter by extraction status"),
        schemaVersionId: z.string().optional().describe("Filter by schema version ID"),
        search: z.string().optional().describe("Search by model name"),
        page: z.number().optional().default(0).describe("Page number (0-based)"),
        size: z.number().optional().default(20).describe("Page size"),
        sort: z.string().optional().describe("Sort field and direction (default: createdAt,desc)"),
    }, async ({ status, schemaVersionId, search, page, size, sort }) => {
        try {
            const { data } = await client.GET("/v1/extractions", {
                params: {
                    query: {
                        status,
                        schemaVersionId,
                        search,
                        pageable: {
                            page,
                            size,
                            // API expects sort as an array of "field,direction" strings.
                            sort: sort ? [sort] : undefined,
                        },
                    },
                },
            });
            const result = data;
            const lines = (result?.content ?? []).map((e) => `- ${e.id} [${e.status}] model=${e.model} schema=${e.schemaId}${e.errorMessage ? ` error: ${e.errorMessage}` : ""}`);
            return {
                content: [
                    {
                        type: "text",
                        text: `Extractions (${result?.totalElements} total, page ${(result?.number ?? 0) + 1}/${result?.totalPages}):\n${lines.join("\n") || "(none)"}`,
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── estimate_tokens ─────────────────────────────────────────────────────
    server.tool("2kw_estimate_tokens", "Estimate token usage for an extraction without executing it. Useful for cost estimation.", {
        schemaId: z.string().describe("The schema ID"),
        schemaVersionId: z.string().optional().describe("Specific schema version ID"),
        inputText: z.string().describe("The text to estimate tokens for"),
    }, async ({ schemaId, schemaVersionId, inputText }) => {
        try {
            const { data } = await client.POST("/v1/extractions/estimate", {
                body: {
                    schemaId,
                    inputText,
                    ...(schemaVersionId && { schemaVersionId }),
                },
            });
            const result = data;
            return {
                content: [
                    {
                        type: "text",
                        text: `Token estimate:\n  Input tokens: ${result?.inputTokens}\n  Estimated output tokens: ${result?.estimatedOutputTokens}\n  Strategy: ${result?.strategy}`,
                    },
                ],
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
    // ── rerun_extraction ────────────────────────────────────────────────────
    server.tool("2kw_rerun_extraction", "Re-run an existing extraction with the same configuration. Creates a new extraction.", {
        extractionId: z.string().describe("The extraction ID to re-run"),
    }, async ({ extractionId }) => {
        try {
            const { data } = await client.POST("/v1/extractions/{id}/rerun", {
                params: { path: { id: extractionId } },
            });
            return { content: formatExtractionResult(data) };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: formatErrorForMcp(error) }],
                isError: true,
            };
        }
    });
}
|
|
219
|
+
/**
 * Builds MCP text-content parts summarizing an extraction record.
 *
 * Always emits a header part (id, status, model, optional strategy), then
 * conditionally: the JSON result for COMPLETED extractions, the error
 * message for FAILED ones, and a token estimate when available.
 *
 * @param {object|undefined} extraction - Extraction record from the API;
 *   may be undefined when the response body was empty.
 * @returns {Array<{type: string, text: string}>} MCP content parts.
 */
function formatExtractionResult(extraction) {
    // Defensive guard: callers pass the raw `data` from the client, which can
    // be undefined on an empty response; previously this threw a TypeError.
    if (extraction == null) {
        return [{ type: "text", text: "No extraction data returned." }];
    }
    const parts = [];
    parts.push({
        type: "text",
        text: `Extraction ${extraction.id} [${extraction.status}]\nModel: ${extraction.model}${extraction.strategy ? `\nStrategy: ${extraction.strategy}` : ""}`,
    });
    if (extraction.status === "COMPLETED" && extraction.result) {
        parts.push({
            type: "text",
            text: `Result:\n${JSON.stringify(extraction.result, null, 2)}`,
        });
    }
    if (extraction.status === "FAILED" && extraction.errorMessage) {
        parts.push({
            type: "text",
            text: `Error: ${extraction.errorMessage}`,
        });
    }
    if (extraction.estimatedInputTokens != null) {
        parts.push({
            type: "text",
            text: `Tokens: ~${extraction.estimatedInputTokens} input, ~${extraction.estimatedOutputTokens} output`,
        });
    }
    return parts;
}
|
|
245
|
+
//# sourceMappingURL=extraction.js.map
|