@decocms/bindings 0.2.4 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/README.md +3 -3
  2. package/package.json +9 -35
  3. package/src/core/binder.ts +241 -0
  4. package/src/core/client/README.md +3 -0
  5. package/{dist/core/client/http-client-transport.js → src/core/client/http-client-transport.ts} +24 -12
  6. package/src/core/client/index.ts +1 -0
  7. package/src/core/client/mcp-client.ts +149 -0
  8. package/src/core/client/mcp.ts +93 -0
  9. package/src/core/client/proxy.ts +151 -0
  10. package/src/core/connection.ts +38 -0
  11. package/src/core/subset.ts +514 -0
  12. package/src/index.ts +15 -0
  13. package/src/well-known/agent.ts +60 -0
  14. package/src/well-known/collections.ts +416 -0
  15. package/src/well-known/language-model.ts +383 -0
  16. package/test/index.test.ts +942 -0
  17. package/tsconfig.json +11 -0
  18. package/vitest.config.ts +8 -0
  19. package/dist/core/binder.d.ts +0 -3
  20. package/dist/core/binder.js +0 -77
  21. package/dist/core/binder.js.map +0 -1
  22. package/dist/core/client/http-client-transport.d.ts +0 -12
  23. package/dist/core/client/http-client-transport.js.map +0 -1
  24. package/dist/core/client/index.d.ts +0 -3
  25. package/dist/core/client/index.js +0 -5
  26. package/dist/core/client/index.js.map +0 -1
  27. package/dist/core/client/mcp-client.d.ts +0 -233
  28. package/dist/core/client/mcp-client.js +0 -99
  29. package/dist/core/client/mcp-client.js.map +0 -1
  30. package/dist/core/client/mcp.d.ts +0 -3
  31. package/dist/core/client/mcp.js +0 -29
  32. package/dist/core/client/mcp.js.map +0 -1
  33. package/dist/core/client/proxy.d.ts +0 -10
  34. package/dist/core/client/proxy.js +0 -104
  35. package/dist/core/client/proxy.js.map +0 -1
  36. package/dist/core/connection.d.ts +0 -30
  37. package/dist/core/connection.js +0 -1
  38. package/dist/core/connection.js.map +0 -1
  39. package/dist/core/subset.d.ts +0 -17
  40. package/dist/core/subset.js +0 -319
  41. package/dist/core/subset.js.map +0 -1
  42. package/dist/index-D0aUdNls.d.ts +0 -153
  43. package/dist/index.d.ts +0 -3
  44. package/dist/index.js +0 -7
  45. package/dist/index.js.map +0 -1
  46. package/dist/well-known/agent.d.ts +0 -903
  47. package/dist/well-known/agent.js +0 -27
  48. package/dist/well-known/agent.js.map +0 -1
  49. package/dist/well-known/collections.d.ts +0 -537
  50. package/dist/well-known/collections.js +0 -134
  51. package/dist/well-known/collections.js.map +0 -1
  52. package/dist/well-known/language-model.d.ts +0 -2836
  53. package/dist/well-known/language-model.js +0 -209
  54. package/dist/well-known/language-model.js.map +0 -1
@@ -0,0 +1,383 @@
1
+ /**
2
+ * Language Model Well-Known Binding
3
+ *
4
+ * Defines the interface for AI model providers.
5
+ * Any MCP that implements this binding can provide AI models and streaming endpoints.
6
+ *
7
+ * This binding includes:
8
+ * - LLM operations (metadata, stream, generate)
9
+ * - Collection bindings for LIST and GET operations (read-only)
10
+ * - Streaming endpoint information is included directly in the model entity schema.
11
+ */
12
+
13
+ import { z } from "zod";
14
+ import { bindingClient, type ToolBinder } from "../core/binder";
15
+ import {
16
+ BaseCollectionEntitySchema,
17
+ createCollectionBindings,
18
+ } from "./collections";
19
+
20
+ /**
21
+ * Language Model Call Options Schema
22
+ * Based on LanguageModelV2CallOptions from @ai-sdk/provider
23
+ */
24
+ export const LanguageModelCallOptionsSchema = z.object({
25
+ // Core parameters
26
+ prompt: z
27
+ .any()
28
+ .describe(
29
+ "A language mode prompt is a standardized prompt type (messages, system, etc.)",
30
+ ),
31
+
32
+ // Generation parameters
33
+ maxOutputTokens: z
34
+ .number()
35
+ .optional()
36
+ .describe("Maximum number of tokens to generate"),
37
+ temperature: z
38
+ .number()
39
+ .optional()
40
+ .describe(
41
+ "Temperature setting. The range depends on the provider and model",
42
+ ),
43
+ topP: z.number().optional().describe("Nucleus sampling parameter"),
44
+ topK: z
45
+ .number()
46
+ .optional()
47
+ .describe(
48
+ "Only sample from the top K options for each subsequent token. Used to remove long tail low probability responses",
49
+ ),
50
+ presencePenalty: z
51
+ .number()
52
+ .optional()
53
+ .describe(
54
+ "Presence penalty setting. It affects the likelihood of the model to repeat information that is already in the prompt",
55
+ ),
56
+ frequencyPenalty: z
57
+ .number()
58
+ .optional()
59
+ .describe(
60
+ "Frequency penalty setting. It affects the likelihood of the model to repeatedly use the same words or phrases",
61
+ ),
62
+ seed: z
63
+ .number()
64
+ .optional()
65
+ .describe(
66
+ "The seed (integer) to use for random sampling. If set and supported by the model, calls will generate deterministic results",
67
+ ),
68
+
69
+ // Stop sequences
70
+ stopSequences: z
71
+ .array(z.string())
72
+ .optional()
73
+ .describe(
74
+ "Stop sequences. If set, the model will stop generating text when one of the stop sequences is generated",
75
+ ),
76
+
77
+ // Response format
78
+ responseFormat: z
79
+ .union([
80
+ z.object({ type: z.literal("text") }),
81
+ z.object({
82
+ type: z.literal("json"),
83
+ schema: z
84
+ .any()
85
+ .optional()
86
+ .describe("JSON schema that the generated output should conform to"),
87
+ name: z
88
+ .string()
89
+ .optional()
90
+ .describe("Name of output that should be generated"),
91
+ description: z
92
+ .string()
93
+ .optional()
94
+ .describe("Description of the output that should be generated"),
95
+ }),
96
+ ])
97
+ .optional()
98
+ .describe(
99
+ "Response format. The output can either be text or JSON. Default is text",
100
+ ),
101
+
102
+ // Tools
103
+ tools: z
104
+ .array(z.any())
105
+ .optional()
106
+ .describe("The tools that are available for the model"),
107
+ toolChoice: z
108
+ .any()
109
+ .optional()
110
+ .describe("Specifies how the tool should be selected. Defaults to 'auto'"),
111
+
112
+ // Stream options
113
+ includeRawChunks: z
114
+ .boolean()
115
+ .optional()
116
+ .describe(
117
+ "Include raw chunks in the stream. Only applicable for streaming calls",
118
+ ),
119
+
120
+ // Abort signal
121
+ abortSignal: z
122
+ .any()
123
+ .optional()
124
+ .describe("Abort signal for cancelling the operation"),
125
+
126
+ // Additional options
127
+ headers: z
128
+ .record(z.string(), z.union([z.string(), z.undefined()]))
129
+ .optional()
130
+ .describe("Additional HTTP headers to be sent with the request"),
131
+ providerOptions: z
132
+ .any()
133
+ .optional()
134
+ .describe("Additional provider-specific options"),
135
+ });
136
+
137
+ /**
138
+ * Language Model Generate Output Schema
139
+ * Based on the return type of LanguageModelV2.doGenerate from @ai-sdk/provider
140
+ */
141
+ export const LanguageModelGenerateOutputSchema = z.object({
142
+ // Ordered content that the model has generated
143
+ content: z
144
+ .array(z.any())
145
+ .describe(
146
+ "Ordered content that the model has generated (text, tool-calls, reasoning, files, sources)",
147
+ ),
148
+
149
+ // Finish reason (required)
150
+ finishReason: z
151
+ .enum([
152
+ "stop",
153
+ "length",
154
+ "content-filter",
155
+ "tool-calls",
156
+ "error",
157
+ "other",
158
+ "unknown",
159
+ ])
160
+ .describe("Reason why generation stopped"),
161
+
162
+ // Usage information (required)
163
+ usage: z
164
+ .object({
165
+ inputTokens: z.number().optional(),
166
+ outputTokens: z.number().optional(),
167
+ totalTokens: z.number().optional(),
168
+ reasoningTokens: z.number().optional(),
169
+ })
170
+ .passthrough()
171
+ .transform((val) => ({
172
+ inputTokens: val.inputTokens,
173
+ outputTokens: val.outputTokens,
174
+ totalTokens: val.totalTokens,
175
+ reasoningTokens: val.reasoningTokens,
176
+ ...val,
177
+ }))
178
+ .describe("Usage information for the language model call"),
179
+
180
+ // Provider metadata
181
+ providerMetadata: z
182
+ .any()
183
+ .optional()
184
+ .describe("Additional provider-specific metadata"),
185
+
186
+ // Request information for telemetry and debugging
187
+ request: z
188
+ .object({
189
+ body: z
190
+ .any()
191
+ .optional()
192
+ .describe("Request HTTP body sent to the provider API"),
193
+ })
194
+ .optional()
195
+ .describe("Optional request information for telemetry and debugging"),
196
+
197
+ // Response information for telemetry and debugging
198
+ response: z
199
+ .object({
200
+ id: z.string().optional().describe("ID for the generated response"),
201
+ timestamp: z
202
+ .date()
203
+ .optional()
204
+ .describe("Timestamp for the start of the generated response"),
205
+ modelId: z
206
+ .string()
207
+ .optional()
208
+ .describe("The ID of the response model that was used"),
209
+ headers: z
210
+ .record(z.string(), z.string())
211
+ .optional()
212
+ .describe("Response headers"),
213
+ body: z.any().optional().describe("Response HTTP body"),
214
+ })
215
+ .optional()
216
+ .describe("Optional response information for telemetry and debugging"),
217
+
218
+ // Warnings for the call (required)
219
+ warnings: z
220
+ .array(z.any())
221
+ .describe("Warnings for the call, e.g. unsupported settings"),
222
+ });
223
+
224
+ /**
225
+ * Language Model Stream Output Schema
226
+ * Based on the return type of LanguageModelV2.doStream from @ai-sdk/provider
227
+ */
228
+ export const LanguageModelStreamOutputSchema = z.object({
229
+ // Stream of language model output parts
230
+ stream: z.any().describe("ReadableStream of LanguageModelV2StreamPart"),
231
+
232
+ // Request information for telemetry and debugging
233
+ request: z
234
+ .object({
235
+ body: z
236
+ .any()
237
+ .optional()
238
+ .describe("Request HTTP body sent to the provider API"),
239
+ })
240
+ .optional()
241
+ .describe("Optional request information for telemetry and debugging"),
242
+
243
+ // Response information
244
+ response: z
245
+ .object({
246
+ headers: z
247
+ .record(z.string(), z.string())
248
+ .optional()
249
+ .describe("Response headers"),
250
+ })
251
+ .optional()
252
+ .describe("Optional response data"),
253
+ });
254
+
255
+ export const LanguageModelMetadataSchema = z.object({
256
+ supportedUrls: z
257
+ .record(z.string(), z.array(z.string()))
258
+ .describe("Supported URL patterns by media type for the provider"),
259
+ });
260
+
261
+ /**
262
+ * Simple Model schema for LLM operations
263
+ */
264
+ export const ModelSchema = z.object({
265
+ modelId: z.string().describe("The ID of the model"),
266
+ // Model-specific fields
267
+ logo: z.string().nullable(),
268
+ description: z.string().nullable(),
269
+ capabilities: z.array(z.string()),
270
+ limits: z
271
+ .object({
272
+ contextWindow: z.number(),
273
+ maxOutputTokens: z.number(),
274
+ })
275
+ .nullable(),
276
+ costs: z
277
+ .object({
278
+ input: z.number(),
279
+ output: z.number(),
280
+ })
281
+ .nullable(),
282
+ // Provider information
283
+ provider: z
284
+ .enum([
285
+ "openai",
286
+ "anthropic",
287
+ "google",
288
+ "x-ai",
289
+ "deepseek",
290
+ "openai-compatible",
291
+ "openrouter",
292
+ ])
293
+ .nullable(),
294
+ });
295
+
296
+ export const LanguageModelInputSchema = z.object({
297
+ modelId: z.string().describe("The ID of the model"),
298
+ callOptions: LanguageModelCallOptionsSchema,
299
+ });
300
+
301
+ /**
302
+ * Model entity schema for AI models (Collection Entity)
303
+ * Extends BaseCollectionEntitySchema with model-specific fields
304
+ * Base schema already includes: id, title, created_at, updated_at, created_by, updated_by
305
+ */
306
+ export const ModelCollectionEntitySchema = BaseCollectionEntitySchema.extend({
307
+ // Model-specific fields
308
+ logo: z.string().nullable(),
309
+ description: z.string().nullable(),
310
+ capabilities: z.array(z.string()),
311
+ limits: z
312
+ .object({
313
+ contextWindow: z.number(),
314
+ maxOutputTokens: z.number(),
315
+ })
316
+ .nullable(),
317
+ costs: z
318
+ .object({
319
+ input: z.number(),
320
+ output: z.number(),
321
+ })
322
+ .nullable(),
323
+ // Provider information
324
+ provider: z
325
+ .enum([
326
+ "openai",
327
+ "anthropic",
328
+ "google",
329
+ "xai",
330
+ "deepseek",
331
+ "openai-compatible",
332
+ "openrouter",
333
+ ])
334
+ .nullable(),
335
+ });
336
+
337
+ /**
338
+ * LLM Collection Binding (internal)
339
+ *
340
+ * Collection bindings for language models (read-only).
341
+ * Provides LIST and GET operations for AI models.
342
+ */
343
+ const LLM_COLLECTION_BINDING = createCollectionBindings(
344
+ "llm",
345
+ ModelCollectionEntitySchema,
346
+ { readOnly: true },
347
+ );
348
+
349
+ /**
350
+ * Language Model Binding
351
+ *
352
+ * Defines the interface for AI model providers.
353
+ * Any MCP that implements this binding can provide AI models.
354
+ *
355
+ * Required tools:
356
+ * - LLM_METADATA: Get metadata for a specific model
357
+ * - LLM_DO_STREAM: Stream a language model response
358
+ * - LLM_DO_GENERATE: Generate a language model response
359
+ * - COLLECTION_LLM_LIST: List available AI models with their capabilities
360
+ * - COLLECTION_LLM_GET: Get a single model by ID
361
+ */
362
+ export const LANGUAGE_MODEL_BINDING = [
363
+ {
364
+ name: "LLM_METADATA" as const,
365
+ inputSchema: z.object({
366
+ modelId: z.string().describe("The ID of the model"),
367
+ }),
368
+ outputSchema: LanguageModelMetadataSchema,
369
+ },
370
+ {
371
+ name: "LLM_DO_STREAM" as const,
372
+ inputSchema: LanguageModelInputSchema,
373
+ streamable: true,
374
+ },
375
+ {
376
+ name: "LLM_DO_GENERATE" as const,
377
+ inputSchema: LanguageModelInputSchema,
378
+ outputSchema: LanguageModelGenerateOutputSchema,
379
+ },
380
+ ...LLM_COLLECTION_BINDING,
381
+ ] satisfies ToolBinder[];
382
+
383
/** Typed binding client built from LANGUAGE_MODEL_BINDING via bindingClient. */
export const LanguageModelBinding = bindingClient(LANGUAGE_MODEL_BINDING);