@databricks/ai-sdk-provider 0.0.1 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,2009 @@
1
+ import { combineHeaders, createEventSourceResponseHandler, createJsonErrorResponseHandler, createJsonResponseHandler, parseProviderOptions, postJsonToApi, withoutTrailingSlash } from "@ai-sdk/provider-utils";
2
+ import { z } from "zod/v4";
3
+ import { randomUUID } from "node:crypto";
4
+ import { UnsupportedFunctionalityError } from "@ai-sdk/provider";
5
+
6
+ //#region src/chat-agent-language-model/chat-agent-schema.ts
7
// Zod schemas describing the Databricks ChatAgent wire format.
// Validate both streaming chunks (`delta`) and complete responses (`messages`).

// A single tool invocation requested by the assistant (OpenAI-style shape).
const chatAgentToolCallSchema = z.object({
	type: z.literal("function"),
	function: z.object({
		name: z.string(),
		// Raw JSON string; parsed downstream by the tool runner.
		arguments: z.string()
	}),
	id: z.string()
});
// Assistant turn: free text plus optional tool calls.
const chatAgentAssistantMessageSchema = z.object({
	role: z.literal("assistant"),
	content: z.string(),
	id: z.string(),
	name: z.string().optional(),
	tool_calls: z.array(chatAgentToolCallSchema).optional()
});
// Tool turn: the result of a tool call, linked back via `tool_call_id`.
const chatAgentToolMessageSchema = z.object({
	role: z.literal("tool"),
	name: z.string(),
	content: z.string(),
	tool_call_id: z.string(),
	id: z.string(),
	attachments: z.record(z.string(), z.unknown()).optional()
});
// User turn: plain text only.
const chatAgentUserMessageSchema = z.object({
	role: z.literal("user"),
	content: z.string(),
	id: z.string()
});
// Any ChatAgent message, discriminated by `role`.
const chatAgentMessageSchema = z.discriminatedUnion("role", [
	chatAgentAssistantMessageSchema,
	chatAgentToolMessageSchema,
	chatAgentUserMessageSchema
]);
// One SSE streaming chunk: a partial message delta.
const chatAgentChunkSchema = z.object({
	id: z.string(),
	delta: chatAgentMessageSchema
});
// Complete (non-streaming) response payload.
const chatAgentResponseSchema = z.object({
	id: z.string(),
	messages: z.array(chatAgentMessageSchema)
});
48
+
49
+ //#endregion
50
+ //#region src/tools.ts
51
// Placeholder tool name under which Databricks-orchestrated tool calls are surfaced.
const DATABRICKS_TOOL_CALL_ID = "databricks-tool-call";
/**
 * The AI-SDK requires that tools used by the model are defined ahead of time.
 *
 * Since tool calls can be orchestrated by Databricks' agents we don't know the name, input, or output schemas
 * of the tools until the model is called.
 *
 * In the DatabricksProvider we transform all tool calls to fit this definition, and keep the
 * original name as part of the metadata. This allows us to parse any tool orchestrated by Databricks' agents,
 * while still being able to render the tool call and result in the UI, and pass it back to the model with the correct name.
 */
const DATABRICKS_TOOL_DEFINITION = {
	name: DATABRICKS_TOOL_CALL_ID,
	description: "Databricks tool call",
	// Input/output shapes are unknown ahead of time, so accept anything.
	inputSchema: z.any(),
	outputSchema: z.any()
};
68
+
69
+ //#endregion
70
+ //#region src/chat-agent-language-model/chat-agent-convert-to-message-parts.ts
71
/**
 * Map a single streamed ChatAgent chunk onto AI-SDK stream parts.
 *
 * Assistant deltas become a `text-delta` part (when non-empty) plus one
 * `tool-call` part per tool call; tool deltas become a single `tool-result`
 * part. Any other role (e.g. user) produces no parts.
 */
const convertChatAgentChunkToMessagePart = (chunk) => {
	const { delta } = chunk;
	if (delta.role === "assistant") {
		const out = [];
		if (delta.content) {
			out.push({
				type: "text-delta",
				id: delta.id,
				delta: delta.content
			});
		}
		for (const call of delta.tool_calls ?? []) {
			out.push({
				type: "tool-call",
				toolCallId: call.id,
				input: call.function.arguments,
				toolName: call.function.name
			});
		}
		return out;
	}
	if (delta.role === "tool") {
		return [{
			type: "tool-result",
			toolCallId: delta.tool_call_id,
			result: delta.content,
			toolName: DATABRICKS_TOOL_CALL_ID
		}];
	}
	return [];
};
95
/**
 * Map a complete (non-streaming) ChatAgent response onto AI-SDK content parts.
 *
 * Assistant messages yield a `text` part followed by one `tool-call` part per
 * tool call; tool messages yield a `tool-result` part. Other roles are ignored.
 */
const convertChatAgentResponseToMessagePart = (response) => {
	return response.messages.flatMap((message) => {
		if (message.role === "assistant") {
			const callParts = (message.tool_calls ?? []).map((call) => ({
				type: "tool-call",
				toolCallId: call.id,
				input: call.function.arguments,
				toolName: call.function.name
			}));
			return [{
				type: "text",
				text: message.content
			}, ...callParts];
		}
		if (message.role === "tool") {
			return [{
				type: "tool-result",
				toolCallId: message.tool_call_id,
				result: message.content,
				toolName: DATABRICKS_TOOL_CALL_ID
			}];
		}
		return [];
	});
};
116
+
117
+ //#endregion
118
+ //#region src/chat-agent-language-model/chat-agent-convert-to-input.ts
119
/**
 * Convert an AI-SDK LanguageModelV2 prompt into the ChatAgent message list.
 *
 * System messages are dropped (ChatAgent has no system slot) and do not
 * consume an index; every other message contributes one or more ChatAgent
 * messages carrying deterministic `role-<index>` ids.
 */
const convertLanguageModelV2PromptToChatAgentResponse = (prompt) => {
	const messages = [];
	let nextIndex = 0;
	for (const msg of prompt) {
		if (msg.role === "user") {
			messages.push(convertUserMessage(msg, nextIndex));
			nextIndex += 1;
		} else if (msg.role === "assistant") {
			const converted = convertAssistantMessage(msg, nextIndex);
			messages.push(...converted);
			nextIndex += converted.length;
		} else if (msg.role === "tool") {
			const converted = convertToolMessage(msg, nextIndex);
			messages.push(...converted);
			nextIndex += converted.length;
		}
		// "system" (and anything unrecognized) is skipped intentionally.
	}
	return messages;
};
145
/**
 * Flatten a user message's text parts into one newline-joined string.
 * Non-text parts (images, files, …) are silently dropped.
 */
const convertUserMessage = (msg, messageIndex) => {
	const textParts = [];
	for (const part of msg.content ?? []) {
		if (part.type === "text") textParts.push(part.text);
	}
	return {
		role: "user",
		content: textParts.join("\n"),
		id: `user-${messageIndex}`
	};
};
153
/**
 * Convert an assistant message into ChatAgent format.
 *
 * Produces one assistant message (text + optional tool calls) followed by one
 * `tool` message per embedded tool result. `startIndex` seeds the sequential
 * `role-<n>` ids; the caller advances its counter by the returned length.
 *
 * Fix: the original mapped text/reasoning parts with the redundant ternary
 * `part.type === "text" ? part.text : part.text` — both branches are
 * identical, so it is simplified to `part.text` (both part kinds carry their
 * payload in `.text`).
 */
const convertAssistantMessage = (msg, startIndex) => {
	const messages = [];
	let messageIndex = startIndex;
	const content = msg.content ?? [];
	// Text and reasoning are merged into a single newline-joined string since
	// ChatAgent has no separate reasoning channel.
	const textContent = content
		.filter((part) => part.type === "text" || part.type === "reasoning")
		.map((part) => part.text)
		.join("\n");
	const toolCalls = content
		.filter((part) => part.type === "tool-call")
		.map((call) => ({
			type: "function",
			id: call.toolCallId,
			function: {
				name: call.toolName,
				// ChatAgent expects a JSON string; pass strings through untouched.
				arguments: typeof call.input === "string" ? call.input : JSON.stringify(call.input ?? {})
			}
		}));
	messages.push({
		role: "assistant",
		content: textContent,
		id: `assistant-${messageIndex++}`,
		tool_calls: toolCalls.length > 0 ? toolCalls : void 0
	});
	// Tool results embedded in the assistant turn become separate tool messages.
	for (const part of content) {
		if (part.type === "tool-result") {
			messages.push({
				role: "tool",
				name: part.toolName,
				content: convertToolResultOutput(part.output),
				tool_call_id: part.toolCallId,
				id: `tool-${messageIndex++}`
			});
		}
	}
	return messages;
};
180
/**
 * Convert a tool message (a batch of tool results) into ChatAgent `tool`
 * messages, one per result, with sequential `tool-<n>` ids from `startIndex`.
 */
const convertToolMessage = (msg, startIndex) => {
	const results = (msg.content ?? []).filter((part) => part.type === "tool-result");
	return results.map((part, offset) => ({
		role: "tool",
		name: part.toolName,
		content: convertToolResultOutput(part.output),
		tool_call_id: part.toolCallId,
		id: `tool-${startIndex + offset}`
	}));
};
192
/**
 * Serialize an AI-SDK tool-result output into the plain string ChatAgent
 * expects. JSON payloads are stringified; `content` arrays keep only
 * non-empty text parts. Unknown output types collapse to the empty string.
 */
const convertToolResultOutput = (output) => {
	if (output.type === "text" || output.type === "error-text") {
		return output.value;
	}
	if (output.type === "json" || output.type === "error-json") {
		return JSON.stringify(output.value);
	}
	if (output.type === "content") {
		const texts = output.value
			.map((p) => (p.type === "text" ? p.text : ""))
			.filter(Boolean);
		return texts.join("\n");
	}
	return "";
};
202
+
203
+ //#endregion
204
+ //#region src/stream-transformers/compose-stream-part-transformers.ts
205
/**
 * Compose an arbitrary number of `DatabricksStreamPartTransformer`s into a
 * single transformer-shaped function.
 *
 * Each stage receives the previous stage's `out` as its `parts`. Note that
 * every stage is invoked with the SAME caller-supplied `last` (usually
 * `null`); per-stage `last` values are not threaded through. The composed
 * function returns only the final stage's `{ out }`.
 */
function composeDatabricksStreamPartTransformers(...transformers) {
	return (initialParts, last = null) => {
		const out = transformers.reduce(
			(parts, transformer) => transformer(parts, last).out,
			initialParts
		);
		return { out };
	};
}
229
+
230
+ //#endregion
231
+ //#region src/stream-transformers/databricks-delta-boundary.ts
232
/**
 * Injects synthetic `*-start` / `*-end` parts around runs of text/reasoning
 * deltas, since the upstream API does not emit those boundary events itself.
 *
 * `last` is the part emitted before this batch (or `null`). It is evaluated
 * once per call and deliberately NOT updated inside the loop — in practice
 * each call carries a single chunk, and the stream wrapper supplies the
 * previous chunk as `last`.
 */
const applyDeltaBoundaryTransform = (parts, last) => {
	const out = [];
	const lastDeltaType = maybeGetDeltaType(last);
	for (const incoming of parts) {
		const incomingDeltaType = maybeGetDeltaType(incoming);
		const incomingId = getPartId$1(incoming);
		const lastId = getPartId$1(last);
		// The incoming part continues the open run only when both parts are
		// deltas of the same group ("text"/"reasoning") sharing the same id.
		const continuesRun =
			Boolean(isDeltaPart(last) && isDeltaPart(incoming)) &&
			Boolean(lastDeltaType && incomingDeltaType) &&
			Boolean(lastDeltaType === incomingDeltaType) &&
			Boolean(incomingId && lastId && incomingId === lastId);
		if (continuesRun) {
			out.push(incoming);
			continue;
		}
		// Boundary crossed: close the previously open delta run, if any.
		if (isDeltaPart(last)) {
			out.push({
				type: `${getDeltaType(last)}-end`,
				id: last.id
			});
		}
		// Open a new run before a delta of a different group/id; pass
		// non-delta parts straight through.
		if (isDeltaPart(incoming)) {
			out.push({
				type: `${getDeltaType(incoming)}-start`,
				id: incoming.id
			}, incoming);
		} else {
			out.push(incoming);
		}
	}
	return { out };
};
// A part is "delta-ish" when its type carries a text-/reasoning- prefix
// (covers the -start/-delta/-end variants).
const isDeltaIsh = (part) => part?.type.startsWith("text-") || part?.type.startsWith("reasoning-") || false;
// Delta group of a part, or null when the part is not delta-ish.
const maybeGetDeltaType = (part) => {
	if (!isDeltaIsh(part)) return null;
	if (part.type.startsWith("text-")) return "text";
	return part.type.startsWith("reasoning-") ? "reasoning" : null;
};
// Like maybeGetDeltaType, but throws on parts outside the two groups.
const getDeltaType = (part) => {
	if (part.type.startsWith("text-")) return "text";
	if (part.type.startsWith("reasoning-")) return "reasoning";
	throw new Error(`Unknown delta type: ${part.type}`);
};
// True only for the actual streaming-delta variants (not start/end markers).
const isDeltaPart = (part) => part?.type === "text-delta" || part?.type === "reasoning-delta";
// Safe id accessor for heterogeneous stream parts.
const getPartId$1 = (part) => (part && "id" in part ? part.id : void 0);
280
+
281
+ //#endregion
282
+ //#region src/stream-transformers/databricks-stream-transformer.ts
283
/**
 * Allows stream transformations to be composed together.
 *
 * Currently only used to automatically inject start/end
 * deltas since the API does not supply the necessary events.
 */
const getDatabricksLanguageModelTransformStream = () => {
	// Last part emitted by the previous transform() call; fed back into the
	// boundary transform so it can detect when a delta run is broken.
	let lastChunk = null;
	// Keys ("group:id") of delta runs that have already been closed with an
	// `-end` part; used to drop duplicate parts for the same run.
	const deltaEndByTypeAndId = new Set();
	const transformerStreamParts = composeDatabricksStreamPartTransformers(applyDeltaBoundaryTransform);
	return new TransformStream({
		transform(chunk, controller) {
			const { out } = transformerStreamParts([chunk], lastChunk);
			out.forEach((transformedChunk) => {
				const group = getDeltaGroup(transformedChunk.type);
				const endKey = makeEndKey(getPartId(transformedChunk), group);
				// Skip any part belonging to a run that was already terminated.
				if (endKey && deltaEndByTypeAndId.has(endKey)) return;
				if (transformedChunk.type === "text-end" || transformedChunk.type === "reasoning-end") {
					/**
					 * We register when a delta ends.
					 * We rely on response.output_item.done chunks to display non streamed data
					 * so we need to deduplicate them with their corresponding delta chunks.
					 */
					const endGroup = getDeltaGroup(transformedChunk.type);
					const key = makeEndKey(getPartId(transformedChunk), endGroup);
					if (key) deltaEndByTypeAndId.add(key);
				}
				controller.enqueue(transformedChunk);
			});
			// Remember the final emitted part (if any) for the next call.
			lastChunk = out[out.length - 1] ?? lastChunk;
		},
		flush(controller) {
			// Close any delta run still open when the source stream finishes.
			if (lastChunk?.type === "text-delta") controller.enqueue({
				type: "text-end",
				id: lastChunk.id
			});
			if (lastChunk?.type === "reasoning-delta") controller.enqueue({
				type: "reasoning-end",
				id: lastChunk.id
			});
		}
	});
};
326
// Classify a stream-part type into its delta group, or null for
// non-text/reasoning parts (tool calls, finish, etc.).
const getDeltaGroup = (type) => {
	for (const group of ["text", "reasoning"]) {
		if (type.startsWith(`${group}-`)) return group;
	}
	return null;
};
// Safe id accessor: stream parts do not all carry an `id`.
const getPartId = (part) => ("id" in part ? part.id : void 0);
// Build a dedup key for a closed delta run; null when either piece is missing.
const makeEndKey = (id, group) => (id && group ? `${group}:${id}` : null);
336
+
337
+ //#endregion
338
+ //#region src/chat-agent-language-model/chat-agent-language-model.ts
339
/**
 * AI-SDK LanguageModelV2 implementation backed by a Databricks ChatAgent
 * endpoint (`/completions`).
 *
 * NOTE(review): the endpoint's usage metrics are not mapped here — token
 * counts are hard-coded to 0 and `doGenerate` always reports `finishReason:
 * "stop"` — presumably because ChatAgent does not supply them; confirm
 * against the serving API.
 */
var DatabricksChatAgentLanguageModel = class {
	specificationVersion = "v2";
	modelId;
	config;
	constructor(modelId, config) {
		this.modelId = modelId;
		this.config = config;
	}
	get provider() {
		return this.config.provider;
	}
	// No URL-based content (images/files) is supported by this model.
	supportedUrls = {};
	/**
	 * Non-streaming generation: POST the converted prompt and map the
	 * validated ChatAgent response back onto AI-SDK content parts.
	 */
	async doGenerate(options) {
		const networkArgs = this.getArgs({
			config: this.config,
			options,
			stream: false,
			modelId: this.modelId
		});
		const { value: response } = await postJsonToApi({
			...networkArgs,
			successfulResponseHandler: createJsonResponseHandler(chatAgentResponseSchema),
			// Error bodies have no fixed shape; surface them verbatim and never retry.
			failedResponseHandler: createJsonErrorResponseHandler({
				errorSchema: z.any(),
				errorToMessage: (error) => JSON.stringify(error),
				isRetryable: () => false
			})
		});
		return {
			content: convertChatAgentResponseToMessagePart(response),
			finishReason: "stop",
			usage: {
				inputTokens: 0,
				outputTokens: 0,
				totalTokens: 0
			},
			warnings: []
		};
	}
	/**
	 * Streaming generation: POST with `stream: true`, parse SSE chunks against
	 * the chunk schema, convert each to AI-SDK stream parts, then pipe through
	 * the boundary transform that injects start/end delta events.
	 */
	async doStream(options) {
		const networkArgs = this.getArgs({
			config: this.config,
			options,
			stream: true,
			modelId: this.modelId
		});
		const { responseHeaders, value: response } = await postJsonToApi({
			...networkArgs,
			failedResponseHandler: createJsonErrorResponseHandler({
				errorSchema: z.any(),
				errorToMessage: (error) => JSON.stringify(error),
				isRetryable: () => false
			}),
			successfulResponseHandler: createEventSourceResponseHandler(chatAgentChunkSchema)
		});
		let finishReason = "unknown";
		return {
			stream: response.pipeThrough(new TransformStream({
				start(controller) {
					controller.enqueue({
						type: "stream-start",
						warnings: []
					});
				},
				transform(chunk, controller) {
					// Raw chunks are forwarded before validation when requested.
					if (options.includeRawChunks) controller.enqueue({
						type: "raw",
						rawValue: chunk.rawValue
					});
					// Schema-validation failures become error parts; the stream continues.
					if (!chunk.success) {
						finishReason = "error";
						controller.enqueue({
							type: "error",
							error: chunk.error
						});
						return;
					}
					const parts = convertChatAgentChunkToMessagePart(chunk.value);
					for (const part of parts) controller.enqueue(part);
				},
				flush(controller) {
					// Usage is not reported by the endpoint; emit zeros.
					controller.enqueue({
						type: "finish",
						finishReason,
						usage: {
							inputTokens: 0,
							outputTokens: 0,
							totalTokens: 0
						}
					});
				}
			})).pipeThrough(getDatabricksLanguageModelTransformStream()),
			request: { body: networkArgs.body },
			response: { headers: responseHeaders }
		};
	}
	// Build the shared request arguments for both generate and stream calls.
	getArgs({ config, options, stream, modelId }) {
		return {
			body: {
				model: modelId,
				stream,
				messages: convertLanguageModelV2PromptToChatAgentResponse(options.prompt)
			},
			url: config.url({ path: "/completions" }),
			headers: combineHeaders(config.headers(), options.headers),
			fetch: config.fetch,
			abortSignal: options.abortSignal
		};
	}
};
449
+
450
+ //#endregion
451
+ //#region src/responses-agent-language-model/responses-agent-schema.ts
452
/**
 * Zod schemas for the Databricks ResponsesAgent (OpenAI Responses-style) API.
 *
 * Response schema
 */
// Assistant message output item: one or more `output_text` blocks with
// optional URL-citation annotations.
const responsesAgentMessageSchema = z.object({
	type: z.literal("message"),
	role: z.literal("assistant"),
	id: z.string(),
	content: z.array(z.object({
		type: z.literal("output_text"),
		text: z.string(),
		logprobs: z.unknown().nullish(),
		annotations: z.array(z.discriminatedUnion("type", [z.object({
			type: z.literal("url_citation"),
			start_index: z.number(),
			end_index: z.number(),
			url: z.string(),
			title: z.string()
		})]))
	}))
});
// Tool invocation emitted by the model; `arguments` is a raw JSON string.
const responsesAgentFunctionCallSchema = z.object({
	type: z.literal("function_call"),
	call_id: z.string(),
	name: z.string(),
	arguments: z.string(),
	id: z.string()
});
// Reasoning item: summary text plus optionally encrypted raw content.
const responsesAgentReasoningSchema = z.object({
	type: z.literal("reasoning"),
	id: z.string(),
	encrypted_content: z.string().nullish(),
	summary: z.array(z.object({
		type: z.literal("summary_text"),
		text: z.string()
	}))
});
// Result of a previously issued function call.
const responsesAgentFunctionCallOutputSchema = z.object({
	type: z.literal("function_call_output"),
	call_id: z.string(),
	output: z.any()
});
// MCP server asking the user to approve a tool invocation.
const responsesAgentMcpApprovalRequestSchema = z.object({
	type: z.literal("mcp_approval_request"),
	id: z.string(),
	name: z.string(),
	arguments: z.string(),
	server_label: z.string()
});
// The user's approve/deny answer to an MCP approval request.
const responsesAgentMcpApprovalResponseSchema = z.object({
	type: z.literal("mcp_approval_response"),
	id: z.string().optional(),
	approval_request_id: z.string(),
	approve: z.boolean(),
	reason: z.string().nullish()
});
// Any output item, discriminated by `type`.
const responsesAgentOutputItem = z.discriminatedUnion("type", [
	responsesAgentMessageSchema,
	responsesAgentFunctionCallSchema,
	responsesAgentReasoningSchema,
	responsesAgentFunctionCallOutputSchema,
	responsesAgentMcpApprovalRequestSchema,
	responsesAgentMcpApprovalResponseSchema
]);
// Complete (non-streaming) response envelope.
const responsesAgentResponseSchema = z.object({
	id: z.string().optional(),
	created_at: z.number().optional(),
	error: z.object({
		code: z.string(),
		message: z.string()
	}).nullish(),
	model: z.string().optional(),
	output: z.array(responsesAgentOutputItem),
	incomplete_details: z.object({ reason: z.enum(["max_output_tokens", "content_filter"]).optional() }).nullish(),
	usage: z.object({
		input_tokens: z.number(),
		output_tokens: z.number(),
		total_tokens: z.number()
	}).optional()
});
/**
 * Chunk schema
 */
// Incremental text tokens for an output item.
const textDeltaChunkSchema = z.object({
	type: z.literal("response.output_text.delta"),
	item_id: z.string(),
	delta: z.string(),
	logprobs: z.unknown().nullish()
});
// Structured error event within the stream.
const errorChunkSchema = z.object({
	type: z.literal("error"),
	code: z.string(),
	message: z.string(),
	param: z.string().nullish(),
	sequence_number: z.number()
});
// Bare `{ error: "..." }` payload without a `type` discriminator.
const simpleErrorChunkSchema = z.object({
	type: z.undefined().optional(),
	error: z.string()
});
// A fully materialized output item (used for non-streamed data).
const responseOutputItemDoneSchema = z.object({
	type: z.literal("response.output_item.done"),
	output_index: z.number(),
	item: responsesAgentOutputItem
});
// A URL citation attached to already-streamed text.
const responseAnnotationAddedSchema = z.object({
	type: z.literal("response.output_text.annotation.added"),
	annotation: z.discriminatedUnion("type", [z.object({
		type: z.literal("url_citation"),
		url: z.string(),
		title: z.string()
	})])
});
// Incremental reasoning-summary tokens.
const responseReasoningSummaryTextDeltaSchema = z.object({
	type: z.literal("response.reasoning_summary_text.delta"),
	item_id: z.string(),
	summary_index: z.number(),
	delta: z.string()
});
// Incremental function-call argument tokens.
const responseFunctionCallArgumentsDeltaSchema = z.object({
	type: z.literal("response.function_call_arguments.delta"),
	item_id: z.string(),
	delta: z.string(),
	output_index: z.number(),
	sequence_number: z.number()
});
// A tool result delivered directly as a stream chunk.
const functionCallOutputChunkSchema = z.object({
	type: z.literal("function_call_output"),
	call_id: z.string(),
	output: z.any()
});
// Terminal event carrying final status and usage.
const responsesCompletedSchema = z.object({
	type: z.literal("responses.completed"),
	response: z.object({
		id: z.string(),
		status: z.enum([
			"completed",
			"failed",
			"in_progress",
			"cancelled",
			"queued",
			"incomplete"
		]).optional(),
		incomplete_details: z.object({ reason: z.enum(["max_output_tokens", "content_filter"]).optional() }).nullish(),
		usage: z.object({
			input_tokens: z.number(),
			output_tokens: z.number(),
			total_tokens: z.number()
		})
	})
});
// All recognized chunk shapes.
const responsesAgentChunkSchema = z.union([
	textDeltaChunkSchema,
	responseOutputItemDoneSchema,
	responseAnnotationAddedSchema,
	responseReasoningSummaryTextDeltaSchema,
	responseFunctionCallArgumentsDeltaSchema,
	functionCallOutputChunkSchema,
	errorChunkSchema,
	responsesCompletedSchema,
	simpleErrorChunkSchema
]);
/**
 * We use a loose schema for response validation to handle unknown chunks.
 */
const looseResponseAgentChunkSchema = z.union([responsesAgentChunkSchema, z.object({ type: z.string() }).loose()]);
617
+
618
+ //#endregion
619
+ //#region src/mcp.ts
620
/**
 * MCP Approval Utility Functions
 *
 * Shared utilities for handling MCP (Model Context Protocol) approval requests
 * and responses across client and server code.
 */
/** Marker key placed in a tool output to carry the approval decision. */
const MCP_APPROVAL_STATUS_KEY = "__approvalStatus__";
/** Provider-metadata `type` value marking an MCP approval request. */
const MCP_APPROVAL_REQUEST_TYPE = "mcp_approval_request";
/** Provider-metadata `type` value marking an MCP approval response. */
const MCP_APPROVAL_RESPONSE_TYPE = "mcp_approval_response";
/**
 * Check whether `output` is an object carrying a boolean approval-status
 * marker under `__approvalStatus__`.
 */
function isApprovalStatusOutput(output) {
	if (typeof output !== "object" || output === null) return false;
	if (!(MCP_APPROVAL_STATUS_KEY in output)) return false;
	return typeof output[MCP_APPROVAL_STATUS_KEY] === "boolean";
}
/**
 * Check whether provider metadata marks an MCP approval request.
 */
function isMcpApprovalRequest(metadata) {
	return metadata?.type?.toString() === MCP_APPROVAL_REQUEST_TYPE;
}
/**
 * Check whether provider metadata marks an MCP approval response.
 */
function isMcpApprovalResponse(metadata) {
	return metadata?.type?.toString() === MCP_APPROVAL_RESPONSE_TYPE;
}
/**
 * Extract the `databricks` namespace from a tool-call part's
 * `callProviderMetadata`, or `undefined` when absent.
 */
function extractDatabricksMetadata(part) {
	const metadata = "callProviderMetadata" in part ? part.callProviderMetadata?.databricks : void 0;
	return metadata || void 0;
}
/**
 * Extract the approval-status boolean from an output object.
 *
 * @returns `true` if approved, `false` if denied, `undefined` when the output
 * carries no approval marker.
 */
function extractApprovalStatus(output) {
	return isApprovalStatusOutput(output) ? output[MCP_APPROVAL_STATUS_KEY] : void 0;
}
/**
 * Extract the approval status from a tool result's output value, handling the
 * nested shape where `output.type === "json"` and `output.value` carries the
 * marker.
 */
function extractApprovalStatusFromToolResult(output) {
	if (output.type !== "json") return void 0;
	const value = output.value;
	if (!value || typeof value !== "object") return void 0;
	if (!(MCP_APPROVAL_STATUS_KEY in value)) return void 0;
	const status = value[MCP_APPROVAL_STATUS_KEY];
	return typeof status === "boolean" ? status : void 0;
}
/**
 * Build an approval-status output object suitable for `addToolResult`.
 */
function createApprovalStatusOutput(approve) {
	return { [MCP_APPROVAL_STATUS_KEY]: approve };
}
/**
 * Determine the MCP approval state from a tool output.
 *
 * - no output                       → "awaiting-approval"
 * - `__approvalStatus__: true`      → "approved"
 * - `__approvalStatus__: false`     → "denied"
 * - output without the marker       → "approved" (the tool ran, so it was approved)
 */
function getMcpApprovalState(output) {
	if (!output) return "awaiting-approval";
	switch (extractApprovalStatus(output)) {
		case false: return "denied";
		case true: return "approved";
		default: return "approved";
	}
}
733
+
734
+ //#endregion
735
+ //#region src/responses-agent-language-model/responses-convert-to-message-parts.ts
736
/**
 * Map a single streamed ResponsesAgent chunk onto AI-SDK stream parts.
 * Unrecognized chunk types produce no parts.
 */
const convertResponsesAgentChunkToMessagePart = (chunk) => {
	// Bare `{ error: … }` payloads (no discriminator) short-circuit into a
	// single error part.
	if ("error" in chunk) {
		return [{
			type: "error",
			error: chunk.error
		}];
	}
	switch (chunk.type) {
		case "response.output_text.delta":
			return [{
				type: "text-delta",
				id: chunk.item_id,
				delta: chunk.delta,
				providerMetadata: { databricks: { itemId: chunk.item_id } }
			}];
		case "response.reasoning_summary_text.delta":
			return [{
				type: "reasoning-delta",
				id: chunk.item_id,
				delta: chunk.delta,
				providerMetadata: { databricks: { itemId: chunk.item_id } }
			}];
		case "function_call_output":
			return [{
				type: "tool-result",
				toolCallId: chunk.call_id,
				result: chunk.output,
				toolName: DATABRICKS_TOOL_CALL_ID
			}];
		case "response.output_item.done":
			return convertOutputItemDone(chunk.item);
		case "response.output_text.annotation.added":
			// Annotations carry no stable id, so mint one for the source part.
			return [{
				type: "source",
				url: chunk.annotation.url,
				title: chunk.annotation.title,
				id: randomUUID(),
				sourceType: "url"
			}];
		case "error":
			return [{
				type: "error",
				error: chunk
			}];
		default:
			return [];
	}
};
792
/**
 * Expand a completed output item (`response.output_item.done`) into AI-SDK
 * stream parts. Unknown item types produce no parts.
 */
const convertOutputItemDone = (item) => {
	switch (item.type) {
		case "message": {
			const [firstContent] = item.content;
			if (!firstContent) return [];
			// Emitted as a delta; the boundary transform injects start/end
			// around it and dedupes against already-streamed deltas.
			return [{
				type: "text-delta",
				id: item.id,
				delta: firstContent.text,
				providerMetadata: { databricks: {
					itemId: item.id,
					itemType: "response.output_item.done"
				} }
			}];
		}
		case "function_call":
			// Surface under the generic Databricks tool; keep the real name in metadata.
			return [{
				type: "tool-call",
				toolCallId: item.call_id,
				toolName: DATABRICKS_TOOL_CALL_ID,
				input: item.arguments,
				providerMetadata: { databricks: {
					toolName: item.name,
					itemId: item.id
				} }
			}];
		case "function_call_output":
			return [{
				type: "tool-result",
				toolCallId: item.call_id,
				result: item.output,
				toolName: DATABRICKS_TOOL_CALL_ID
			}];
		case "reasoning": {
			const [firstSummary] = item.summary;
			if (!firstSummary) return [];
			// Reasoning arrives whole, so wrap the single delta with explicit
			// start/end parts.
			return [
				{
					type: "reasoning-start",
					id: item.id
				},
				{
					type: "reasoning-delta",
					id: item.id,
					delta: firstSummary.text,
					providerMetadata: { databricks: { itemId: item.id } }
				},
				{
					type: "reasoning-end",
					id: item.id
				}
			];
		}
		case "mcp_approval_request":
			return [{
				type: "tool-call",
				toolCallId: item.id,
				toolName: DATABRICKS_TOOL_CALL_ID,
				input: item.arguments,
				providerMetadata: { databricks: {
					type: MCP_APPROVAL_REQUEST_TYPE,
					toolName: item.name,
					itemId: item.id,
					serverLabel: item.server_label
				} }
			}];
		case "mcp_approval_response":
			return [{
				type: "tool-result",
				toolCallId: item.approval_request_id,
				toolName: DATABRICKS_TOOL_CALL_ID,
				result: createApprovalStatusOutput(item.approve),
				providerMetadata: { databricks: {
					type: MCP_APPROVAL_RESPONSE_TYPE,
					...item.id != null && { itemId: item.id }
				} }
			}];
		default:
			return [];
	}
};
868
/**
 * Map a complete (non-streaming) ResponsesAgent response onto AI-SDK content
 * parts. Unknown output item types produce no parts.
 */
const convertResponsesAgentResponseToMessagePart = (response) => {
	return response.output.flatMap((output) => {
		switch (output.type) {
			case "message":
				return output.content
					.filter((content) => content.type === "output_text")
					.map((content) => ({
						type: "text",
						text: content.text,
						providerMetadata: { databricks: { itemId: output.id } }
					}));
			case "function_call":
				return [{
					type: "tool-call",
					toolCallId: output.call_id,
					toolName: output.name,
					input: output.arguments,
					providerMetadata: { databricks: { itemId: output.id } }
				}];
			case "reasoning":
				return output.summary
					.filter((summary) => summary.type === "summary_text")
					.map((summary) => ({
						type: "reasoning",
						text: summary.text,
						providerMetadata: { databricks: { itemId: output.id } }
					}));
			case "function_call_output":
				return [{
					type: "tool-result",
					result: output.output,
					toolCallId: output.call_id,
					toolName: DATABRICKS_TOOL_CALL_ID
				}];
			case "mcp_approval_request":
				return [{
					type: "tool-call",
					toolCallId: output.id,
					toolName: DATABRICKS_TOOL_CALL_ID,
					input: output.arguments,
					providerMetadata: { databricks: {
						type: MCP_APPROVAL_REQUEST_TYPE,
						toolName: output.name,
						itemId: output.id,
						serverLabel: output.server_label
					} }
				}];
			case "mcp_approval_response":
				return [{
					type: "tool-result",
					toolCallId: output.approval_request_id,
					toolName: DATABRICKS_TOOL_CALL_ID,
					result: createApprovalStatusOutput(output.approve),
					providerMetadata: { databricks: {
						type: MCP_APPROVAL_RESPONSE_TYPE,
						...output.id != null && { itemId: output.id }
					} }
				}];
			default:
				return [];
		}
	});
};
933
+
934
+ //#endregion
935
+ //#region src/responses-agent-language-model/responses-convert-to-input.ts
936
/**
 * Converts an AI SDK prompt into the Databricks Responses-API `input` array.
 *
 * @param prompt - AI SDK prompt (array of {role, content} messages).
 * @param systemMessageMode - "system" | "developer" | "remove"; controls how
 *   system messages are emitted.
 * @returns {input, warnings} where `input` is the Responses-API item list and
 *   `warnings` records dropped content (e.g. removed system messages).
 */
async function convertToResponsesInput({ prompt, systemMessageMode }) {
  const input = [];
  const warnings = [];
  // Index all tool results by toolCallId up front so assistant tool-call
  // parts can be immediately followed by their matching output item.
  const toolCallResultsByToolCallId = prompt.filter((p) => p.role === "tool").flatMap((p) => p.content).reduce((reduction, toolCallResult) => {
    if (toolCallResult.type === "tool-result") reduction[toolCallResult.toolCallId] = toolCallResult;
    return reduction;
  }, {});
  for (const { role, content } of prompt) switch (role) {
    case "system": {
      switch (systemMessageMode) {
        case "system":
          input.push({
            role: "system",
            content
          });
          break;
        case "developer":
          input.push({
            role: "developer",
            content
          });
          break;
        case "remove":
          // System message intentionally dropped; surface that as a warning.
          warnings.push({
            type: "other",
            message: "system messages are removed for this model"
          });
          break;
        default: {
          const _exhaustiveCheck = systemMessageMode;
          throw new Error(`Unsupported system message mode: ${String(_exhaustiveCheck)}`);
        }
      }
      break;
    }
    case "user":
      // Only plain text user parts are supported; anything else is rejected.
      input.push({
        role: "user",
        content: content.map((part) => {
          switch (part.type) {
            case "text": return {
              type: "input_text",
              text: part.text
            };
            default: throw new UnsupportedFunctionalityError({ functionality: `part ${JSON.stringify(part)}` });
          }
        })
      });
      break;
    case "assistant":
      for (const part of content) {
        // Provider options may carry the original Responses item id and
        // MCP-approval metadata recorded when the response was produced.
        const providerOptions = await parseProviderOptions({
          provider: "databricks",
          providerOptions: part.providerOptions,
          schema: ProviderOptionsSchema
        });
        const itemId = providerOptions?.itemId ?? void 0;
        switch (part.type) {
          case "text": {
            input.push({
              role: "assistant",
              content: [{
                type: "output_text",
                text: part.text
              }],
              id: itemId
            });
            break;
          }
          case "tool-call": {
            const toolName = providerOptions?.toolName ?? part.toolName;
            if (providerOptions?.type === MCP_APPROVAL_REQUEST_TYPE) {
              // This "tool call" is really an MCP approval request; replay it
              // as such rather than as a function_call item.
              const serverLabel = providerOptions?.serverLabel ?? "";
              const argumentsString = JSON.stringify(part.input);
              const id = part.toolCallId;
              input.push({
                type: MCP_APPROVAL_REQUEST_TYPE,
                id,
                name: toolName,
                arguments: argumentsString,
                server_label: serverLabel
              });
              const toolResult = toolCallResultsByToolCallId[part.toolCallId];
              if (toolResult) {
                /**
                 * The tool call result is either the approval status or the actual output from the tool call.
                 * If it's the approval status, we need to add an approval response part.
                 * If it's the tool call output, we don't include the approval response part but we do include the tool call output part.
                 */
                const approvalStatus = extractApprovalStatusFromToolResult(toolResult.output);
                if (approvalStatus !== void 0) input.push({
                  type: MCP_APPROVAL_RESPONSE_TYPE,
                  id: toolResult.toolCallId,
                  approval_request_id: toolResult.toolCallId,
                  approve: approvalStatus
                });
                else input.push({
                  type: "function_call_output",
                  call_id: toolResult.toolCallId,
                  output: convertToolResultOutputToString(toolResult.output)
                });
              }
              break;
            }
            // Regular function call, followed by its output when we have one.
            input.push({
              type: "function_call",
              call_id: part.toolCallId,
              name: toolName,
              arguments: JSON.stringify(part.input),
              id: itemId
            });
            const toolCallResult = toolCallResultsByToolCallId[part.toolCallId];
            if (toolCallResult) input.push({
              type: "function_call_output",
              call_id: part.toolCallId,
              output: convertToolResultOutputToString(toolCallResult.output)
            });
            break;
          }
          case "tool-result": {
            if (providerOptions?.type === MCP_APPROVAL_RESPONSE_TYPE) {
              // Replay a previously-recorded MCP approval decision.
              const approvalRequestId = providerOptions?.approvalRequestId ?? part.toolCallId;
              const approve = providerOptions?.approve ?? false;
              const reason = providerOptions?.reason ?? "";
              input.push({
                type: MCP_APPROVAL_RESPONSE_TYPE,
                id: approvalRequestId,
                approval_request_id: approvalRequestId,
                approve,
                reason
              });
              break;
            }
            input.push({
              type: "function_call_output",
              call_id: part.toolCallId,
              output: convertToolResultOutputToString(part.output)
            });
            break;
          }
          case "reasoning": {
            // Reasoning can only be replayed when we kept the server item id.
            if (!itemId) break;
            input.push({
              type: "reasoning",
              summary: [{
                type: "summary_text",
                text: part.text
              }],
              id: itemId
            });
            break;
          }
        }
      }
      break;
    // Tool results were already folded in next to their calls above.
    case "tool": break;
    default: {
      const _exhaustiveCheck = role;
      throw new Error(`Unsupported role: ${String(_exhaustiveCheck)}`);
    }
  }
  return {
    input,
    warnings
  };
}
1102
/**
 * Schema for `providerOptions.databricks` attached to message parts.
 * All fields are optional/nullable; they round-trip Responses-API item ids
 * and MCP approval metadata through the AI SDK prompt.
 */
const ProviderOptionsSchema = z.object({
  itemId: z.string().nullish(),
  toolName: z.string().nullish(),
  type: z.enum(["mcp_approval_request", "mcp_approval_response"]).nullish(),
  serverLabel: z.string().nullish(),
  approvalRequestId: z.string().nullish(),
  approve: z.boolean().nullish(),
  reason: z.string().nullish()
});
1111
/**
 * Renders a tool-result output as a string for a `function_call_output` item.
 * Text-like outputs pass through unchanged; everything else is JSON-encoded.
 */
const convertToolResultOutputToString = (output) => {
  const isAlreadyText = output.type === "text" || output.type === "error-text";
  return isAlreadyText ? output.value : JSON.stringify(output.value);
};
1118
+
1119
+ //#endregion
1120
+ //#region src/responses-agent-language-model/responses-agent-language-model.ts
1121
/**
 * Maps a Responses-API `incomplete_details.reason` to an AI SDK finish reason.
 * A missing reason means normal completion; tool calls take precedence over
 * "stop"/"other" so callers can continue the tool loop.
 */
function mapResponsesFinishReason({ finishReason, hasToolCalls }) {
  if (finishReason == null) return hasToolCalls ? "tool-calls" : "stop";
  if (finishReason === "max_output_tokens") return "length";
  if (finishReason === "content_filter") return "content-filter";
  return hasToolCalls ? "tool-calls" : "other";
}
1130
/**
 * LanguageModelV2 implementation backed by the Databricks Responses agent
 * endpoint (`/responses`). Supports one-shot generation and SSE streaming.
 */
var DatabricksResponsesAgentLanguageModel = class {
  specificationVersion = "v2";
  modelId;
  config;
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // No URL-based media is natively supported by this endpoint.
  supportedUrls = {};
  /** Non-streaming generation: single POST, response mapped to content parts. */
  async doGenerate(options) {
    const networkArgs = await this.getArgs({
      config: this.config,
      options,
      stream: false,
      modelId: this.modelId
    });
    const { value: response } = await postJsonToApi({
      ...networkArgs,
      successfulResponseHandler: createJsonResponseHandler(responsesAgentResponseSchema),
      // Errors are passed through verbatim and never retried.
      failedResponseHandler: createJsonErrorResponseHandler({
        errorSchema: z.any(),
        errorToMessage: (error) => JSON.stringify(error),
        isRetryable: () => false
      })
    });
    const content = convertResponsesAgentResponseToMessagePart(response);
    const hasToolCalls = content.some((p) => p.type === "tool-call");
    return {
      content,
      finishReason: mapResponsesFinishReason({
        finishReason: response.incomplete_details?.reason,
        hasToolCalls
      }),
      // Usage may be absent from the payload; default each counter to 0.
      usage: {
        inputTokens: response.usage?.input_tokens ?? 0,
        outputTokens: response.usage?.output_tokens ?? 0,
        totalTokens: response.usage?.total_tokens ?? 0
      },
      warnings: []
    };
  }
  /** Streaming generation: SSE chunks mapped to AI SDK stream parts. */
  async doStream(options) {
    const networkArgs = await this.getArgs({
      config: this.config,
      options,
      stream: true,
      modelId: this.modelId
    });
    const { responseHeaders, value: response } = await postJsonToApi({
      ...networkArgs,
      failedResponseHandler: createJsonErrorResponseHandler({
        errorSchema: z.any(),
        errorToMessage: (error) => JSON.stringify(error),
        isRetryable: () => false
      }),
      successfulResponseHandler: createEventSourceResponseHandler(looseResponseAgentChunkSchema),
      abortSignal: options.abortSignal
    });
    // Mutable stream state captured by the TransformStream callbacks below.
    let finishReason = "unknown";
    const usage = {
      inputTokens: 0,
      outputTokens: 0,
      totalTokens: 0
    };
    const allParts = [];
    return {
      stream: response.pipeThrough(new TransformStream({
        start(controller) {
          controller.enqueue({
            type: "stream-start",
            warnings: []
          });
        },
        transform(chunk, controller) {
          if (options.includeRawChunks) controller.enqueue({
            type: "raw",
            rawValue: chunk.rawValue
          });
          // Schema-parse failures surface as error parts, not thrown errors.
          if (!chunk.success) {
            finishReason = "error";
            controller.enqueue({
              type: "error",
              error: chunk.error
            });
            return;
          }
          // The terminal event carries finish reason + usage; emit nothing.
          if (chunk.value.type === "responses.completed") {
            const hasToolCalls = allParts.some((p) => p.type === "tool-call");
            finishReason = mapResponsesFinishReason({
              finishReason: chunk.value.response.incomplete_details?.reason,
              hasToolCalls
            });
            usage.inputTokens = chunk.value.response.usage.input_tokens;
            usage.outputTokens = chunk.value.response.usage.output_tokens;
            usage.totalTokens = chunk.value.response.usage.total_tokens;
            return;
          }
          const parts = convertResponsesAgentChunkToMessagePart(chunk.value);
          allParts.push(...parts);
          /**
           * Check if the last chunk was a tool result without a tool call
           * This is a special case for MCP approval requests where the tool result
           * is sent in a separate call after the tool call was approved/denied.
           */
          if (parts.length === 0) return;
          const part = parts[0];
          if (part.type === "tool-result") {
            const matchingToolCallInParts = parts.find((c) => c.type === "tool-call" && c.toolCallId === part.toolCallId);
            const matchingToolCallInStream = allParts.find((c) => c.type === "tool-call" && c.toolCallId === part.toolCallId);
            if (!matchingToolCallInParts && !matchingToolCallInStream) {
              // Re-emit the original tool call from the prompt so the result
              // has a matching call in this stream.
              const toolCallFromPreviousMessages = options.prompt.flatMap((message) => {
                if (typeof message.content === "string") return [];
                return message.content;
              }).find((p) => p.type === "tool-call" && p.toolCallId === part.toolCallId);
              if (!toolCallFromPreviousMessages) throw new Error("No matching tool call found in previous message");
              if (toolCallFromPreviousMessages.type === "tool-call") controller.enqueue({
                ...toolCallFromPreviousMessages,
                input: JSON.stringify(toolCallFromPreviousMessages.input)
              });
            }
          }
          // Drop output_item.done text that duplicates already-streamed deltas.
          if (shouldDedupeOutputItemDone(parts, allParts.slice(0, -parts.length))) return;
          for (const part$1 of parts) controller.enqueue(part$1);
        },
        flush(controller) {
          controller.enqueue({
            type: "finish",
            finishReason,
            usage
          });
        }
      })).pipeThrough(getDatabricksLanguageModelTransformStream()),
      request: { body: networkArgs.body },
      response: { headers: responseHeaders }
    };
  }
  /** Builds URL/headers/body for a `/responses` POST (shared by both paths). */
  async getArgs({ config, options, stream, modelId }) {
    const { input } = await convertToResponsesInput({
      prompt: options.prompt,
      systemMessageMode: "system"
    });
    return {
      url: config.url({ path: "/responses" }),
      headers: combineHeaders(config.headers(), options.headers),
      body: {
        model: modelId,
        input,
        stream
      },
      fetch: config.fetch
    };
  }
};
1286
/**
 * Decides whether an incoming `response.output_item.done` text-delta merely
 * repeats text that was already streamed via incremental deltas, and should
 * therefore be suppressed.
 *
 * To determine if the text in response.output_item.done is a duplicate, we
 * reconstruct the text from the previous consecutive text-deltas and check
 * whether the .done text already contains what we've streamed.
 *
 * The caveat is that the .done text may use GFM footnote syntax, whereas the
 * streamed content arrives as output_text.delta / annotation events. So we
 * rebuild the delta text segments and check that each appears, in order,
 * inside the .done text.
 *
 * Only text-deltas AFTER the last previous .done event are considered, since
 * each .done corresponds to one message.
 */
function shouldDedupeOutputItemDone(incomingParts, previousParts) {
  const isDoneTextDelta = (part) => part.type === "text-delta" && part.providerMetadata?.databricks?.itemType === "response.output_item.done";
  const doneTextDelta = incomingParts.find(isDoneTextDelta);
  if (!doneTextDelta || doneTextDelta.type !== "text-delta" || !doneTextDelta.id) return false;
  const lastDoneIndex = previousParts.findLastIndex(isDoneTextDelta);
  const partsAfterLastDone = previousParts.slice(lastDoneIndex + 1);
  // Rebuild runs of consecutive text-deltas; non-text parts close a run.
  const reconstructedTexts = [];
  let currentRun = "";
  for (const part of partsAfterLastDone) {
    if (part.type === "text-delta") {
      currentRun += part.delta;
    } else if (currentRun.trim().length > 0) {
      reconstructedTexts.push(currentRun.trim());
      currentRun = "";
    }
  }
  // The trailing run is kept untrimmed, matching accumulation semantics.
  reconstructedTexts.push(currentRun);
  if (reconstructedTexts.length === 0) return false;
  // Every reconstructed segment must occur, in order, inside the .done text.
  let searchFrom = 0;
  for (const text of reconstructedTexts) {
    const foundAt = doneTextDelta.delta.indexOf(text, searchFrom);
    if (foundAt === -1) return false;
    searchFrom = foundAt + text.length;
  }
  return true;
}
1335
+
1336
+ //#endregion
1337
+ //#region src/fmapi-language-model/fmapi-schema.ts
1338
// Zod schemas for the Databricks FMAPI (OpenAI-compatible chat completions)
// wire format, used to validate non-streaming responses and SSE chunks.

// A fully-formed tool call on a completed message.
const toolCallSchema = z.object({
  id: z.string(),
  type: z.literal("function"),
  function: z.object({
    name: z.string(),
    arguments: z.string()
  })
});
// Reasoning summary items: plain text (optionally signed) or encrypted blobs.
const reasoningSummarySchema = z.discriminatedUnion("type", [z.object({
  type: z.literal("summary_text"),
  text: z.string(),
  signature: z.string().optional()
}), z.object({
  type: z.literal("summary_encrypted_text"),
  data: z.string()
})]);
// Structured message content items: text, image (by URL), or reasoning.
const contentItemSchema = z.discriminatedUnion("type", [
  z.object({
    type: z.literal("text"),
    text: z.string(),
    citation: z.unknown().optional()
  }),
  z.object({
    type: z.literal("image"),
    image_url: z.string()
  }),
  z.object({
    type: z.literal("reasoning"),
    summary: z.array(reasoningSummarySchema)
  })
]);
// Incremental tool-call fragment in a streaming chunk; `index` correlates
// fragments of the same call, other fields may arrive piecemeal.
const toolCallDeltaSchema = z.object({
  index: z.number(),
  id: z.string().optional(),
  type: z.literal("function").optional(),
  function: z.object({
    name: z.string().optional(),
    arguments: z.string().optional()
  }).optional()
});
// One SSE chunk of a streaming chat completion.
const fmapiChunkSchema = z.object({
  id: z.string(),
  created: z.number(),
  model: z.string(),
  // Usage typically appears only on the final chunk, if at all.
  usage: z.object({
    prompt_tokens: z.number(),
    completion_tokens: z.number(),
    total_tokens: z.number()
  }).nullable().optional(),
  object: z.literal("chat.completion.chunk"),
  choices: z.array(z.object({
    index: z.number(),
    delta: z.object({
      role: z.union([
        z.literal("assistant"),
        z.null(),
        z.undefined()
      ]).optional(),
      // Content may be a plain string or structured content items.
      content: z.union([
        z.string(),
        z.array(contentItemSchema),
        z.null()
      ]).optional(),
      tool_calls: z.array(toolCallDeltaSchema).optional()
    }),
    finish_reason: z.union([
      z.literal("stop"),
      z.literal("tool_calls"),
      z.null()
    ]).optional()
  }))
});
// A complete (non-streaming) chat completion response.
const fmapiResponseSchema = z.object({
  id: z.string(),
  created: z.number(),
  model: z.string(),
  usage: z.object({
    prompt_tokens: z.number(),
    completion_tokens: z.number(),
    total_tokens: z.number()
  }).nullable().optional(),
  choices: z.array(z.object({
    message: z.object({
      role: z.union([
        z.literal("assistant"),
        z.literal("user"),
        z.literal("tool")
      ]),
      content: z.union([
        z.string(),
        z.array(contentItemSchema),
        z.null()
      ]).optional(),
      tool_calls: z.array(toolCallSchema).optional()
    }),
    finish_reason: z.union([
      z.literal("stop"),
      z.literal("tool_calls"),
      z.null()
    ]).optional()
  }))
});
1440
+
1441
+ //#endregion
1442
+ //#region src/fmapi-language-model/fmapi-tags.ts
1443
// XML-ish tags used to embed tool calls/results inline in FMAPI text content.
// LEGACY_* are the older Unity-Catalog-function tags; CALL/RESULT are the
// current generic forms. Both are recognized on parse; only the new forms
// are written on serialize.
const TAGS = {
  LEGACY_CALL_OPEN: "<uc_function_call>",
  LEGACY_CALL_CLOSE: "</uc_function_call>",
  LEGACY_RESULT_OPEN: "<uc_function_result>",
  LEGACY_RESULT_CLOSE: "</uc_function_result>",
  CALL_OPEN: "<tool_call>",
  CALL_CLOSE: "</tool_call>",
  RESULT_OPEN: "<tool_call_result>",
  RESULT_CLOSE: "</tool_call_result>"
};
// Splits text around any complete tagged span (non-greedy, capturing group so
// String.split keeps the matched spans in the output array).
const tagSplitRegex = new RegExp(`(${escapeRegex(TAGS.LEGACY_CALL_OPEN)}.*?${escapeRegex(TAGS.LEGACY_CALL_CLOSE)}|${escapeRegex(TAGS.LEGACY_RESULT_OPEN)}.*?${escapeRegex(TAGS.LEGACY_RESULT_CLOSE)}|${escapeRegex(TAGS.CALL_OPEN)}.*?${escapeRegex(TAGS.CALL_CLOSE)}|${escapeRegex(TAGS.RESULT_OPEN)}.*?${escapeRegex(TAGS.RESULT_CLOSE)})`, "g");
1454
/**
 * Parses a tagged tool-call span (legacy or current tag form) into
 * {id, name, arguments}. Returns null when the text is not an enclosed tag,
 * the payload is not valid JSON, or required fields are missing.
 */
function parseTaggedToolCall(text) {
  const legacyInner = stripEnclosingTag(text, TAGS.LEGACY_CALL_OPEN, TAGS.LEGACY_CALL_CLOSE);
  const inner = legacyInner ?? stripEnclosingTag(text, TAGS.CALL_OPEN, TAGS.CALL_CLOSE);
  if (inner == null) return null;
  try {
    const parsed = JSON.parse(inner);
    const looksLikeToolCall = parsed && typeof parsed === "object" && "id" in parsed && "name" in parsed;
    if (looksLikeToolCall) {
      return {
        id: String(parsed.id),
        name: String(parsed.name),
        arguments: parsed.arguments
      };
    }
  } catch {
    // Malformed JSON payload: treat the span as plain text.
  }
  return null;
}
1467
/**
 * Parses a tagged tool-result span (legacy or current tag form) into
 * {id, content}. Returns null for non-enclosed text, invalid JSON, or a
 * payload without an `id`.
 */
function parseTaggedToolResult(text) {
  const legacyInner = stripEnclosingTag(text, TAGS.LEGACY_RESULT_OPEN, TAGS.LEGACY_RESULT_CLOSE);
  const inner = legacyInner ?? stripEnclosingTag(text, TAGS.RESULT_OPEN, TAGS.RESULT_CLOSE);
  if (inner == null) return null;
  try {
    const parsed = JSON.parse(inner);
    const looksLikeToolResult = parsed && typeof parsed === "object" && "id" in parsed;
    if (looksLikeToolResult) {
      return {
        id: String(parsed.id),
        content: parsed.content
      };
    }
  } catch {
    // Malformed JSON payload: treat the span as plain text.
  }
  return null;
}
1479
/**
 * Serializes a tool call as a `<tool_call>` tagged JSON span so it can be
 * round-tripped through FMAPI text content.
 */
function serializeToolCall(value) {
  const { id, name, arguments: args } = value;
  const payload = JSON.stringify({
    id,
    name,
    arguments: args
  });
  return TAGS.CALL_OPEN + payload + TAGS.CALL_CLOSE;
}
1487
/**
 * Serializes a tool result as a `<tool_call_result>` tagged JSON span so it
 * can be round-tripped through FMAPI text content.
 */
function serializeToolResult(value) {
  const { id, content } = value;
  const payload = JSON.stringify({
    id,
    content
  });
  return TAGS.RESULT_OPEN + payload + TAGS.RESULT_CLOSE;
}
1494
/**
 * Returns the text between `open` and `close` when the trimmed input is fully
 * enclosed by them; otherwise null.
 */
function stripEnclosingTag(text, open, close) {
  const trimmed = text.trim();
  const isEnclosed = trimmed.startsWith(open) && trimmed.endsWith(close);
  if (!isEnclosed) return null;
  return trimmed.slice(open.length, trimmed.length - close.length);
}
1499
// Escapes regex metacharacters so `str` can be embedded literally in a
// RegExp source string (used to build tagSplitRegex from the TAGS values).
function escapeRegex(str) {
  return str.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
}
1502
+
1503
+ //#endregion
1504
+ //#region src/fmapi-language-model/fmapi-convert-to-message-parts.ts
1505
/**
 * Converts one streaming FMAPI chunk into AI SDK stream parts.
 * Only the first choice is considered.
 *
 * @param chunk - parsed fmapiChunkSchema value.
 * @param toolCallIdsByIndex - optional Map used across chunks to correlate
 *   tool-call argument fragments (which may omit `id`) with the id announced
 *   on the first fragment for that index.
 */
const convertFmapiChunkToMessagePart = (chunk, toolCallIdsByIndex) => {
  const parts = [];
  if (chunk.choices.length === 0) return parts;
  const choice = chunk.choices[0];
  if (choice.delta.tool_calls && choice.delta.tool_calls.length > 0) for (const toolCallDelta of choice.delta.tool_calls) {
    const index = toolCallDelta.index;
    // First fragment of a call carries id + name: record and announce it.
    if (toolCallDelta.id && toolCallDelta.function?.name) {
      toolCallIdsByIndex?.set(index, toolCallDelta.id);
      parts.push({
        type: "tool-input-start",
        id: toolCallDelta.id,
        toolName: toolCallDelta.function.name
      });
    }
    if (toolCallDelta.function?.arguments) {
      // Later fragments may omit id; fall back to the recorded id, then to a
      // synthetic index-based id.
      const id = toolCallDelta.id ?? toolCallIdsByIndex?.get(index) ?? `tool-call-${index}`;
      parts.push({
        type: "tool-input-delta",
        id,
        delta: toolCallDelta.function.arguments
      });
    }
  }
  if (typeof choice.delta.content === "string") {
    // String content may embed tagged tool calls/results; split those out and
    // emit the plain segments as text-deltas.
    const extracted = extractPartsFromTextCompletion(choice.delta.content);
    for (const part of extracted) if (part.type === "text") parts.push({
      type: "text-delta",
      id: chunk.id,
      delta: part.text
    });
    else parts.push(part);
  } else if (Array.isArray(choice.delta.content)) parts.push(...mapContentItemsToStreamParts(choice.delta.content, chunk.id));
  return parts;
};
1539
/**
 * Converts a non-streaming FMAPI response into AI SDK content parts.
 * Only the first choice is considered. When structured tool_calls are
 * present they win; otherwise string content is scanned for tagged tool
 * calls/results before being emitted as plain text.
 */
const convertFmapiResponseToMessagePart = (response) => {
  const parts = [];
  if (response.choices.length === 0) return parts;
  const choice = response.choices[0];
  if (choice.message.tool_calls && choice.message.tool_calls.length > 0) {
    for (const toolCall of choice.message.tool_calls) parts.push(convertToolCallToContent(toolCall));
    // Any accompanying non-empty string content is kept as a text part.
    if (typeof choice.message.content === "string" && choice.message.content) parts.push({
      type: "text",
      text: choice.message.content
    });
    return parts;
  }
  if (typeof choice.message.content === "string") {
    // The whole string is either one tagged tool call/result or plain text.
    const extracted = extractToolPartsFromText(choice.message.content);
    if (extracted) for (const part of extracted) parts.push(part);
    else parts.push({
      type: "text",
      text: choice.message.content
    });
  } else parts.push(...mapContentItemsToProviderContent(choice.message.content ?? []));
  return parts;
};
1561
/**
 * Maps a structured FMAPI tool call onto an AI SDK tool-call content part.
 */
const convertToolCallToContent = (toolCall) => ({
  type: "tool-call",
  toolCallId: toolCall.id,
  toolName: toolCall.function.name,
  input: toolCall.function.arguments
});
1569
/**
 * Splits a text completion around tagged tool-call/result spans and converts
 * each segment: tagged spans become tool parts, everything else stays text.
 * Empty segments produced by the split are discarded.
 */
const extractPartsFromTextCompletion = (text) =>
  text
    .split(tagSplitRegex)
    .filter((segment) => segment !== "")
    .flatMap((segment) => extractToolPartsFromText(segment) ?? [{
      type: "text",
      text: segment
    }]);
1582
/**
 * Interprets a text segment as a tagged tool call or tool result.
 * Returns a one-element parts array, or null when the segment is plain text.
 */
const extractToolPartsFromText = (text) => {
  const trimmed = text.trim();
  const call = parseTaggedToolCall(trimmed);
  if (call) {
    const input = typeof call.arguments === "string" ? call.arguments : JSON.stringify(call.arguments);
    return [{
      type: "tool-call",
      input,
      toolName: call.name,
      toolCallId: call.id,
      providerExecuted: true
    }];
  }
  const result = parseTaggedToolResult(trimmed);
  if (!result) return null;
  return [{
    type: "tool-result",
    result: result.content,
    toolCallId: result.id,
    toolName: DATABRICKS_TOOL_CALL_ID
  }];
};
1601
/**
 * Maps structured content items from a streaming delta onto stream parts.
 * Text becomes text-delta, reasoning summaries become reasoning-delta;
 * image items carry no streamable text and are skipped.
 */
const mapContentItemsToStreamParts = (items, id) => {
  const streamParts = [];
  for (const item of items) {
    if (item.type === "text") {
      streamParts.push({
        type: "text-delta",
        id,
        delta: item.text
      });
    } else if (item.type === "reasoning") {
      for (const summary of item.summary) {
        if (summary.type !== "summary_text") continue;
        streamParts.push({
          type: "reasoning-delta",
          id,
          delta: summary.text
        });
      }
    }
  }
  return streamParts;
};
1623
/**
 * Maps structured content items from a complete message onto provider content
 * parts. Text maps to text, reasoning summaries to reasoning parts; image
 * items are skipped.
 */
const mapContentItemsToProviderContent = (items) => {
  const contentParts = [];
  for (const item of items) {
    if (item.type === "text") {
      contentParts.push({
        type: "text",
        text: item.text
      });
    } else if (item.type === "reasoning") {
      for (const summary of item.summary) {
        if (summary.type !== "summary_text") continue;
        contentParts.push({
          type: "reasoning",
          text: summary.text
        });
      }
    }
  }
  return contentParts;
};
1643
+
1644
+ //#endregion
1645
+ //#region src/fmapi-language-model/fmapi-convert-to-input.ts
1646
/**
 * Converts an AI SDK prompt into FMAPI chat messages. System messages are
 * downgraded to the "user" role; each role's content is converted by its
 * dedicated helper; a message whose conversion yields no items gets "" as
 * content.
 */
const convertPromptToFmapiMessages = (prompt) => {
  const contentConverters = {
    system: convertSystemContent,
    user: convertUserContent,
    assistant: convertAssistantContent,
    tool: convertToolContent
  };
  const messages = prompt.map((message) => {
    const convert = contentConverters[message.role];
    const contentItems = convert ? convert(message) : [];
    return {
      role: message.role === "system" ? "user" : message.role,
      content: contentItems.length === 0 ? "" : contentItems
    };
  });
  return { messages };
};
1672
/**
 * Wraps a system message's string content in a single text content item.
 */
const convertSystemContent = (message) => [{
  type: "text",
  text: message.content
}];
1678
/**
 * Converts user message parts to FMAPI content items. Text passes through;
 * image files are included only when their data is an http(s) URL (inline
 * data is dropped); other part types are ignored.
 */
const convertUserContent = (message) => {
  const items = [];
  for (const part of message.content) {
    if (part.type === "text") {
      items.push({
        type: "text",
        text: part.text
      });
    } else if (part.type === "file" && part.mediaType.startsWith("image/")) {
      const url = toHttpUrlString(part.data);
      if (url) items.push({
        type: "image",
        image_url: url
      });
    }
  }
  return items;
};
1699
/**
 * Converts assistant message parts to FMAPI content items. Tool calls and
 * tool results have no native representation in this format, so they are
 * serialized as tagged JSON text spans; image files are included only when
 * referenced by http(s) URL.
 */
const convertAssistantContent = (message) => {
  const items = [];
  for (const part of message.content) switch (part.type) {
    case "text":
      items.push({
        type: "text",
        text: part.text
      });
      break;
    case "file":
      // Only URL-addressable images survive conversion; inline data is dropped.
      if (part.mediaType.startsWith("image/")) {
        const url = toHttpUrlString(part.data);
        if (url) items.push({
          type: "image",
          image_url: url
        });
      }
      break;
    case "reasoning":
      items.push({
        type: "reasoning",
        summary: [{
          type: "summary_text",
          text: part.text
        }]
      });
      break;
    case "tool-call":
      // Round-trip the call as a <tool_call> tagged text span.
      items.push({
        type: "text",
        text: serializeToolCall({
          id: part.toolCallId,
          name: part.toolName,
          arguments: part.input
        })
      });
      break;
    case "tool-result":
      // Round-trip the result as a <tool_call_result> tagged text span.
      items.push({
        type: "text",
        text: serializeToolResult({
          id: part.toolCallId,
          content: convertToolResultOutputToContentValue(part.output)
        })
      });
      break;
  }
  return items;
};
1748
/**
 * Converts tool message parts to FMAPI content items: each tool-result is
 * serialized as a tagged text span; other part types are ignored.
 */
const convertToolContent = (message) =>
  message.content
    .filter((part) => part.type === "tool-result")
    .map((part) => ({
      type: "text",
      text: serializeToolResult({
        id: part.toolCallId,
        content: convertToolResultOutputToContentValue(part.output)
      })
    }));
1759
/**
 * Normalizes file data to an http(s) URL string. URL instances are
 * stringified; strings are accepted only with an http/https scheme;
 * anything else (e.g. inline binary or data: URIs) yields null.
 */
const toHttpUrlString = (data) => {
  if (data instanceof URL) return data.toString();
  const isHttpString = typeof data === "string" && (data.startsWith("http://") || data.startsWith("https://"));
  return isHttpString ? data : null;
};
1766
/**
 * Extracts the raw value from a tool-result output for serialization into a
 * tagged tool-result span. All known output kinds pass their value through
 * unchanged; unknown kinds map to null.
 */
const convertToolResultOutputToContentValue = (output) => {
  const passthroughTypes = ["text", "error-text", "json", "error-json", "content"];
  return passthroughTypes.includes(output.type) ? output.value : null;
};
1776
+
1777
+ //#endregion
1778
+ //#region src/fmapi-language-model/fmapi-language-model.ts
1779
/**
 * LanguageModelV2 implementation backed by the Databricks FMAPI
 * (OpenAI-compatible `/chat/completions`) endpoint. Supports one-shot
 * generation and SSE streaming.
 */
var DatabricksFmapiLanguageModel = class {
  specificationVersion = "v2";
  modelId;
  config;
  constructor(modelId, config) {
    this.modelId = modelId;
    this.config = config;
  }
  get provider() {
    return this.config.provider;
  }
  // No URL-based media is natively supported by this endpoint.
  supportedUrls = {};
  /** Non-streaming generation: single POST, response mapped to content parts. */
  async doGenerate(options) {
    const networkArgs = this.getArgs({
      config: this.config,
      options,
      stream: false,
      modelId: this.modelId
    });
    const { value: response } = await postJsonToApi({
      ...networkArgs,
      successfulResponseHandler: createJsonResponseHandler(fmapiResponseSchema),
      // Errors are passed through verbatim and never retried.
      failedResponseHandler: createJsonErrorResponseHandler({
        errorSchema: z.any(),
        errorToMessage: (error) => JSON.stringify(error),
        isRetryable: () => false
      })
    });
    const choice = response.choices[0];
    // Anything other than tool_calls (including a missing choice) is "stop".
    let finishReason = "stop";
    if (choice?.finish_reason === "tool_calls") finishReason = "tool-calls";
    return {
      content: convertFmapiResponseToMessagePart(response),
      finishReason,
      usage: {
        inputTokens: response.usage?.prompt_tokens ?? 0,
        outputTokens: response.usage?.completion_tokens ?? 0,
        totalTokens: response.usage?.total_tokens ?? 0
      },
      warnings: []
    };
  }
  /** Streaming generation: SSE chunks mapped to AI SDK stream parts. */
  async doStream(options) {
    const networkArgs = this.getArgs({
      config: this.config,
      options,
      stream: true,
      modelId: this.modelId
    });
    const { responseHeaders, value: response } = await postJsonToApi({
      ...networkArgs,
      failedResponseHandler: createJsonErrorResponseHandler({
        errorSchema: z.any(),
        errorToMessage: (error) => JSON.stringify(error),
        isRetryable: () => false
      }),
      successfulResponseHandler: createEventSourceResponseHandler(fmapiChunkSchema),
      abortSignal: options.abortSignal
    });
    // Mutable stream state captured by the TransformStream callbacks below.
    let finishReason = "unknown";
    let usage = {
      inputTokens: 0,
      outputTokens: 0,
      totalTokens: 0
    };
    // Tool-call assembly state: fragment index -> id, id -> name, id -> args.
    const toolCallIdsByIndex = new Map();
    const toolCallNamesById = new Map();
    const toolCallInputsById = new Map();
    return {
      stream: response.pipeThrough(new TransformStream({
        start(controller) {
          controller.enqueue({
            type: "stream-start",
            warnings: []
          });
        },
        transform(chunk, controller) {
          if (options.includeRawChunks) controller.enqueue({
            type: "raw",
            rawValue: chunk.rawValue
          });
          // Schema-parse failures surface as error parts, not thrown errors.
          if (!chunk.success) {
            finishReason = "error";
            controller.enqueue({
              type: "error",
              error: chunk.error
            });
            return;
          }
          const choice = chunk.value.choices[0];
          if (choice?.finish_reason === "stop") finishReason = "stop";
          else if (choice?.finish_reason === "tool_calls") finishReason = "tool-calls";
          // Usage typically arrives on a late chunk; last one wins.
          if (chunk.value.usage) usage = {
            inputTokens: chunk.value.usage.prompt_tokens,
            outputTokens: chunk.value.usage.completion_tokens,
            totalTokens: chunk.value.usage.total_tokens
          };
          const parts = convertFmapiChunkToMessagePart(chunk.value, toolCallIdsByIndex);
          for (const part of parts) {
            // Accumulate tool-call argument text so flush() can emit the
            // completed tool-call parts.
            if (part.type === "tool-input-start") {
              toolCallNamesById.set(part.id, part.toolName);
              toolCallInputsById.set(part.id, "");
            } else if (part.type === "tool-input-delta") {
              const current = toolCallInputsById.get(part.id) ?? "";
              toolCallInputsById.set(part.id, current + part.delta);
            }
            controller.enqueue(part);
          }
        },
        flush(controller) {
          // Close out every assembled tool call before finishing the stream.
          for (const [toolCallId, inputText] of toolCallInputsById) {
            const toolName = toolCallNamesById.get(toolCallId);
            if (toolName) {
              controller.enqueue({
                type: "tool-input-end",
                id: toolCallId
              });
              controller.enqueue({
                type: "tool-call",
                toolCallId,
                toolName,
                input: inputText
              });
            }
          }
          controller.enqueue({
            type: "finish",
            finishReason,
            usage
          });
        }
      })).pipeThrough(getDatabricksLanguageModelTransformStream()),
      request: { body: networkArgs.body },
      response: { headers: responseHeaders }
    };
  }
  /** Builds URL/headers/body for a `/chat/completions` POST (both paths). */
  getArgs({ config, options, stream, modelId }) {
    const tools = options.tools?.map((tool) => convertToolToOpenAIFormat(tool)).filter((tool) => tool !== void 0);
    const toolChoice = options.toolChoice ? convertToolChoiceToOpenAIFormat(options.toolChoice) : void 0;
    return {
      url: config.url({ path: "/chat/completions" }),
      headers: combineHeaders(config.headers(), options.headers),
      body: {
        messages: convertPromptToFmapiMessages(options.prompt).messages,
        stream,
        model: modelId,
        // Optional fields are only included when meaningfully set.
        ...tools && tools.length > 0 ? { tools } : {},
        ...toolChoice && tools && tools.length > 0 ? { tool_choice: toolChoice } : {},
        ...options.temperature !== void 0 ? { temperature: options.temperature } : {},
        ...options.maxOutputTokens !== void 0 ? { max_tokens: options.maxOutputTokens } : {},
        ...options.stopSequences && options.stopSequences.length > 0 ? { stop: options.stopSequences } : {}
      },
      fetch: config.fetch
    };
  }
};
1935
/**
 * Convert an AI SDK tool definition to the OpenAI function-tool wire format.
 * Provider-defined tools and the internal Databricks tool-call marker are
 * not forwarded to the endpoint and yield undefined (callers filter these).
 */
function convertToolToOpenAIFormat(tool) {
	const isInternal = tool.type === "provider-defined" || tool.name === DATABRICKS_TOOL_CALL_ID;
	if (isInternal) return undefined;
	const { name, description, inputSchema } = tool;
	return {
		type: "function",
		function: {
			name,
			description,
			parameters: inputSchema
		}
	};
}
1949
/**
 * Map an AI SDK toolChoice value onto the OpenAI tool_choice wire format.
 * "auto" / "none" / "required" pass through as strings; a specific tool
 * becomes a function reference object. Unknown types fall back to "auto".
 */
function convertToolChoiceToOpenAIFormat(toolChoice) {
	switch (toolChoice.type) {
		case "auto":
		case "none":
		case "required":
			return toolChoice.type;
		case "tool":
			return {
				type: "function",
				function: { name: toolChoice.toolName }
			};
		default:
			// Defensive fallback for unrecognized choice types.
			return "auto";
	}
}
1962
+
1963
+ //#endregion
1964
+ //#region src/databricks-provider.ts
1965
/**
 * Create a Databricks AI SDK provider.
 *
 * Exposes factories for the three supported model flavors (ChatAgent,
 * ResponsesAgent, FMAPI). Image models, text-embedding models, and the
 * generic languageModel entry point are not implemented yet and throw
 * when invoked.
 */
const createDatabricksProvider = (settings) => {
	const baseUrl = withoutTrailingSlash(settings.baseURL);
	const provider = settings.provider ?? "databricks";
	// Resolve an endpoint URL; a caller-supplied formatter takes precedence
	// over plain concatenation.
	const formatUrl = ({ path }) => {
		const custom = settings.formatUrl?.({ baseUrl, path });
		return custom ?? `${baseUrl}${path}`;
	};
	// Fresh config object per model instance, mirroring the shared settings.
	const modelConfig = () => ({
		url: formatUrl,
		headers: () => combineHeaders(settings.headers),
		fetch: settings.fetch,
		provider
	});
	// Factory for stubs of not-yet-supported model kinds.
	const notImplemented = (name) => () => {
		throw new Error(`${name} is not supported yet`);
	};
	return {
		chatAgent: (modelId) => new DatabricksChatAgentLanguageModel(modelId, modelConfig()),
		responsesAgent: (modelId) => new DatabricksResponsesAgentLanguageModel(modelId, modelConfig()),
		fmapi: (modelId) => new DatabricksFmapiLanguageModel(modelId, modelConfig()),
		imageModel: notImplemented("ImageModel"),
		textEmbeddingModel: notImplemented("TextEmbeddingModel"),
		languageModel: notImplemented("LanguageModel")
	};
};
2006
+
2007
+ //#endregion
2008
+ export { DATABRICKS_TOOL_CALL_ID, DATABRICKS_TOOL_DEFINITION, MCP_APPROVAL_REQUEST_TYPE, MCP_APPROVAL_RESPONSE_TYPE, MCP_APPROVAL_STATUS_KEY, createApprovalStatusOutput, createDatabricksProvider, extractApprovalStatus, extractApprovalStatusFromToolResult, extractDatabricksMetadata, getMcpApprovalState, isApprovalStatusOutput, isMcpApprovalRequest, isMcpApprovalResponse };
2009
+ //# sourceMappingURL=index.js.map