@browser-ai/core 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,1635 @@
1
"use strict";
// Bundler (esbuild-style) interop helpers: expose ESM bindings to CommonJS
// consumers as enumerable lazy getters.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Defines each entry of `all` on `target` as an enumerable lazy getter.
var __export = (target, all) => {
  for (const exportName in all) {
    __defProp(target, exportName, { get: all[exportName], enumerable: true });
  }
};
// Copies own properties of `from` onto `to` as getters, skipping `except`
// and anything `to` already owns; preserves source enumerability.
var __copyProps = (to, from, except, desc) => {
  if ((from && typeof from === "object") || typeof from === "function") {
    for (const key of __getOwnPropNames(from)) {
      if (__hasOwnProp.call(to, key) || key === except) {
        continue;
      }
      const sourceDesc = __getOwnPropDesc(from, key);
      __defProp(to, key, {
        get: () => from[key],
        enumerable: !sourceDesc || sourceDesc.enumerable
      });
    }
  }
  return to;
};
// Wraps a module namespace with the `__esModule` marker for CJS consumers.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/index.ts
// Public API surface of the bundle. Each entry maps an export name to a
// lazy getter so bindings defined later in the file resolve at access time.
var index_exports = {};
__export(index_exports, {
  BuiltInAIChatLanguageModel: () => BuiltInAIChatLanguageModel,
  BuiltInAIEmbeddingModel: () => BuiltInAIEmbeddingModel,
  builtInAI: () => builtInAI,
  createBuiltInAI: () => createBuiltInAI,
  doesBrowserSupportBuiltInAI: () => doesBrowserSupportBuiltInAI,
  isBuiltInAIModelAvailable: () => isBuiltInAIModelAvailable
});
// Expose the export map as a CommonJS module tagged with `__esModule`.
module.exports = __toCommonJS(index_exports);
31
+
32
+ // src/convert-to-built-in-ai-messages.ts
33
+ var import_provider = require("@ai-sdk/provider");
34
+
35
// src/tool-calling/format-tool-results.ts
/**
 * Serializes one tool result into the wire payload embedded in a
 * tool_result fence. Key insertion order (name, result, error, id) is
 * preserved so the emitted JSON text is stable.
 */
function buildResultPayload(result) {
  const { toolName, toolCallId, isError } = result;
  const payload = {
    name: toolName,
    result: result.result ?? null,
    error: Boolean(isError)
  };
  if (toolCallId) {
    payload.id = toolCallId;
  }
  return payload;
}
/**
 * Renders tool results as a ```tool_result fenced block with one JSON
 * payload per line. Returns "" when there is nothing to report.
 */
function formatToolResults(results) {
  if (!results?.length) {
    return "";
  }
  const lines = results.map((result) => JSON.stringify(buildResultPayload(result)));
  return ["```tool_result", lines.join("\n"), "```"].join("\n");
}
58
+
59
// src/convert-to-built-in-ai-messages.ts
/**
 * Decodes a base64 string into raw bytes.
 * @throws {Error} when the input is not valid base64.
 */
function convertBase64ToUint8Array(base64) {
  try {
    const decoded = atob(base64);
    // atob yields a binary string (char codes 0-255), so mapping each
    // character's char code reproduces the original bytes.
    return Uint8Array.from(decoded, (char) => char.charCodeAt(0));
  } catch (error) {
    throw new Error(`Failed to convert base64 to Uint8Array: ${error}`);
  }
}
/**
 * Normalizes file-part data for the Prompt API: URLs become their string
 * form, byte arrays pass through unchanged, and base64 strings are decoded
 * into bytes. Anything else is rejected.
 */
function convertFileData(data, mediaType) {
  if (data instanceof URL) {
    return data.toString();
  }
  if (data instanceof Uint8Array) {
    return data;
  }
  if (typeof data === "string") {
    return convertBase64ToUint8Array(data);
  }
  throw new Error(`Unexpected data type for ${mediaType}: ${data}`);
}
85
/**
 * Coerces a tool call's raw input into an arguments value: JSON strings
 * are parsed, unparseable strings pass through verbatim, and nullish
 * input becomes an empty object. Any other value is returned as-is.
 */
function normalizeToolArguments(input) {
  if (input == null) {
    return {};
  }
  if (typeof input !== "string") {
    return input;
  }
  try {
    return JSON.parse(input);
  } catch {
    return input;
  }
}
/**
 * Renders assistant tool-call parts as a ```tool_call fenced block with
 * one JSON payload per line (keys: name, arguments, optional id).
 * Returns "" when there are no calls.
 */
function formatToolCallsJson(parts) {
  if (parts.length === 0) {
    return "";
  }
  const lines = parts.map((call) => {
    const payload = {
      name: call.toolName,
      arguments: normalizeToolArguments(call.input)
    };
    if (call.toolCallId) {
      payload.id = call.toolCallId;
    }
    return JSON.stringify(payload);
  });
  return ["```tool_call", lines.join("\n"), "```"].join("\n");
}
116
/**
 * Maps a tool-result output part onto a plain { value, isError } pair.
 * The error-* variants flag isError; unknown types fall back to passing
 * the whole output object through as the value.
 */
function convertToolResultOutput(output) {
  switch (output.type) {
    case "text":
    case "json":
    case "content":
      return { value: output.value, isError: false };
    case "error-text":
    case "error-json":
      return { value: output.value, isError: true };
    default:
      return { value: output, isError: false };
  }
}
/**
 * Converts an AI SDK tool-result part into the internal result record
 * consumed by formatToolResults.
 */
function toToolResult(part) {
  const { value, isError } = convertToolResultOutput(part.output);
  return {
    toolCallId: part.toolCallId,
    toolName: part.toolName,
    result: value,
    isError
  };
}
143
/**
 * Converts an AI SDK prompt (array of role-tagged messages) into the shape
 * the built-in Prompt API expects.
 *
 * - system messages are extracted (the last one wins) and returned
 *   separately as `systemMessage`;
 * - user text/image/audio parts map onto Prompt API content parts;
 * - assistant text/reasoning is concatenated, with tool calls appended as
 *   a ```tool_call fence;
 * - tool messages are re-emitted as user messages carrying a
 *   ```tool_result fence.
 *
 * @param prompt - AI SDK LanguageModel prompt messages
 * @returns {{ systemMessage: string | undefined, messages: Array }}
 * @throws {UnsupportedFunctionalityError} for unsupported part types
 *   (non image/audio files, assistant attachments, misplaced tool results)
 * @throws {Error} for unknown roles
 */
function convertToBuiltInAIMessages(prompt) {
  const normalizedPrompt = prompt.slice();
  let systemMessage;
  const messages = [];
  for (const message of normalizedPrompt) {
    switch (message.role) {
      case "system": {
        // Last system message wins when several are present.
        systemMessage = message.content;
        break;
      }
      case "user": {
        messages.push({
          role: "user",
          content: message.content.map((part) => {
            switch (part.type) {
              case "text": {
                return {
                  type: "text",
                  value: part.text
                };
              }
              case "file": {
                const { mediaType, data } = part;
                if (mediaType?.startsWith("image/")) {
                  return {
                    type: "image",
                    value: convertFileData(data, mediaType)
                  };
                }
                if (mediaType?.startsWith("audio/")) {
                  return {
                    type: "audio",
                    value: convertFileData(data, mediaType)
                  };
                }
                throw new import_provider.UnsupportedFunctionalityError({
                  functionality: `file type: ${mediaType}`
                });
              }
              default: {
                const exhaustiveCheck = part;
                throw new import_provider.UnsupportedFunctionalityError({
                  functionality: `content type: ${exhaustiveCheck.type ?? "unknown"}`
                });
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        let text = "";
        const toolCallParts = [];
        for (const part of message.content) {
          switch (part.type) {
            // Reasoning text is folded into the plain text stream.
            case "text":
            case "reasoning": {
              text += part.text;
              break;
            }
            case "tool-call": {
              toolCallParts.push(part);
              break;
            }
            case "file": {
              throw new import_provider.UnsupportedFunctionalityError({
                functionality: "assistant file attachments"
              });
            }
            case "tool-result": {
              throw new import_provider.UnsupportedFunctionalityError({
                functionality: "tool-result parts in assistant messages (should be in tool messages)"
              });
            }
            default: {
              const exhaustiveCheck = part;
              throw new import_provider.UnsupportedFunctionalityError({
                functionality: `assistant part type: ${exhaustiveCheck.type ?? "unknown"}`
              });
            }
          }
        }
        const toolCallJson = formatToolCallsJson(toolCallParts);
        const contentSegments = [];
        // FIX: the original had two branches (trimmed-non-empty and
        // raw-non-empty) that both pushed `text`; a single length check is
        // equivalent and removes the redundant condition.
        if (text.length > 0) {
          contentSegments.push(text);
        }
        if (toolCallJson) {
          contentSegments.push(toolCallJson);
        }
        messages.push({
          role: "assistant",
          content: contentSegments.length > 0 ? contentSegments.join("\n") : ""
        });
        break;
      }
      case "tool": {
        // Tool results travel back to the model as a user message carrying
        // a tool_result fence.
        const results = message.content.map(toToolResult);
        messages.push({
          role: "user",
          content: formatToolResults(results)
        });
        break;
      }
      default: {
        const exhaustiveCheck = message;
        throw new Error(
          `Unsupported role: ${exhaustiveCheck.role ?? "unknown"}`
        );
      }
    }
  }
  return { systemMessage, messages };
}
267
+
268
// src/tool-calling/build-json-system-prompt.ts
/**
 * Builds the system prompt that teaches the model the JSON-fence tool-call
 * protocol. When no tools are supplied, the original system prompt is
 * returned untouched.
 *
 * @param originalSystemPrompt - The caller's system prompt (may be empty)
 * @param tools - Function tools with name/description and a JSON schema
 * @param options - Optional flags; `allowParallelToolCalls` switches the
 *   instruction between one-call-at-a-time (default) and multi-call.
 * @returns The combined system prompt string
 */
function buildJsonToolSystemPrompt(originalSystemPrompt, tools, options) {
  if (!tools || tools.length === 0) {
    return originalSystemPrompt || "";
  }
  // FIX: `options` was previously accepted but ignored; honor
  // allowParallelToolCalls. Default (false/undefined) keeps the original
  // single-call instruction verbatim.
  const parallelInstruction = options?.allowParallelToolCalls
    ? "You may request multiple tool calls at once by emitting one JSON payload per line inside a single tool_call fence."
    : "Only request one tool call at a time. Wait for tool results before asking for another tool.";
  const toolSchemas = tools.map((tool) => ({
    name: tool.name,
    description: tool.description ?? "No description provided.",
    parameters: getParameters(tool) || { type: "object", properties: {} }
  }));
  const toolsJson = JSON.stringify(toolSchemas, null, 2);
  const instructionBody = `You are a helpful AI assistant with access to tools.

# Available Tools
${toolsJson}

# Tool Calling Instructions
${parallelInstruction}

To call a tool, output JSON in this exact format inside a \`\`\`tool_call code fence:

\`\`\`tool_call
{"name": "tool_name", "arguments": {"param1": "value1", "param2": "value2"}}
\`\`\`

Tool responses will be provided in \`\`\`tool_result fences. Each line contains JSON like:
\`\`\`tool_result
{"id": "call_123", "name": "tool_name", "result": {...}, "error": false}
\`\`\`
Use the \`result\` payload (and treat \`error\` as a boolean flag) when continuing the conversation.

Important:
- Use exact tool and parameter names from the schema above
- Arguments must be a valid JSON object matching the tool's parameters
- You can include brief reasoning before or after the tool call
- If no tool is needed, respond directly without tool_call fences`;
  if (originalSystemPrompt?.trim()) {
    return `${originalSystemPrompt.trim()}

${instructionBody}`;
  }
  return instructionBody;
}
/**
 * Reads a tool's JSON schema, supporting both the legacy `parameters`
 * field and the newer `inputSchema` field.
 */
function getParameters(tool) {
  return "parameters" in tool ? tool.parameters : tool.inputSchema;
}
321
+
322
// src/tool-calling/parse-json-function-calls.ts
var JSON_TOOL_CALL_FENCE_REGEX = /```tool[_-]?call\s*([\s\S]*?)```/gi;
/** Generates a unique-enough id for tool calls that did not supply one. */
function generateToolCallId() {
  return `call_${Date.now()}_${Math.random().toString(36).slice(2, 9)}`;
}
/**
 * Converts one parsed payload into a tool-call part; returns null when the
 * payload is not an object carrying a tool name.
 */
function toToolCallPart(call) {
  if (!call || !call.name) {
    return null;
  }
  return {
    type: "tool-call",
    toolCallId: call.id || generateToolCallId(),
    toolName: call.name,
    args: call.arguments || {}
  };
}
/**
 * Parses the JSON inside a tool_call fence. Accepts a single object, an
 * array of objects, or one JSON object per line; unparseable lines are
 * skipped (best effort).
 */
function parseFenceContent(innerContent) {
  const trimmed = innerContent.trim();
  try {
    const parsed = JSON.parse(trimmed);
    return Array.isArray(parsed) ? parsed : [parsed];
  } catch {
    const calls = [];
    for (const line of trimmed.split("\n")) {
      if (!line.trim()) continue;
      try {
        calls.push(JSON.parse(line.trim()));
      } catch {
        // Best effort: skip lines that are not valid JSON.
      }
    }
    return calls;
  }
}
/**
 * Extracts tool calls from a model response containing ```tool_call fences.
 *
 * FIX: the single-payload and line-by-line parse paths previously
 * duplicated the push logic (now shared via toToolCallPart); the dead
 * outer try/catch with an unreachable console.warn was removed; and a
 * null entry inside a payload array no longer discards the remaining
 * valid calls in that array.
 *
 * @param response - Raw model output text
 * @returns {{ toolCalls: Array, textContent: string }} parsed calls and
 *   the response text with fences removed and blank runs collapsed.
 */
function parseJsonFunctionCalls(response) {
  const matches = Array.from(response.matchAll(JSON_TOOL_CALL_FENCE_REGEX));
  JSON_TOOL_CALL_FENCE_REGEX.lastIndex = 0;
  if (matches.length === 0) {
    return { toolCalls: [], textContent: response };
  }
  const toolCalls = [];
  let textContent = response;
  for (const [fullFence, innerContent] of matches) {
    textContent = textContent.replace(fullFence, "");
    for (const call of parseFenceContent(innerContent)) {
      const part = toToolCallPart(call);
      if (part) {
        toolCalls.push(part);
      }
    }
  }
  textContent = textContent.replace(/\n{2,}/g, "\n");
  return { toolCalls, textContent: textContent.trim() };
}
377
+
378
// src/utils/warnings.ts
/** Creates a warning for a call setting the Prompt API cannot honor. */
function createUnsupportedSettingWarning(setting, details) {
  return {
    type: "unsupported-setting",
    setting,
    details
  };
}
/** Creates a warning for a tool the Prompt API cannot honor. */
function createUnsupportedToolWarning(tool, details) {
  return {
    type: "unsupported-tool",
    tool,
    details
  };
}
// Settings the Prompt API ignores, in the order warnings are reported.
var UNSUPPORTED_PROMPT_API_SETTINGS = [
  "maxOutputTokens",
  "stopSequences",
  "topP",
  "presencePenalty",
  "frequencyPenalty",
  "seed",
  "toolChoice"
];
/**
 * Collects a warning for every unsupported setting that is present
 * (non-nullish) in `options`.
 *
 * FIX: replaces seven copy-pasted if-blocks with a data-driven loop;
 * warning order and message wording are unchanged.
 *
 * @param options - Subset of call options to inspect
 * @returns Array of unsupported-setting warnings (possibly empty)
 */
function gatherUnsupportedSettingWarnings(options) {
  const warnings = [];
  for (const setting of UNSUPPORTED_PROMPT_API_SETTINGS) {
    if (options[setting] != null) {
      warnings.push(
        createUnsupportedSettingWarning(
          setting,
          `${setting} is not supported by Prompt API`
        )
      );
    }
  }
  return warnings;
}
453
+
454
// src/utils/prompt-utils.ts
/**
 * Returns true when any user message in the prompt carries a file part
 * (image, audio, etc.); used to decide whether multimodal session
 * capabilities are needed.
 */
function hasMultimodalContent(prompt) {
  for (const message of prompt) {
    if (message.role !== "user") {
      continue;
    }
    for (const part of message.content) {
      if (part.type === "file") {
        return true;
      }
    }
  }
  return false;
}
467
/**
 * Derives the Prompt API `expectedInputs` list from file parts found in
 * user messages. Image and audio media types each contribute one entry;
 * the result is deduplicated and keeps first-seen order.
 *
 * @returns Array of `{ type: "image" | "audio" }` descriptors
 */
function getExpectedInputs(prompt) {
  const kinds = /* @__PURE__ */ new Set();
  for (const message of prompt) {
    if (message.role !== "user") {
      continue;
    }
    for (const part of message.content) {
      if (part.type !== "file") {
        continue;
      }
      if (part.mediaType?.startsWith("image/")) {
        kinds.add("image");
      } else if (part.mediaType?.startsWith("audio/")) {
        kinds.add("audio");
      }
    }
  }
  return [...kinds].map((type) => ({ type }));
}
484
/**
 * Returns a copy of `messages` with `systemPrompt` prepended to the first
 * user message — as a leading text part when that message's content is an
 * array, or as a string prefix when it is a string. When no user message
 * exists, a new user message carrying the prompt is prepended instead.
 * A blank/whitespace prompt returns `messages` unchanged (same reference).
 */
function prependSystemPromptToMessages(messages, systemPrompt) {
  if (!systemPrompt.trim()) {
    return messages;
  }
  const copies = messages.map((message) => ({ ...message }));
  const targetIndex = copies.findIndex((message) => message.role === "user");
  if (targetIndex === -1) {
    copies.unshift({
      role: "user",
      content: systemPrompt
    });
    return copies;
  }
  const target = copies[targetIndex];
  if (Array.isArray(target.content)) {
    copies[targetIndex] = {
      ...target,
      content: [{ type: "text", value: `${systemPrompt}\n\n` }, ...target.content]
    };
  } else if (typeof target.content === "string") {
    copies[targetIndex] = {
      ...target,
      content: `${systemPrompt}\n\n${target.content}`
    };
  }
  return copies;
}
522
+
523
// src/utils/tool-utils.ts
/** Type guard: true when the tool is a plain function tool. */
function isFunctionTool(tool) {
  const { type } = tool;
  return type === "function";
}
527
+
528
+ // src/models/session-manager.ts
529
+ var import_provider2 = require("@ai-sdk/provider");
530
var SessionManager = class {
  /**
   * Manages the lifecycle of a single Prompt API (`LanguageModel`)
   * session: creation, caching, availability checks, and teardown.
   *
   * @param baseOptions - Base configuration options applied to every
   *   session this manager creates
   */
  constructor(baseOptions) {
    this.session = null;
    this.baseOptions = baseOptions;
  }
  /**
   * Gets or creates a session with the specified options.
   *
   * NOTE(review): when a session is already cached it is returned
   * immediately and `options` are ignored — call destroySession() first
   * if a session with different options (system message, expected
   * inputs, etc.) is needed.
   *
   * @param options - Optional session creation options
   * @returns Promise resolving to a LanguageModel session
   * @throws {LoadSettingError} When Prompt API is not available or model is unavailable
   */
  async getSession(options) {
    // `LanguageModel` is a browser-provided global (Chrome/Edge built-in AI).
    if (typeof LanguageModel === "undefined") {
      throw new import_provider2.LoadSettingError({
        message: "Prompt API is not available. This library requires Chrome or Edge browser with built-in AI capabilities."
      });
    }
    // Reuse the cached session (see NOTE above: request options are
    // ignored on this path).
    if (this.session) {
      return this.session;
    }
    const availability = await LanguageModel.availability();
    if (availability === "unavailable") {
      throw new import_provider2.LoadSettingError({
        message: "Built-in model not available in this browser"
      });
    }
    // "downloadable"/"downloading" states fall through: create() is
    // expected to trigger/await the download.
    const sessionOptions = this.prepareSessionOptions(options);
    this.session = await LanguageModel.create(sessionOptions);
    return this.session;
  }
  /**
   * Creates a session with download progress monitoring.
   * Convenience wrapper over getSession for explicit progress tracking.
   *
   * @param onDownloadProgress - Optional callback receiving progress (0-1) during download
   * @returns Promise resolving to a LanguageModel session
   * @throws {LoadSettingError} When Prompt API is not available or model is unavailable
   */
  async createSessionWithProgress(onDownloadProgress) {
    return this.getSession({ onDownloadProgress });
  }
  /**
   * Checks the availability status of the built-in AI model.
   *
   * @returns Promise resolving to availability status:
   *   "unavailable" | "downloadable" | "downloading" | "available"
   */
  async checkAvailability() {
    // Outside a supporting browser there is no global to query.
    if (typeof LanguageModel === "undefined") {
      return "unavailable";
    }
    return LanguageModel.availability();
  }
  /**
   * Gets the current session if it exists.
   *
   * @returns The current session or null if none exists
   */
  getCurrentSession() {
    return this.session;
  }
  /**
   * Destroys the current session (if any) and clears the cache so the
   * next getSession() call creates a fresh session with new options.
   */
  destroySession() {
    // Guard: a polyfilled session may not implement destroy().
    if (this.session && typeof this.session.destroy === "function") {
      this.session.destroy();
    }
    this.session = null;
  }
  /**
   * Prepares merged session options from base config and request options.
   * Request options win over base options; `systemMessage`,
   * `expectedInputs`, and `onDownloadProgress` are translated into the
   * shapes LanguageModel.create() expects.
   *
   * @param options - Optional request-specific options
   * @returns Merged and sanitized options ready for LanguageModel.create()
   * @private
   */
  prepareSessionOptions(options) {
    const mergedOptions = { ...this.baseOptions };
    if (options) {
      // Peel off the custom keys; everything else is passed through to
      // LanguageModel.create() unchanged.
      const {
        systemMessage,
        expectedInputs,
        onDownloadProgress,
        ...createOptions
      } = options;
      Object.assign(mergedOptions, createOptions);
      if (systemMessage) {
        // The Prompt API takes the system prompt as an initial message.
        mergedOptions.initialPrompts = [
          { role: "system", content: systemMessage }
        ];
      }
      if (expectedInputs && expectedInputs.length > 0) {
        mergedOptions.expectedInputs = expectedInputs;
      }
      if (onDownloadProgress) {
        // Bridge the DOM monitor event to the simple progress callback.
        mergedOptions.monitor = (m) => {
          m.addEventListener("downloadprogress", (e) => {
            onDownloadProgress(e.loaded);
          });
        };
      }
    }
    this.sanitizeOptions(mergedOptions);
    return mergedOptions;
  }
  /**
   * Removes custom options that aren't part of the LanguageModel.create
   * API. Currently a no-op placeholder: the custom keys are already
   * destructured away in prepareSessionOptions.
   *
   * @param options - Options object to sanitize in-place
   * @private
   */
  sanitizeOptions(options) {
  }
};
686
+
687
+ // src/streaming/tool-call-detector.ts
688
var ToolCallFenceDetector = class {
  /**
   * Incremental detector for ```tool_call fences in streamed model output.
   * Buffers incoming chunks, withholds trailing text that might be the
   * start of a fence marker, and reports complete fences for parsing.
   */
  constructor() {
    // Markers that open a tool-call fence.
    this.FENCE_STARTS = ["```tool_call"];
    this.FENCE_END = "```";
    this.buffer = "";
    // Streaming state
    this.inFence = false;
    // Accumulated fence content while streaming inside a fence.
    this.fenceStartBuffer = "";
  }
  /**
   * Adds a chunk of text to the internal buffer
   *
   * @param chunk - Text chunk from the stream
   */
  addChunk(chunk) {
    this.buffer += chunk;
  }
  /**
   * Gets the current buffer content
   */
  getBuffer() {
    return this.buffer;
  }
  /**
   * Clears the internal buffer
   */
  clearBuffer() {
    this.buffer = "";
  }
  /**
   * Detects if there's a complete fence in the buffer
   * (batch variant — returns the whole fence text, markers included).
   *
   * This method:
   * 1. Searches for fence start markers
   * 2. If found, looks for closing fence
   * 3. Computes overlap for partial fences
   * 4. Returns safe text that can be emitted
   *
   * @returns Detection result with fence info and safe text
   */
  detectFence() {
    const { index: startIdx, prefix: matchedPrefix } = this.findFenceStart(
      this.buffer
    );
    if (startIdx === -1) {
      // No fence start found: emit everything except a possible partial
      // marker at the buffer's tail (retained for the next chunk).
      const overlap = this.computeOverlapLength(this.buffer, this.FENCE_STARTS);
      const safeTextLength = this.buffer.length - overlap;
      const prefixText2 = safeTextLength > 0 ? this.buffer.slice(0, safeTextLength) : "";
      const remaining = overlap > 0 ? this.buffer.slice(-overlap) : "";
      this.buffer = remaining;
      return {
        fence: null,
        prefixText: prefixText2,
        remainingText: "",
        overlapLength: overlap
      };
    }
    const prefixText = this.buffer.slice(0, startIdx);
    this.buffer = this.buffer.slice(startIdx);
    const prefixLength = matchedPrefix?.length ?? 0;
    // Look for the closing marker strictly after the opening one.
    const closingIdx = this.buffer.indexOf(this.FENCE_END, prefixLength);
    if (closingIdx === -1) {
      // Fence opened but not yet closed: keep it buffered, emit only the
      // text that preceded it.
      return {
        fence: null,
        prefixText,
        remainingText: "",
        overlapLength: 0
      };
    }
    const endPos = closingIdx + this.FENCE_END.length;
    const fence = this.buffer.slice(0, endPos);
    const remainingText = this.buffer.slice(endPos);
    this.buffer = "";
    return {
      fence,
      prefixText,
      remainingText,
      overlapLength: 0
    };
  }
  /**
   * Finds the first occurrence of any fence start marker
   *
   * @param text - Text to search in
   * @returns Index of first fence start and which prefix matched
   * @private
   */
  findFenceStart(text) {
    let bestIndex = -1;
    let matchedPrefix = null;
    for (const prefix of this.FENCE_STARTS) {
      const idx = text.indexOf(prefix);
      if (idx !== -1 && (bestIndex === -1 || idx < bestIndex)) {
        bestIndex = idx;
        matchedPrefix = prefix;
      }
    }
    return { index: bestIndex, prefix: matchedPrefix };
  }
  /**
   * Computes the maximum overlap between the end of text and the start of any prefix
   *
   * This is crucial for streaming: if the buffer ends with "``", we can't emit it
   * because the next chunk might be "`tool_call", completing a fence marker.
   *
   * @param text - Text to check for overlap
   * @param prefixes - List of prefixes to check against
   * @returns Length of the maximum overlap found
   *
   * @example
   * ```typescript
   * computeOverlapLength("hello ``", ["```tool_call"])
   * // Returns: 2 (because "``" matches start of "```tool_call")
   * ```
   *
   * @private
   */
  computeOverlapLength(text, prefixes) {
    let overlap = 0;
    for (const prefix of prefixes) {
      // prefix.length - 1: a complete marker would already have been
      // found by indexOf, so only proper prefixes count as overlap.
      const maxLength = Math.min(text.length, prefix.length - 1);
      for (let size = maxLength; size > 0; size -= 1) {
        if (prefix.startsWith(text.slice(-size))) {
          overlap = Math.max(overlap, size);
          break;
        }
      }
    }
    return overlap;
  }
  /**
   * Checks if the buffer currently contains any text
   */
  hasContent() {
    return this.buffer.length > 0;
  }
  /**
   * Gets the buffer size
   */
  getBufferSize() {
    return this.buffer.length;
  }
  /**
   * Detect and stream fence content in real-time for true incremental streaming
   *
   * This method is designed for streaming tool calls as they arrive:
   * 1. Detects when a fence starts and transitions to "inFence" state
   * 2. While inFence, emits safe content that won't conflict with fence end marker
   * 3. When fence ends, returns the complete fence for parsing
   *
   * NOTE(review): completeFence is reconstructed from FENCE_STARTS[0] plus
   * the buffered content, so its whitespace is normalized (the newline
   * after the opening marker is stripped then re-added) rather than a
   * byte-for-byte copy of the raw stream.
   *
   * @returns Streaming result with current state and safe content to emit
   */
  detectStreamingFence() {
    if (!this.inFence) {
      const { index: startIdx, prefix: matchedPrefix } = this.findFenceStart(
        this.buffer
      );
      if (startIdx === -1) {
        // Not in a fence and no opener found: emit everything except a
        // possible partial opener at the tail.
        const overlap = this.computeOverlapLength(
          this.buffer,
          this.FENCE_STARTS
        );
        const safeTextLength = this.buffer.length - overlap;
        const safeContent = safeTextLength > 0 ? this.buffer.slice(0, safeTextLength) : "";
        this.buffer = this.buffer.slice(safeTextLength);
        return {
          inFence: false,
          safeContent,
          completeFence: null,
          textAfterFence: ""
        };
      }
      const prefixText = this.buffer.slice(0, startIdx);
      const fenceStartLength = matchedPrefix?.length ?? 0;
      // Consume the opening marker; transition into the fence.
      this.buffer = this.buffer.slice(startIdx + fenceStartLength);
      // Drop the newline immediately following the opening marker.
      if (this.buffer.startsWith("\n")) {
        this.buffer = this.buffer.slice(1);
      }
      this.inFence = true;
      this.fenceStartBuffer = "";
      return {
        inFence: true,
        safeContent: prefixText,
        // Emit any text before the fence
        completeFence: null,
        textAfterFence: ""
      };
    }
    const closingIdx = this.buffer.indexOf(this.FENCE_END);
    if (closingIdx === -1) {
      // Still inside the fence: emit all content except a possible
      // partial closing marker at the tail.
      const overlap = this.computeOverlapLength(this.buffer, [this.FENCE_END]);
      const safeContentLength = this.buffer.length - overlap;
      if (safeContentLength > 0) {
        const safeContent = this.buffer.slice(0, safeContentLength);
        this.fenceStartBuffer += safeContent;
        this.buffer = this.buffer.slice(safeContentLength);
        return {
          inFence: true,
          safeContent,
          completeFence: null,
          textAfterFence: ""
        };
      }
      return {
        inFence: true,
        safeContent: "",
        completeFence: null,
        textAfterFence: ""
      };
    }
    // Closing marker found: flush the final piece of fence content and
    // hand back the reconstructed complete fence.
    const fenceContent = this.buffer.slice(0, closingIdx);
    this.fenceStartBuffer += fenceContent;
    const completeFence = `${this.FENCE_STARTS[0]}
${this.fenceStartBuffer}
${this.FENCE_END}`;
    const textAfterFence = this.buffer.slice(
      closingIdx + this.FENCE_END.length
    );
    this.inFence = false;
    this.fenceStartBuffer = "";
    this.buffer = textAfterFence;
    return {
      inFence: false,
      safeContent: fenceContent,
      // Emit the last bit of fence content
      completeFence,
      textAfterFence
    };
  }
  /**
   * Check if currently inside a fence
   */
  isInFence() {
    return this.inFence;
  }
  /**
   * Reset streaming state
   */
  resetStreamingState() {
    this.inFence = false;
    this.fenceStartBuffer = "";
  }
};
938
+
939
// src/built-in-ai-language-model.ts
/** True when the Prompt API global (`LanguageModel`) exists in this environment. */
function doesBrowserSupportBuiltInAI() {
  return typeof LanguageModel !== "undefined";
}
/**
 * Alias of doesBrowserSupportBuiltInAI: checks only that the Prompt API
 * global exists, not whether the model is downloaded and ready.
 */
function isBuiltInAIModelAvailable() {
  return doesBrowserSupportBuiltInAI();
}
946
/**
 * Pulls the tool name out of (possibly partial) tool-call JSON by matching
 * the leading `{"name": "..."` pattern; returns null when absent.
 */
function extractToolName(content) {
  const match = /\{\s*"name"\s*:\s*"([^"]+)"/.exec(content);
  return match ? match[1] : null;
}
953
/**
 * Extracts the raw text of the `"arguments"` value from (possibly partial)
 * tool-call JSON, for incremental argument streaming.
 *
 * Scans character by character, tracking brace/bracket nesting depth and
 * string state, and stops once the top-level object/array closes. For
 * partial input the scan simply runs out of characters and returns what
 * has arrived so far. Returns "" when no `"arguments":` key is present.
 *
 * NOTE(review): scalar argument values (not wrapped in {} or []) have no
 * closing delimiter, so the scan runs to the end of `content` and may
 * include trailing sibling keys — acceptable for streaming previews, but
 * verify callers only pass object/array arguments.
 */
function extractArgumentsContent(content) {
  const match = content.match(/"arguments"\s*:\s*/);
  if (!match || match.index === void 0) {
    return "";
  }
  const startIndex = match.index + match[0].length;
  let result = "";
  let depth = 0; // nesting depth of {} / [] outside strings
  let inString = false; // currently inside a double-quoted string
  let escaped = false; // previous character was a backslash
  let started = false; // first non-whitespace character of the value seen
  for (let i = startIndex; i < content.length; i++) {
    const char = content[i];
    result += char; // accumulate before checks so the closing brace is included
    if (!started) {
      // Skip leading whitespace; the first real character decides whether
      // we are tracking a nested structure (depth 1) or a scalar.
      if (!/\s/.test(char)) {
        started = true;
        if (char === "{" || char === "[") {
          depth = 1;
        }
      }
      continue;
    }
    if (escaped) {
      escaped = false;
      continue;
    }
    if (char === "\\") {
      escaped = true;
      continue;
    }
    if (char === '"') {
      inString = !inString;
      continue;
    }
    // Delimiters only count toward nesting when outside string literals.
    if (!inString) {
      if (char === "{" || char === "[") {
        depth += 1;
      } else if (char === "}" || char === "]") {
        if (depth > 0) {
          depth -= 1;
          if (depth === 0) {
            break; // top-level value complete
          }
        }
      }
    }
  }
  return result;
}
1003
/**
 * LanguageModelV2 ("v2" specification) chat model backed by the browser's
 * built-in Prompt API (provider id "browser-ai").
 *
 * Tool calling is polyfilled: function tools are injected into the system
 * prompt via buildJsonToolSystemPrompt, and the model's fenced JSON output is
 * parsed back into tool-call parts (parseJsonFunctionCalls /
 * ToolCallFenceDetector). Only ONE tool call per turn is emitted
 * (allowParallelToolCalls: false; results are sliced to the first call).
 */
var BuiltInAIChatLanguageModel = class {
  /**
   * @param modelId Identifier recorded on this instance and in this.config.
   * @param options Session options forwarded verbatim to SessionManager.
   */
  constructor(modelId, options = {}) {
    this.specificationVersion = "v2";
    this.provider = "browser-ai";
    // Remote image/audio URLs (http/https) are accepted as multimodal input.
    this.supportedUrls = {
      "image/*": [/^https?:\/\/.+$/],
      "audio/*": [/^https?:\/\/.+$/]
    };
    this.modelId = modelId;
    this.config = {
      provider: this.provider,
      modelId,
      options
    };
    // Owns Prompt API session creation, reuse and availability checks.
    this.sessionManager = new SessionManager(options);
  }
  /**
   * Gets a session with the specified options
   * Delegates to SessionManager for all session lifecycle management
   * @private
   */
  async getSession(options, expectedInputs, systemMessage, onDownloadProgress) {
    return this.sessionManager.getSession({
      ...options,
      expectedInputs,
      systemMessage,
      onDownloadProgress
    });
  }
  /**
   * Converts AI SDK call options into Prompt API inputs.
   *
   * Emits warnings for settings the Prompt API cannot honor
   * (maxOutputTokens, stopSequences, topP, penalties, seed, toolChoice) and
   * for every non-function tool. Maps responseFormat "json" to
   * responseConstraint, and temperature/topK straight through.
   *
   * @param callOptions LanguageModelV2 call options.
   * @returns systemMessage, converted messages, warnings, promptOptions,
   *   multimodal flags/expectedInputs, and the filtered function tools.
   */
  getArgs(callOptions) {
    const {
      prompt,
      maxOutputTokens,
      temperature,
      topP,
      topK,
      frequencyPenalty,
      presencePenalty,
      stopSequences,
      responseFormat,
      seed,
      tools,
      toolChoice,
      providerOptions
    } = callOptions;
    const warnings = [];
    warnings.push(
      ...gatherUnsupportedSettingWarnings({
        maxOutputTokens,
        stopSequences,
        topP,
        presencePenalty,
        frequencyPenalty,
        seed,
        toolChoice
      })
    );
    // Split tools: function tools are supported via the JSON polyfill,
    // everything else only produces a warning.
    const functionTools = (tools ?? []).filter(isFunctionTool);
    const unsupportedTools = (tools ?? []).filter(
      (tool) => !isFunctionTool(tool)
    );
    for (const tool of unsupportedTools) {
      warnings.push(
        createUnsupportedToolWarning(
          tool,
          "Only function tools are supported by the Prompt API polyfill"
        )
      );
    }
    const hasMultiModalInput = hasMultimodalContent(prompt);
    const { systemMessage, messages } = convertToBuiltInAIMessages(prompt);
    const promptOptions = {};
    if (responseFormat?.type === "json") {
      // Prompt API structured output: the JSON schema becomes a constraint.
      promptOptions.responseConstraint = responseFormat.schema;
    }
    if (temperature !== void 0) {
      promptOptions.temperature = temperature;
    }
    if (topK !== void 0) {
      promptOptions.topK = topK;
    }
    return {
      systemMessage,
      messages,
      warnings,
      promptOptions,
      hasMultiModalInput,
      // expectedInputs must be declared at session creation for multimodal use.
      expectedInputs: hasMultiModalInput ? getExpectedInputs(prompt) : void 0,
      functionTools
    };
  }
  /**
   * Generates a complete text response using the browser's built-in Prompt API
   * @param options
   * @returns Promise resolving to the generated content with finish reason, usage stats, and any warnings
   * @throws {LoadSettingError} When the Prompt API is not available or model needs to be downloaded
   * @throws {UnsupportedFunctionalityError} When unsupported features like file input are used
   */
  async doGenerate(options) {
    const converted = this.getArgs(options);
    const {
      systemMessage,
      messages,
      warnings,
      promptOptions,
      expectedInputs,
      functionTools
    } = converted;
    // systemMessage is NOT passed to the session here; it is folded into the
    // tool system prompt below and prepended to the message list instead.
    const session = await this.getSession(void 0, expectedInputs, void 0);
    const systemPrompt = await buildJsonToolSystemPrompt(
      systemMessage,
      functionTools,
      {
        allowParallelToolCalls: false
      }
    );
    const promptMessages = prependSystemPromptToMessages(
      messages,
      systemPrompt
    );
    const rawResponse = await session.prompt(promptMessages, promptOptions);
    const { toolCalls, textContent } = parseJsonFunctionCalls(rawResponse);
    if (toolCalls.length > 0) {
      // Parallel tool calls are disabled: only the first call is surfaced.
      const toolCallsToEmit = toolCalls.slice(0, 1);
      const parts = [];
      if (textContent) {
        parts.push({
          type: "text",
          text: textContent
        });
      }
      for (const call of toolCallsToEmit) {
        parts.push({
          type: "tool-call",
          toolCallId: call.toolCallId,
          toolName: call.toolName,
          input: JSON.stringify(call.args ?? {})
        });
      }
      return {
        content: parts,
        finishReason: "tool-calls",
        // The Prompt API does not report token usage for doGenerate.
        usage: {
          inputTokens: void 0,
          outputTokens: void 0,
          totalTokens: void 0
        },
        request: { body: { messages: promptMessages, options: promptOptions } },
        warnings
      };
    }
    const content = [
      {
        // Fall back to the raw response when no parsed text was extracted.
        type: "text",
        text: textContent || rawResponse
      }
    ];
    return {
      content,
      finishReason: "stop",
      usage: {
        inputTokens: void 0,
        outputTokens: void 0,
        totalTokens: void 0
      },
      request: { body: { messages: promptMessages, options: promptOptions } },
      warnings
    };
  }
  /**
   * Check the availability of the built-in AI model
   * @returns Promise resolving to "unavailable", "available", or "available-after-download"
   */
  async availability() {
    return this.sessionManager.checkAvailability();
  }
  /**
   * Creates a session with download progress monitoring.
   *
   * @example
   * ```typescript
   * const session = await model.createSessionWithProgress(
   *   (progress) => {
   *     console.log(`Download progress: ${Math.round(progress * 100)}%`);
   *   }
   * );
   * ```
   *
   * @param onDownloadProgress Optional callback receiving progress values 0-1 during model download
   * @returns Promise resolving to a configured LanguageModel session
   * @throws {LoadSettingError} When the Prompt API is not available or model is unavailable
   */
  async createSessionWithProgress(onDownloadProgress) {
    return this.sessionManager.createSessionWithProgress(onDownloadProgress);
  }
  /**
   * Generates a streaming text response using the browser's built-in Prompt API
   * @param options
   * @returns Promise resolving to a readable stream of text chunks and request metadata
   * @throws {LoadSettingError} When the Prompt API is not available or model needs to be downloaded
   * @throws {UnsupportedFunctionalityError} When unsupported features like file input are used
   */
  async doStream(options) {
    const converted = this.getArgs(options);
    const {
      systemMessage,
      messages,
      warnings,
      promptOptions,
      expectedInputs,
      functionTools
    } = converted;
    const session = await this.getSession(void 0, expectedInputs, void 0);
    const systemPrompt = await buildJsonToolSystemPrompt(
      systemMessage,
      functionTools,
      {
        allowParallelToolCalls: false
      }
    );
    const promptMessages = prependSystemPromptToMessages(
      messages,
      systemPrompt
    );
    const streamOptions = {
      ...promptOptions,
      signal: options.abortSignal
    };
    const conversationHistory = [...promptMessages];
    // Single text part id reused for all text-start/delta/end events.
    const textId = "text-0";
    const stream = new ReadableStream({
      start: async (controller) => {
        controller.enqueue({
          type: "stream-start",
          warnings
        });
        // Stream state: whether a text part is open, whether finish was
        // emitted, whether the caller aborted, and the active Prompt API
        // stream reader (so abort can cancel it).
        let textStarted = false;
        let finished = false;
        let aborted = false;
        let currentReader = null;
        // Lazily open the text part the first time text is emitted.
        const ensureTextStart = () => {
          if (!textStarted) {
            controller.enqueue({
              type: "text-start",
              id: textId
            });
            textStarted = true;
          }
        };
        const emitTextDelta = (delta) => {
          if (!delta) return;
          ensureTextStart();
          controller.enqueue({
            type: "text-delta",
            id: textId,
            delta
          });
        };
        const emitTextEndIfNeeded = () => {
          if (!textStarted) return;
          controller.enqueue({
            type: "text-end",
            id: textId
          });
          textStarted = false;
        };
        // Close any open text part, emit "finish", and close the stream.
        // Idempotent via the `finished` flag.
        const finishStream = (finishReason) => {
          if (finished) return;
          finished = true;
          emitTextEndIfNeeded();
          controller.enqueue({
            type: "finish",
            finishReason,
            usage: {
              // inputUsage is the only usage figure the session exposes.
              inputTokens: session.inputUsage,
              outputTokens: void 0,
              totalTokens: void 0
            }
          });
          controller.close();
        };
        // Abort: cancel the in-flight Prompt API reader (best-effort) and
        // finish gracefully with "stop" rather than erroring the stream.
        const abortHandler = () => {
          if (aborted) {
            return;
          }
          aborted = true;
          if (currentReader) {
            currentReader.cancel().catch(() => void 0);
          }
          finishStream("stop");
        };
        if (options.abortSignal) {
          options.abortSignal.addEventListener("abort", abortHandler);
        }
        // Safety bound on prompt rounds. NOTE(review): every path through the
        // loop body currently returns, so in practice only one iteration runs.
        const maxIterations = 10;
        let iteration = 0;
        try {
          // Detects ```-fenced JSON tool-call blocks across chunk boundaries.
          const fenceDetector = new ToolCallFenceDetector();
          while (iteration < maxIterations && !aborted && !finished) {
            iteration += 1;
            const promptStream = session.promptStreaming(
              conversationHistory,
              streamOptions
            );
            currentReader = promptStream.getReader();
            // Per-round fence/tool-call tracking state.
            let toolCalls = [];
            let toolBlockDetected = false;
            let trailingTextAfterBlock = "";
            let currentToolCallId = null;
            let toolInputStartEmitted = false;
            let accumulatedFenceContent = "";
            // How many chars of the tool arguments have already been
            // streamed as tool-input-delta events.
            let streamedArgumentsLength = 0;
            let insideFence = false;
            while (!aborted) {
              const { done, value } = await currentReader.read();
              if (done) {
                break;
              }
              fenceDetector.addChunk(value);
              // Drain the detector buffer; each pass either emits text,
              // accumulates fence content, or completes a fence.
              while (fenceDetector.hasContent()) {
                const wasInsideFence = insideFence;
                const result = fenceDetector.detectStreamingFence();
                insideFence = result.inFence;
                let madeProgress = false;
                // Transition: fence just opened. Flush preceding text and
                // allocate a fresh tool-call id.
                if (!wasInsideFence && result.inFence) {
                  if (result.safeContent) {
                    emitTextDelta(result.safeContent);
                    madeProgress = true;
                  }
                  currentToolCallId = `call_${Date.now()}_${Math.random().toString(36).slice(2, 9)}`;
                  toolInputStartEmitted = false;
                  accumulatedFenceContent = "";
                  streamedArgumentsLength = 0;
                  insideFence = true;
                  continue;
                }
                // A full fenced block is available: parse it for tool calls.
                if (result.completeFence) {
                  madeProgress = true;
                  if (result.safeContent) {
                    accumulatedFenceContent += result.safeContent;
                  }
                  // Flush any argument chars not yet streamed as deltas.
                  if (toolInputStartEmitted && currentToolCallId) {
                    const argsContent = extractArgumentsContent(
                      accumulatedFenceContent
                    );
                    if (argsContent.length > streamedArgumentsLength) {
                      const delta = argsContent.slice(streamedArgumentsLength);
                      streamedArgumentsLength = argsContent.length;
                      if (delta.length > 0) {
                        controller.enqueue({
                          type: "tool-input-delta",
                          id: currentToolCallId,
                          delta
                        });
                      }
                    }
                  }
                  const parsed = parseJsonFunctionCalls(result.completeFence);
                  const parsedToolCalls = parsed.toolCalls;
                  // Parallel tool calls disabled: keep only the first.
                  const selectedToolCalls = parsedToolCalls.slice(0, 1);
                  if (selectedToolCalls.length === 0) {
                    // Fenced block was not a tool call — treat it as text.
                    toolCalls = [];
                    toolBlockDetected = false;
                    emitTextDelta(result.completeFence);
                    if (result.textAfterFence) {
                      emitTextDelta(result.textAfterFence);
                    }
                    currentToolCallId = null;
                    toolInputStartEmitted = false;
                    accumulatedFenceContent = "";
                    streamedArgumentsLength = 0;
                    insideFence = false;
                    continue;
                  }
                  // Keep the id already used for streamed input deltas.
                  if (selectedToolCalls.length > 0 && currentToolCallId) {
                    selectedToolCalls[0].toolCallId = currentToolCallId;
                  }
                  toolCalls = selectedToolCalls;
                  toolBlockDetected = toolCalls.length > 0;
                  for (const [index, call] of toolCalls.entries()) {
                    const toolCallId = index === 0 && currentToolCallId ? currentToolCallId : call.toolCallId;
                    const toolName = call.toolName;
                    const argsJson = JSON.stringify(call.args ?? {});
                    if (toolCallId === currentToolCallId) {
                      // Call whose input was streamed incrementally: only
                      // emit whatever argument suffix is still missing.
                      if (!toolInputStartEmitted) {
                        controller.enqueue({
                          type: "tool-input-start",
                          id: toolCallId,
                          toolName
                        });
                        toolInputStartEmitted = true;
                      }
                      const argsContent = extractArgumentsContent(
                        accumulatedFenceContent
                      );
                      if (argsContent.length > streamedArgumentsLength) {
                        const delta = argsContent.slice(
                          streamedArgumentsLength
                        );
                        streamedArgumentsLength = argsContent.length;
                        if (delta.length > 0) {
                          controller.enqueue({
                            type: "tool-input-delta",
                            id: toolCallId,
                            delta
                          });
                        }
                      }
                    } else {
                      // Call discovered only at parse time: emit its full
                      // input as a single delta.
                      controller.enqueue({
                        type: "tool-input-start",
                        id: toolCallId,
                        toolName
                      });
                      if (argsJson.length > 0) {
                        controller.enqueue({
                          type: "tool-input-delta",
                          id: toolCallId,
                          delta: argsJson
                        });
                      }
                    }
                    controller.enqueue({
                      type: "tool-input-end",
                      id: toolCallId
                    });
                    controller.enqueue({
                      type: "tool-call",
                      toolCallId,
                      toolName,
                      input: argsJson,
                      providerExecuted: false
                    });
                  }
                  // Text after the fence is buffered and emitted at finish.
                  trailingTextAfterBlock += result.textAfterFence;
                  madeProgress = true;
                  if (toolBlockDetected && currentReader) {
                    // Tool call found: stop reading model output this round.
                    await currentReader.cancel().catch(() => void 0);
                    break;
                  }
                  currentToolCallId = null;
                  toolInputStartEmitted = false;
                  accumulatedFenceContent = "";
                  streamedArgumentsLength = 0;
                  insideFence = false;
                  continue;
                }
                // Inside an open fence: accumulate and, once a tool name is
                // recognizable, stream argument deltas eagerly.
                if (insideFence) {
                  if (result.safeContent) {
                    accumulatedFenceContent += result.safeContent;
                    madeProgress = true;
                    const toolName = extractToolName(accumulatedFenceContent);
                    if (toolName && !toolInputStartEmitted && currentToolCallId) {
                      controller.enqueue({
                        type: "tool-input-start",
                        id: currentToolCallId,
                        toolName
                      });
                      toolInputStartEmitted = true;
                    }
                    if (toolInputStartEmitted && currentToolCallId) {
                      const argsContent = extractArgumentsContent(
                        accumulatedFenceContent
                      );
                      if (argsContent.length > streamedArgumentsLength) {
                        const delta = argsContent.slice(
                          streamedArgumentsLength
                        );
                        streamedArgumentsLength = argsContent.length;
                        if (delta.length > 0) {
                          controller.enqueue({
                            type: "tool-input-delta",
                            id: currentToolCallId,
                            delta
                          });
                        }
                      }
                    }
                  }
                  continue;
                }
                // Plain text outside any fence.
                if (!insideFence && result.safeContent) {
                  emitTextDelta(result.safeContent);
                  madeProgress = true;
                }
                // Detector produced nothing usable this pass — wait for more
                // chunks to avoid spinning.
                if (!madeProgress) {
                  break;
                }
              }
              if (toolBlockDetected) {
                break;
              }
            }
            currentReader = null;
            if (aborted) {
              return;
            }
            // Flush any text still buffered in the detector (e.g. an
            // unterminated fence at end of stream).
            if (!toolBlockDetected && fenceDetector.hasContent()) {
              emitTextDelta(fenceDetector.getBuffer());
              fenceDetector.clearBuffer();
            }
            if (!toolBlockDetected || toolCalls.length === 0) {
              finishStream("stop");
              return;
            }
            if (trailingTextAfterBlock) {
              emitTextDelta(trailingTextAfterBlock);
            }
            finishStream("tool-calls");
            return;
          }
          // Loop exhausted without finishing (defensive; see maxIterations).
          if (!finished && !aborted) {
            finishStream("other");
          }
        } catch (error) {
          // Surface the failure as a stream part rather than rejecting.
          controller.enqueue({ type: "error", error });
          controller.close();
        } finally {
          if (options.abortSignal) {
            options.abortSignal.removeEventListener("abort", abortHandler);
          }
        }
      }
    });
    return {
      stream,
      request: { body: { messages: promptMessages, options: promptOptions } }
    };
  }
};
1533
+
1534
+ // src/built-in-ai-embedding-model.ts
1535
+ var import_tasks_text = require("@mediapipe/tasks-text");
1536
/**
 * Text-embedding model ("v2" specification, provider "google-mediapipe")
 * backed by MediaPipe's TextEmbedder running on WASM.
 *
 * NOTE(review): the constructor eagerly fetches the model asset and starts
 * embedder initialization, so instantiating this class triggers network I/O.
 */
var BuiltInAIEmbeddingModel = class {
  /**
   * @param settings Optional overrides: wasmLoaderPath, wasmBinaryPath,
   *   modelAssetPath, l2Normalize, quantize, delegate.
   */
  constructor(settings = {}) {
    this.specificationVersion = "v2";
    this.provider = "google-mediapipe";
    this.modelId = "embedding";
    this.supportsParallelCalls = true;
    this.maxEmbeddingsPerCall = void 0;
    // Defaults; merged with caller-supplied settings below (caller wins).
    this.settings = {
      wasmLoaderPath: "https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/text_wasm_internal.js",
      wasmBinaryPath: "https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/text_wasm_internal.wasm",
      modelAssetPath: "https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/universal_sentence_encoder.tflite",
      l2Normalize: false,
      quantize: false
    };
    // Builds the TextEmbedder once; the resulting promise is cached on
    // this.textEmbedder and awaited by doEmbed.
    this.getTextEmbedder = async () => {
      return import_tasks_text.TextEmbedder.createFromOptions(
        {
          wasmBinaryPath: this.settings.wasmBinaryPath,
          wasmLoaderPath: this.settings.wasmLoaderPath
        },
        {
          baseOptions: {
            modelAssetBuffer: await this.modelAssetBuffer,
            delegate: this.settings.delegate
          },
          l2Normalize: this.settings.l2Normalize,
          quantize: this.settings.quantize
        }
      );
    };
    /**
     * Embeds each input string synchronously through MediaPipe.
     * @param options { values: string[], abortSignal?: AbortSignal }
     * @returns embeddings (number[][]) plus a rawResponse summary.
     * @throws {Error} When the abort signal already fired, or when model
     *   download/initialization failed.
     */
    this.doEmbed = async (options) => {
      if (options.abortSignal?.aborted) {
        throw new Error("Operation was aborted");
      }
      const embedder = await this.textEmbedder;
      const embeddings = options.values.map((text) => {
        const embedderResult = embedder.embed(text);
        const [embedding] = embedderResult.embeddings;
        // Empty array when MediaPipe returns no float embedding for a value.
        return embedding?.floatEmbedding ?? [];
      });
      return {
        embeddings,
        rawResponse: {
          model: "universal_sentence_encoder",
          provider: "google-mediapipe",
          processed_texts: options.values.length
        }
      };
    };
    this.settings = { ...this.settings, ...settings };
    // FIX: the response was previously consumed unconditionally; a non-2xx
    // status or null body surfaced later as an opaque TypeError inside
    // MediaPipe. Fail fast with a descriptive error instead.
    this.modelAssetBuffer = fetch(this.settings.modelAssetPath).then(
      (response) => {
        if (!response.ok) {
          throw new Error(
            `Failed to fetch embedding model asset (HTTP ${response.status}) from ${this.settings.modelAssetPath}`
          );
        }
        if (!response.body) {
          throw new Error("Embedding model asset response has no body");
        }
        return response.body.getReader();
      }
    );
    this.textEmbedder = this.getTextEmbedder();
    // FIX: attach a no-op rejection handler so an initialization failure that
    // occurs before the first doEmbed() await does not raise a global
    // unhandled-rejection; awaiting this.textEmbedder still rethrows.
    this.textEmbedder.catch(() => void 0);
  }
};
1592
+
1593
+ // src/built-in-ai-provider.ts
1594
+ var import_provider3 = require("@ai-sdk/provider");
1595
/**
 * Builds the browser-AI provider: a callable that returns a chat model for a
 * given model id, with named factories for chat and text-embedding models.
 * Image, speech and transcription models are unsupported and always throw
 * NoSuchModelError.
 *
 * @param options Currently unused; reserved for future provider options.
 * @returns The provider function with model-factory properties attached.
 */
function createBuiltInAI(options = {}) {
  const buildChatModel = (modelId, settings) =>
    new BuiltInAIChatLanguageModel(modelId, settings);
  // The embedding model ignores the model id; only settings are used.
  const buildEmbeddingModel = (modelId, settings) =>
    new BuiltInAIEmbeddingModel(settings);
  // Factory producing a thrower for each unsupported model type.
  const unsupportedModel = (modelType) => (modelId) => {
    throw new import_provider3.NoSuchModelError({ modelId, modelType });
  };
  const provider = function(modelId = "text", settings) {
    // Guard against `new builtInAI(...)` — the provider is a plain factory.
    if (new.target) {
      throw new Error(
        "The BuiltInAI model function cannot be called with the new keyword."
      );
    }
    return buildChatModel(modelId, settings);
  };
  Object.assign(provider, {
    languageModel: buildChatModel,
    chat: buildChatModel,
    textEmbedding: buildEmbeddingModel,
    textEmbeddingModel: buildEmbeddingModel,
    imageModel: unsupportedModel("imageModel"),
    speechModel: unsupportedModel("speechModel"),
    transcriptionModel: unsupportedModel("transcriptionModel")
  });
  return provider;
}
1625
// Default provider instance, created with no options.
var builtInAI = createBuiltInAI();
// Annotate the CommonJS export names for ESM import in node:
// (Generated, intentionally dead code: the `0 &&` guard means it never runs;
// tooling parses it statically to discover the named exports. Do not edit.)
0 && (module.exports = {
  BuiltInAIChatLanguageModel,
  BuiltInAIEmbeddingModel,
  builtInAI,
  createBuiltInAI,
  doesBrowserSupportBuiltInAI,
  isBuiltInAIModelAvailable
});
1635
+ //# sourceMappingURL=index.js.map