@browser-ai/core 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,1607 @@
1
+ // src/convert-to-built-in-ai-messages.ts
2
+ import {
3
+ UnsupportedFunctionalityError
4
+ } from "@ai-sdk/provider";
5
+
6
+ // src/tool-calling/format-tool-results.ts
7
/**
 * Builds the JSON payload object for a single tool result.
 *
 * Shape: { name, result, error, [id] } — `result` falls back to null and
 * `error` is coerced to a strict boolean; `id` is included only when the
 * call carried a toolCallId.
 */
function buildResultPayload(result) {
  const { toolName, result: value, isError, toolCallId } = result;
  const payload = {
    name: toolName,
    result: value ?? null,
    error: Boolean(isError)
  };
  if (toolCallId) {
    payload.id = toolCallId;
  }
  return payload;
}

/**
 * Serializes tool results into a ```tool_result fence, one JSON object per
 * line, for feeding back to the model as plain text.
 *
 * @param results - Array of tool results (may be null/undefined/empty).
 * @returns The fenced block, or "" when there is nothing to format.
 */
function formatToolResults(results) {
  if (!results || results.length === 0) {
    return "";
  }
  const serializedLines = [];
  for (const entry of results) {
    serializedLines.push(JSON.stringify(buildResultPayload(entry)));
  }
  return "```tool_result\n" + serializedLines.join("\n") + "\n```";
}
29
+
30
+ // src/convert-to-built-in-ai-messages.ts
31
/**
 * Decodes a base64 string into raw bytes.
 *
 * @param {string} base64 - Base64-encoded data (no data: URL prefix).
 * @returns {Uint8Array} The decoded bytes.
 * @throws {Error} When the input is not valid base64. The wrapper message is
 *   unchanged; the underlying decode error is now preserved via `cause` so
 *   the original stack is not lost (bug fix: the original rethrow discarded it).
 */
function convertBase64ToUint8Array(base64) {
  try {
    const binaryString = atob(base64);
    const bytes = new Uint8Array(binaryString.length);
    for (let i = 0; i < binaryString.length; i++) {
      bytes[i] = binaryString.charCodeAt(i);
    }
    return bytes;
  } catch (error) {
    throw new Error(`Failed to convert base64 to Uint8Array: ${error}`, {
      cause: error
    });
  }
}

/**
 * Normalizes file-part data into a form the Prompt API accepts.
 *
 * - URL            -> its string form (fetched by the browser).
 * - Uint8Array     -> passed through unchanged.
 * - string         -> treated as base64 and decoded to bytes.
 *
 * @param data - URL, Uint8Array, or base64 string.
 * @param mediaType - Media type, used only for error messages here.
 * @returns {string|Uint8Array} Normalized data.
 * @throws {Error} For any other data type.
 */
function convertFileData(data, mediaType) {
  if (data instanceof URL) {
    return data.toString();
  }
  if (data instanceof Uint8Array) {
    return data;
  }
  if (typeof data === "string") {
    return convertBase64ToUint8Array(data);
  }
  // Exhaustiveness guard kept from the TS source.
  const exhaustiveCheck = data;
  throw new Error(`Unexpected data type for ${mediaType}: ${exhaustiveCheck}`);
}
56
/**
 * Normalizes a tool call's arguments into an object (or the raw string when
 * it is not valid JSON).
 *
 * undefined/null -> {}; JSON string -> parsed value; unparsable string ->
 * returned as-is; anything else -> passed through.
 */
function normalizeToolArguments(input) {
  if (typeof input === "string") {
    try {
      return JSON.parse(input);
    } catch {
      return input;
    }
  }
  return input ?? {};
}

/**
 * Serializes assistant tool-call parts into a ```tool_call fence, one JSON
 * object per line: { name, arguments, [id] }.
 *
 * @param parts - Tool-call parts collected from an assistant message.
 * @returns The fenced block, or "" when there are no calls.
 */
function formatToolCallsJson(parts) {
  if (parts.length === 0) {
    return "";
  }
  const serialized = parts.map((call) => {
    const entry = {
      name: call.toolName,
      arguments: normalizeToolArguments(call.input)
    };
    if (call.toolCallId) {
      entry.id = call.toolCallId;
    }
    return JSON.stringify(entry);
  });
  return "```tool_call\n" + serialized.join("\n") + "\n```";
}
87
/**
 * Maps a tool-result output union onto a { value, isError } pair.
 *
 * "error-text"/"error-json" are flagged as errors; "text"/"json"/"content"
 * are not. Unknown types fall through with the whole output as the value.
 */
function convertToolResultOutput(output) {
  switch (output.type) {
    case "error-text":
    case "error-json":
      return { value: output.value, isError: true };
    case "text":
    case "json":
    case "content":
      return { value: output.value, isError: false };
    default: {
      // Unknown variant: surface it verbatim rather than dropping it.
      const exhaustiveCheck = output;
      return { value: exhaustiveCheck, isError: false };
    }
  }
}

/**
 * Converts a tool-message part into the flat tool-result record used by
 * formatToolResults().
 */
function toToolResult(part) {
  const converted = convertToolResultOutput(part.output);
  return {
    toolCallId: part.toolCallId,
    toolName: part.toolName,
    result: converted.value,
    isError: converted.isError
  };
}
114
/**
 * Converts an AI SDK prompt into the message shape expected by the browser
 * Prompt API.
 *
 * - The last `system` message wins and is returned separately as
 *   `systemMessage` (the Prompt API takes it via initialPrompts).
 * - `user` text/file parts map to Prompt API `text`/`image`/`audio` parts.
 * - `assistant` text and reasoning parts are concatenated; tool calls are
 *   serialized into a ```tool_call fence appended after the text.
 * - `tool` messages are serialized into a ```tool_result fence and sent as a
 *   `user` message, since the Prompt API has no tool role.
 *
 * Fixes vs. original: the assistant branch had a duplicated conditional
 * (`if (text.trim().length > 0) push(text); else if (text.length > 0)
 * push(text);`) that collapses to a single non-empty check, the defensive
 * `prompt.slice()` copy was dead (iteration is read-only), and the
 * `contentSegments.length > 0 ? join : ""` ternary was redundant
 * ([].join("\n") is already "").
 *
 * @param prompt - Array of AI SDK prompt messages.
 * @returns {{ systemMessage: (string|undefined), messages: Array }}
 * @throws {UnsupportedFunctionalityError} For unsupported part types.
 * @throws {Error} For unknown message roles.
 */
function convertToBuiltInAIMessages(prompt) {
  let systemMessage;
  const messages = [];
  for (const message of prompt) {
    switch (message.role) {
      case "system": {
        // Later system messages overwrite earlier ones.
        systemMessage = message.content;
        break;
      }
      case "user": {
        messages.push({
          role: "user",
          content: message.content.map((part) => {
            switch (part.type) {
              case "text": {
                return {
                  type: "text",
                  value: part.text
                };
              }
              case "file": {
                const { mediaType, data } = part;
                if (mediaType?.startsWith("image/")) {
                  return {
                    type: "image",
                    value: convertFileData(data, mediaType)
                  };
                } else if (mediaType?.startsWith("audio/")) {
                  return {
                    type: "audio",
                    value: convertFileData(data, mediaType)
                  };
                } else {
                  // Only image/audio files are supported by the Prompt API.
                  throw new UnsupportedFunctionalityError({
                    functionality: `file type: ${mediaType}`
                  });
                }
              }
              default: {
                const exhaustiveCheck = part;
                throw new UnsupportedFunctionalityError({
                  functionality: `content type: ${exhaustiveCheck.type ?? "unknown"}`
                });
              }
            }
          })
        });
        break;
      }
      case "assistant": {
        let text = "";
        const toolCallParts = [];
        for (const part of message.content) {
          switch (part.type) {
            // Reasoning is flattened into plain text alongside regular text.
            case "text":
            case "reasoning": {
              text += part.text;
              break;
            }
            case "tool-call": {
              toolCallParts.push(part);
              break;
            }
            case "file": {
              throw new UnsupportedFunctionalityError({
                functionality: "assistant file attachments"
              });
            }
            case "tool-result": {
              throw new UnsupportedFunctionalityError({
                functionality: "tool-result parts in assistant messages (should be in tool messages)"
              });
            }
            default: {
              const exhaustiveCheck = part;
              throw new UnsupportedFunctionalityError({
                functionality: `assistant part type: ${exhaustiveCheck.type ?? "unknown"}`
              });
            }
          }
        }
        const toolCallJson = formatToolCallsJson(toolCallParts);
        const contentSegments = [];
        // Any non-empty text is included verbatim (whitespace-only included,
        // matching the original's else-if branch).
        if (text.length > 0) {
          contentSegments.push(text);
        }
        if (toolCallJson) {
          contentSegments.push(toolCallJson);
        }
        messages.push({
          role: "assistant",
          content: contentSegments.join("\n")
        });
        break;
      }
      case "tool": {
        // Tool results go back to the model as a user message containing a
        // ```tool_result fence.
        const results = message.content.map(toToolResult);
        messages.push({
          role: "user",
          content: formatToolResults(results)
        });
        break;
      }
      default: {
        const exhaustiveCheck = message;
        throw new Error(
          `Unsupported role: ${exhaustiveCheck.role ?? "unknown"}`
        );
      }
    }
  }
  return { systemMessage, messages };
}
238
+
239
+ // src/tool-calling/build-json-system-prompt.ts
240
/**
 * Builds the system prompt that teaches the model the JSON tool-calling
 * protocol (```tool_call / ```tool_result fences).
 *
 * With no tools, the original system prompt is returned unchanged. Otherwise
 * the tool schemas and protocol instructions are appended after the trimmed
 * original prompt.
 *
 * @param originalSystemPrompt - The caller's system prompt (may be empty).
 * @param tools - Function tools to advertise to the model.
 * @param options - Reserved for future protocol options (currently unused).
 */
function buildJsonToolSystemPrompt(originalSystemPrompt, tools, options) {
  if (!tools || tools.length === 0) {
    return originalSystemPrompt || "";
  }
  const parallelInstruction = "Only request one tool call at a time. Wait for tool results before asking for another tool.";
  const toolSchemas = [];
  for (const tool of tools) {
    toolSchemas.push({
      name: tool.name,
      description: tool.description ?? "No description provided.",
      parameters: getParameters(tool) || { type: "object", properties: {} }
    });
  }
  const toolsJson = JSON.stringify(toolSchemas, null, 2);
  const instructionBody = `You are a helpful AI assistant with access to tools.

# Available Tools
${toolsJson}

# Tool Calling Instructions
${parallelInstruction}

To call a tool, output JSON in this exact format inside a \`\`\`tool_call code fence:

\`\`\`tool_call
{"name": "tool_name", "arguments": {"param1": "value1", "param2": "value2"}}
\`\`\`

Tool responses will be provided in \`\`\`tool_result fences. Each line contains JSON like:
\`\`\`tool_result
{"id": "call_123", "name": "tool_name", "result": {...}, "error": false}
\`\`\`
Use the \`result\` payload (and treat \`error\` as a boolean flag) when continuing the conversation.

Important:
- Use exact tool and parameter names from the schema above
- Arguments must be a valid JSON object matching the tool's parameters
- You can include brief reasoning before or after the tool call
- If no tool is needed, respond directly without tool_call fences`;
  const trimmedOriginal = originalSystemPrompt?.trim();
  if (trimmedOriginal) {
    return `${trimmedOriginal}\n\n${instructionBody}`;
  }
  return instructionBody;
}

/**
 * Reads the JSON schema for a tool, supporting both the legacy `parameters`
 * field and the newer `inputSchema` field.
 */
function getParameters(tool) {
  return "parameters" in tool ? tool.parameters : tool.inputSchema;
}
292
+
293
+ // src/tool-calling/parse-json-function-calls.ts
294
// Matches ```tool_call … ``` fences (also tolerates "tool-call"/"toolcall").
var JSON_TOOL_CALL_FENCE_REGEX = /```tool[_-]?call\s*([\s\S]*?)```/gi;

/** Generates a unique-ish id for tool calls the model did not label itself. */
function generateToolCallId() {
  return `call_${Date.now()}_${Math.random().toString(36).slice(2, 9)}`;
}

/**
 * Converts one parsed fence entry into a tool-call part.
 * Returns null when the entry has no `name` (and therefore is not a call).
 */
function toToolCallPart(call) {
  if (!call.name) {
    return null;
  }
  return {
    type: "tool-call",
    toolCallId: call.id || generateToolCallId(),
    toolName: call.name,
    args: call.arguments || {}
  };
}

/**
 * Extracts JSON tool calls from a model response.
 *
 * Each ```tool_call fence is parsed first as one JSON value (object or
 * array); if that fails, each non-empty line inside the fence is parsed as
 * its own JSON object (JSON-lines fallback). Fences are stripped from the
 * returned text and runs of blank lines are collapsed.
 *
 * Fix vs. original: the outer try/catch around the parse logic was
 * unreachable (every throwing statement was already inside an inner
 * try/catch), so it and its console.warn have been removed; the duplicated
 * push logic is factored into toToolCallPart().
 *
 * @param response - Raw model output text.
 * @returns {{ toolCalls: Array, textContent: string }}
 */
function parseJsonFunctionCalls(response) {
  const matches = Array.from(response.matchAll(JSON_TOOL_CALL_FENCE_REGEX));
  // Defensive reset; matchAll clones the regex, but keep shared state clean.
  JSON_TOOL_CALL_FENCE_REGEX.lastIndex = 0;
  if (matches.length === 0) {
    return { toolCalls: [], textContent: response };
  }
  const toolCalls = [];
  let textContent = response;
  for (const match of matches) {
    const [fullFence, innerContent] = match;
    // Remove this fence occurrence from the surrounding text.
    textContent = textContent.replace(fullFence, "");
    const trimmed = innerContent.trim();
    try {
      const parsed = JSON.parse(trimmed);
      const callsArray = Array.isArray(parsed) ? parsed : [parsed];
      for (const call of callsArray) {
        const part = toToolCallPart(call);
        if (part) {
          toolCalls.push(part);
        }
      }
    } catch {
      // JSON-lines fallback: one call object per non-empty line.
      for (const line of trimmed.split("\n")) {
        if (!line.trim()) {
          continue;
        }
        try {
          const part = toToolCallPart(JSON.parse(line.trim()));
          if (part) {
            toolCalls.push(part);
          }
        } catch {
          continue;
        }
      }
    }
  }
  textContent = textContent.replace(/\n{2,}/g, "\n");
  return { toolCalls, textContent: textContent.trim() };
}
348
+
349
+ // src/utils/warnings.ts
350
/** Builds an "unsupported-setting" call warning record. */
function createUnsupportedSettingWarning(setting, details) {
  return {
    type: "unsupported-setting",
    setting,
    details
  };
}

/** Builds an "unsupported-tool" call warning record. */
function createUnsupportedToolWarning(tool, details) {
  return {
    type: "unsupported-tool",
    tool,
    details
  };
}

// Settings the Prompt API cannot honor; order here fixes warning order.
var UNSUPPORTED_PROMPT_API_SETTINGS = [
  "maxOutputTokens",
  "stopSequences",
  "topP",
  "presencePenalty",
  "frequencyPenalty",
  "seed",
  "toolChoice"
];

/**
 * Collects warnings for every request setting the Prompt API ignores.
 *
 * Fix vs. original: seven copy-pasted if-blocks (identical except for the
 * setting name) are replaced by a single data-driven loop; output order and
 * messages are unchanged. A setting warns only when it is neither null nor
 * undefined (`!= null`), so explicit 0/""/false still warn.
 *
 * @param options - Subset of call options to inspect.
 * @returns Array of unsupported-setting warnings (possibly empty).
 */
function gatherUnsupportedSettingWarnings(options) {
  const warnings = [];
  for (const setting of UNSUPPORTED_PROMPT_API_SETTINGS) {
    if (options[setting] != null) {
      warnings.push(
        createUnsupportedSettingWarning(
          setting,
          `${setting} is not supported by Prompt API`
        )
      );
    }
  }
  return warnings;
}
424
+
425
+ // src/utils/prompt-utils.ts
426
/**
 * Reports whether any user message in the prompt carries a file part
 * (image/audio attachment), which requires a multimodal session.
 */
function hasMultimodalContent(prompt) {
  for (const message of prompt) {
    if (message.role !== "user") {
      continue;
    }
    for (const part of message.content) {
      if (part.type === "file") {
        return true;
      }
    }
  }
  return false;
}
438
/**
 * Derives the Prompt API `expectedInputs` list from the prompt: one entry
 * per distinct media kind ("image"/"audio") found in user file parts.
 * Insertion order is preserved (Set semantics).
 */
function getExpectedInputs(prompt) {
  const detected = /* @__PURE__ */ new Set();
  for (const message of prompt) {
    if (message.role !== "user") {
      continue;
    }
    for (const part of message.content) {
      if (part.type !== "file") {
        continue;
      }
      if (part.mediaType?.startsWith("image/")) {
        detected.add("image");
      } else if (part.mediaType?.startsWith("audio/")) {
        detected.add("audio");
      }
    }
  }
  return [...detected].map((type) => ({ type }));
}
455
/**
 * Injects the system prompt into the message list by prefixing the first
 * user message (the Prompt API has no per-request system slot once a
 * session exists).
 *
 * - Blank prompt: input returned untouched.
 * - First user message with array content: a text part is unshifted.
 * - First user message with string content: the prompt is prepended.
 * - No user message at all: a new user message carries the prompt.
 *
 * Messages are shallow-copied; the caller's array is never mutated.
 */
function prependSystemPromptToMessages(messages, systemPrompt) {
  if (!systemPrompt.trim()) {
    return messages;
  }
  const result = messages.map((message) => ({ ...message }));
  const userIdx = result.findIndex((message) => message.role === "user");
  if (userIdx === -1) {
    result.unshift({
      role: "user",
      content: systemPrompt
    });
    return result;
  }
  const target = result[userIdx];
  if (Array.isArray(target.content)) {
    result[userIdx] = {
      ...target,
      content: [
        { type: "text", value: `${systemPrompt}\n\n` },
        ...target.content
      ]
    };
  } else if (typeof target.content === "string") {
    result[userIdx] = {
      ...target,
      content: `${systemPrompt}\n\n${target.content}`
    };
  }
  return result;
}
493
+
494
+ // src/utils/tool-utils.ts
495
/**
 * Type guard: narrows a tool to the function-tool shape. Only function
 * tools can be described to the model via JSON schemas.
 */
function isFunctionTool(tool) {
  const { type } = tool;
  return type === "function";
}
498
+
499
+ // src/models/session-manager.ts
500
+ import { LoadSettingError } from "@ai-sdk/provider";
501
/**
 * Manages the lifecycle of a single Prompt API `LanguageModel` session.
 *
 * One session is created lazily on first use and cached; it is reused for
 * all later calls until destroySession() releases it. Relies on the global
 * `LanguageModel` object exposed by Chromium's built-in AI.
 */
var SessionManager = class {
  /**
   * Creates a new SessionManager
   *
   * @param baseOptions - Base configuration options merged into every
   *   session-creation call
   */
  constructor(baseOptions) {
    this.session = null;
    this.baseOptions = baseOptions;
  }
  /**
   * Gets or creates a session with the specified options
   *
   * If a session already exists, it will be reused unless force create is needed.
   *
   * @param options - Optional session creation options
   * @returns Promise resolving to a LanguageModel session
   * @throws {LoadSettingError} When Prompt API is not available or model is unavailable
   *
   * @example
   * ```typescript
   * const session = await manager.getSession({
   *   systemMessage: "You are a helpful assistant",
   *   expectedInputs: [{ type: "image" }],
   *   temperature: 0.8
   * });
   * ```
   */
  async getSession(options) {
    if (typeof LanguageModel === "undefined") {
      throw new LoadSettingError({
        message: "Prompt API is not available. This library requires Chrome or Edge browser with built-in AI capabilities."
      });
    }
    // NOTE(review): when a session is already cached, `options` are ignored
    // and the cached session is returned as-is; call destroySession() first
    // to apply different options.
    if (this.session) {
      return this.session;
    }
    const availability = await LanguageModel.availability();
    if (availability === "unavailable") {
      throw new LoadSettingError({
        message: "Built-in model not available in this browser"
      });
    }
    // "downloadable"/"downloading" fall through here — presumably
    // LanguageModel.create() awaits the download; confirm against the
    // Prompt API spec.
    const sessionOptions = this.prepareSessionOptions(options);
    this.session = await LanguageModel.create(sessionOptions);
    return this.session;
  }
  /**
   * Creates a session with download progress monitoring
   *
   * This is a convenience method for users who want explicit progress tracking.
   *
   * @param onDownloadProgress - Optional callback receiving progress during download
   * @returns Promise resolving to a LanguageModel session
   * @throws {LoadSettingError} When Prompt API is not available or model is unavailable
   *
   * @example
   * ```typescript
   * const session = await manager.createSessionWithProgress(
   *   (progress) => {
   *     console.log(`Download: ${Math.round(progress * 100)}%`);
   *   }
   * );
   * ```
   */
  async createSessionWithProgress(onDownloadProgress) {
    return this.getSession({ onDownloadProgress });
  }
  /**
   * Checks the availability status of the built-in AI model
   *
   * @returns Promise resolving to availability status
   * - "unavailable": Model is not supported
   * - "downloadable": Model needs to be downloaded
   * - "downloading": Model is currently downloading
   * - "available": Model is ready to use
   *
   * @example
   * ```typescript
   * const status = await manager.checkAvailability();
   * if (status === "downloadable") {
   *   console.log("Model needs to be downloaded first");
   * }
   * ```
   */
  async checkAvailability() {
    if (typeof LanguageModel === "undefined") {
      return "unavailable";
    }
    return LanguageModel.availability();
  }
  /**
   * Gets the current session if it exists
   *
   * @returns The current session or null if none exists
   */
  getCurrentSession() {
    return this.session;
  }
  /**
   * Destroys the current session
   *
   * Use this when you want to force creation of a new session
   * with different options on the next getSession call.
   */
  destroySession() {
    // Guard: some polyfills may not implement destroy().
    if (this.session && typeof this.session.destroy === "function") {
      this.session.destroy();
    }
    this.session = null;
  }
  /**
   * Prepares merged session options from base config and request options
   *
   * Request options override baseOptions; the custom systemMessage /
   * expectedInputs / onDownloadProgress fields are translated into their
   * LanguageModel.create() equivalents rather than passed through.
   *
   * @param options - Optional request-specific options
   * @returns Merged and sanitized options ready for LanguageModel.create()
   * @private
   */
  prepareSessionOptions(options) {
    const mergedOptions = { ...this.baseOptions };
    if (options) {
      const {
        systemMessage,
        expectedInputs,
        onDownloadProgress,
        ...createOptions
      } = options;
      Object.assign(mergedOptions, createOptions);
      // The Prompt API receives the system prompt via initialPrompts.
      if (systemMessage) {
        mergedOptions.initialPrompts = [
          { role: "system", content: systemMessage }
        ];
      }
      if (expectedInputs && expectedInputs.length > 0) {
        mergedOptions.expectedInputs = expectedInputs;
      }
      // Bridge the progress callback to create()'s monitor hook. e.loaded is
      // forwarded verbatim — presumably a 0-1 fraction; confirm against the
      // Prompt API downloadprogress event docs.
      if (onDownloadProgress) {
        mergedOptions.monitor = (m) => {
          m.addEventListener("downloadprogress", (e) => {
            onDownloadProgress(e.loaded);
          });
        };
      }
    }
    this.sanitizeOptions(mergedOptions);
    return mergedOptions;
  }
  /**
   * Removes custom options that aren't part of LanguageModel.create API
   *
   * Currently a no-op; kept as an extension point (mutates in place, so
   * callers need no return value).
   *
   * @param options - Options object to sanitize in-place
   * @private
   */
  sanitizeOptions(options) {
  }
};
657
+
658
+ // src/streaming/tool-call-detector.ts
659
/**
 * Incremental detector for ```tool_call fences in streamed model output.
 *
 * Two usage modes share one text buffer:
 * - detectFence(): batch-style — waits until a whole fence (open + close)
 *   is buffered, then returns it in one piece.
 * - detectStreamingFence(): stateful — tracks whether the stream is
 *   currently inside a fence and emits interior content incrementally.
 *
 * In both modes, text that could be the start of a fence marker (e.g. a
 * trailing "``") is held back until enough input arrives to disambiguate.
 */
var ToolCallFenceDetector = class {
  constructor() {
    // Recognized fence openers (list form allows future variants).
    this.FENCE_STARTS = ["```tool_call"];
    this.FENCE_END = "```";
    this.buffer = "";
    // Streaming state
    this.inFence = false;
    // Accumulated fence content (interior text seen so far while inFence).
    this.fenceStartBuffer = "";
  }
  /**
   * Adds a chunk of text to the internal buffer
   *
   * @param chunk - Text chunk from the stream
   */
  addChunk(chunk) {
    this.buffer += chunk;
  }
  /**
   * Gets the current buffer content
   */
  getBuffer() {
    return this.buffer;
  }
  /**
   * Clears the internal buffer
   */
  clearBuffer() {
    this.buffer = "";
  }
  /**
   * Detects if there's a complete fence in the buffer
   *
   * This method:
   * 1. Searches for fence start markers
   * 2. If found, looks for closing fence
   * 3. Computes overlap for partial fences
   * 4. Returns safe text that can be emitted
   *
   * @returns Detection result with fence info and safe text
   */
  detectFence() {
    const { index: startIdx, prefix: matchedPrefix } = this.findFenceStart(
      this.buffer
    );
    if (startIdx === -1) {
      // No opener found: emit everything except a possible partial opener
      // at the tail, which stays buffered for the next chunk.
      const overlap = this.computeOverlapLength(this.buffer, this.FENCE_STARTS);
      const safeTextLength = this.buffer.length - overlap;
      const prefixText2 = safeTextLength > 0 ? this.buffer.slice(0, safeTextLength) : "";
      const remaining = overlap > 0 ? this.buffer.slice(-overlap) : "";
      this.buffer = remaining;
      return {
        fence: null,
        prefixText: prefixText2,
        remainingText: "",
        overlapLength: overlap
      };
    }
    // Opener found: emit the text before it, keep the fence in the buffer.
    const prefixText = this.buffer.slice(0, startIdx);
    this.buffer = this.buffer.slice(startIdx);
    const prefixLength = matchedPrefix?.length ?? 0;
    // Search for the closer AFTER the opener so "```" inside the opener
    // itself is not mistaken for the end.
    const closingIdx = this.buffer.indexOf(this.FENCE_END, prefixLength);
    if (closingIdx === -1) {
      // Fence not closed yet: hold the whole partial fence in the buffer.
      return {
        fence: null,
        prefixText,
        remainingText: "",
        overlapLength: 0
      };
    }
    const endPos = closingIdx + this.FENCE_END.length;
    const fence = this.buffer.slice(0, endPos);
    const remainingText = this.buffer.slice(endPos);
    this.buffer = "";
    return {
      fence,
      prefixText,
      remainingText,
      overlapLength: 0
    };
  }
  /**
   * Finds the first occurrence of any fence start marker
   *
   * @param text - Text to search in
   * @returns Index of first fence start and which prefix matched
   * @private
   */
  findFenceStart(text) {
    let bestIndex = -1;
    let matchedPrefix = null;
    for (const prefix of this.FENCE_STARTS) {
      const idx = text.indexOf(prefix);
      // Keep the earliest match across all opener variants.
      if (idx !== -1 && (bestIndex === -1 || idx < bestIndex)) {
        bestIndex = idx;
        matchedPrefix = prefix;
      }
    }
    return { index: bestIndex, prefix: matchedPrefix };
  }
  /**
   * Computes the maximum overlap between the end of text and the start of any prefix
   *
   * This is crucial for streaming: if the buffer ends with "``", we can't emit it
   * because the next chunk might be "`tool_call", completing a fence marker.
   *
   * @param text - Text to check for overlap
   * @param prefixes - List of prefixes to check against
   * @returns Length of the maximum overlap found
   *
   * @example
   * ```typescript
   * computeOverlapLength("hello ``", ["```tool_call"])
   * // Returns: 2 (because "``" matches start of "```tool_call")
   *
   * computeOverlapLength("hello `", ["```tool_call"])
   * // Returns: 1
   *
   * computeOverlapLength("hello world", ["```tool_call"])
   * // Returns: 0 (no overlap)
   * ```
   *
   * @private
   */
  computeOverlapLength(text, prefixes) {
    let overlap = 0;
    for (const prefix of prefixes) {
      // prefix.length - 1: a full prefix at the tail would already have
      // been found by findFenceStart, so only proper prefixes matter.
      const maxLength = Math.min(text.length, prefix.length - 1);
      for (let size = maxLength; size > 0; size -= 1) {
        if (prefix.startsWith(text.slice(-size))) {
          overlap = Math.max(overlap, size);
          break;
        }
      }
    }
    return overlap;
  }
  /**
   * Checks if the buffer currently contains any text
   */
  hasContent() {
    return this.buffer.length > 0;
  }
  /**
   * Gets the buffer size
   */
  getBufferSize() {
    return this.buffer.length;
  }
  /**
   * Detect and stream fence content in real-time for true incremental streaming
   *
   * This method is designed for streaming tool calls as they arrive:
   * 1. Detects when a fence starts and transitions to "inFence" state
   * 2. While inFence, emits safe content that won't conflict with fence end marker
   * 3. When fence ends, returns the complete fence for parsing
   *
   * @returns Streaming result with current state and safe content to emit
   */
  detectStreamingFence() {
    if (!this.inFence) {
      const { index: startIdx, prefix: matchedPrefix } = this.findFenceStart(
        this.buffer
      );
      if (startIdx === -1) {
        // Outside a fence, no opener yet: emit all but a potential partial
        // opener held at the tail.
        const overlap = this.computeOverlapLength(
          this.buffer,
          this.FENCE_STARTS
        );
        const safeTextLength = this.buffer.length - overlap;
        const safeContent = safeTextLength > 0 ? this.buffer.slice(0, safeTextLength) : "";
        this.buffer = this.buffer.slice(safeTextLength);
        return {
          inFence: false,
          safeContent,
          completeFence: null,
          textAfterFence: ""
        };
      }
      // Opener found: consume it (plus one following newline, if present)
      // and switch to in-fence accumulation.
      const prefixText = this.buffer.slice(0, startIdx);
      const fenceStartLength = matchedPrefix?.length ?? 0;
      this.buffer = this.buffer.slice(startIdx + fenceStartLength);
      if (this.buffer.startsWith("\n")) {
        this.buffer = this.buffer.slice(1);
      }
      this.inFence = true;
      this.fenceStartBuffer = "";
      return {
        inFence: true,
        safeContent: prefixText,
        // Emit any text before the fence
        completeFence: null,
        textAfterFence: ""
      };
    }
    // In-fence: look for the closing "```".
    const closingIdx = this.buffer.indexOf(this.FENCE_END);
    if (closingIdx === -1) {
      // Not closed yet: emit interior content except a possible partial
      // closer at the tail, and remember it for fence reconstruction.
      const overlap = this.computeOverlapLength(this.buffer, [this.FENCE_END]);
      const safeContentLength = this.buffer.length - overlap;
      if (safeContentLength > 0) {
        const safeContent = this.buffer.slice(0, safeContentLength);
        this.fenceStartBuffer += safeContent;
        this.buffer = this.buffer.slice(safeContentLength);
        return {
          inFence: true,
          safeContent,
          completeFence: null,
          textAfterFence: ""
        };
      }
      return {
        inFence: true,
        safeContent: "",
        completeFence: null,
        textAfterFence: ""
      };
    }
    // Fence closed: rebuild the canonical fence text (opener + accumulated
    // interior + closer) and reset state.
    const fenceContent = this.buffer.slice(0, closingIdx);
    this.fenceStartBuffer += fenceContent;
    const completeFence = `${this.FENCE_STARTS[0]}
${this.fenceStartBuffer}
${this.FENCE_END}`;
    const textAfterFence = this.buffer.slice(
      closingIdx + this.FENCE_END.length
    );
    this.inFence = false;
    this.fenceStartBuffer = "";
    this.buffer = textAfterFence;
    return {
      inFence: false,
      safeContent: fenceContent,
      // Emit the last bit of fence content
      completeFence,
      textAfterFence
    };
  }
  /**
   * Check if currently inside a fence
   */
  isInFence() {
    return this.inFence;
  }
  /**
   * Reset streaming state
   */
  resetStreamingState() {
    this.inFence = false;
    this.fenceStartBuffer = "";
  }
};
909
+
910
+ // src/built-in-ai-language-model.ts
911
/**
 * Reports whether the current environment exposes the Prompt API
 * (`LanguageModel` global), i.e. a Chromium browser with built-in AI.
 */
function doesBrowserSupportBuiltInAI() {
  const hasPromptApi = typeof LanguageModel !== "undefined";
  return hasPromptApi;
}
914
/**
 * Alias-style check for the `LanguageModel` global.
 * Mirrors doesBrowserSupportBuiltInAI(); kept for API compatibility.
 */
function isBuiltInAIModelAvailable() {
  const hasPromptApi = typeof LanguageModel !== "undefined";
  return hasPromptApi;
}
917
/**
 * Pulls the tool name out of a (possibly partial) tool-call JSON string by
 * matching the leading `{"name": "..."` pattern.
 *
 * @param content - Raw (potentially incomplete) fence interior.
 * @returns The tool name, or null when no name field is present yet.
 */
function extractToolName(content) {
  const nameMatch = /\{\s*"name"\s*:\s*"([^"]+)"/.exec(content);
  return nameMatch ? nameMatch[1] : null;
}
924
/**
 * Extracts the raw text of the `"arguments"` value from a (possibly
 * partial) tool-call JSON string, scanning character by character so it
 * works on incomplete input during streaming.
 *
 * Tracks brace/bracket depth outside string literals and stops when the
 * opening `{`/`[` of the value is balanced; for truncated input it simply
 * returns everything seen so far after `"arguments":`.
 *
 * NOTE(review): when the arguments value is a bare JSON string, the opening
 * quote is consumed in the `started` branch without setting `inString`, so
 * subsequent quote tracking is inverted for that case — presumably only
 * object/array arguments are streamed here; confirm with callers.
 *
 * @param content - Raw fence interior, possibly truncated mid-value.
 * @returns The argument value text captured so far ("" if no
 *   `"arguments":` key is present).
 */
function extractArgumentsContent(content) {
  const match = content.match(/"arguments"\s*:\s*/);
  if (!match || match.index === void 0) {
    return "";
  }
  // Scan starts right after the matched `"arguments":` key/colon.
  const startIndex = match.index + match[0].length;
  let result = "";
  let depth = 0; // brace/bracket nesting depth (counted outside strings)
  let inString = false; // currently inside a JSON string literal
  let escaped = false; // previous character was a backslash
  let started = false; // first non-whitespace character of the value seen
  for (let i = startIndex; i < content.length; i++) {
    const char = content[i];
    result += char;
    if (!started) {
      // Skip leading whitespace; the first real character decides whether
      // we track nesting (object/array) or not (scalar).
      if (!/\s/.test(char)) {
        started = true;
        if (char === "{" || char === "[") {
          depth = 1;
        }
      }
      continue;
    }
    if (escaped) {
      escaped = false;
      continue;
    }
    if (char === "\\") {
      escaped = true;
      continue;
    }
    if (char === '"') {
      inString = !inString;
      continue;
    }
    if (!inString) {
      if (char === "{" || char === "[") {
        depth += 1;
      } else if (char === "}" || char === "]") {
        if (depth > 0) {
          depth -= 1;
          if (depth === 0) {
            // The value's opening brace/bracket is now balanced: done.
            break;
          }
        }
      }
    }
  }
  return result;
}
974
+ var BuiltInAIChatLanguageModel = class {
975
+ constructor(modelId, options = {}) {
976
+ this.specificationVersion = "v2";
977
+ this.provider = "browser-ai";
978
+ this.supportedUrls = {
979
+ "image/*": [/^https?:\/\/.+$/],
980
+ "audio/*": [/^https?:\/\/.+$/]
981
+ };
982
+ this.modelId = modelId;
983
+ this.config = {
984
+ provider: this.provider,
985
+ modelId,
986
+ options
987
+ };
988
+ this.sessionManager = new SessionManager(options);
989
+ }
990
+ /**
991
+ * Gets a session with the specified options
992
+ * Delegates to SessionManager for all session lifecycle management
993
+ * @private
994
+ */
995
+ async getSession(options, expectedInputs, systemMessage, onDownloadProgress) {
996
+ return this.sessionManager.getSession({
997
+ ...options,
998
+ expectedInputs,
999
+ systemMessage,
1000
+ onDownloadProgress
1001
+ });
1002
+ }
1003
+ getArgs(callOptions) {
1004
+ const {
1005
+ prompt,
1006
+ maxOutputTokens,
1007
+ temperature,
1008
+ topP,
1009
+ topK,
1010
+ frequencyPenalty,
1011
+ presencePenalty,
1012
+ stopSequences,
1013
+ responseFormat,
1014
+ seed,
1015
+ tools,
1016
+ toolChoice,
1017
+ providerOptions
1018
+ } = callOptions;
1019
+ const warnings = [];
1020
+ warnings.push(
1021
+ ...gatherUnsupportedSettingWarnings({
1022
+ maxOutputTokens,
1023
+ stopSequences,
1024
+ topP,
1025
+ presencePenalty,
1026
+ frequencyPenalty,
1027
+ seed,
1028
+ toolChoice
1029
+ })
1030
+ );
1031
+ const functionTools = (tools ?? []).filter(isFunctionTool);
1032
+ const unsupportedTools = (tools ?? []).filter(
1033
+ (tool) => !isFunctionTool(tool)
1034
+ );
1035
+ for (const tool of unsupportedTools) {
1036
+ warnings.push(
1037
+ createUnsupportedToolWarning(
1038
+ tool,
1039
+ "Only function tools are supported by the Prompt API polyfill"
1040
+ )
1041
+ );
1042
+ }
1043
+ const hasMultiModalInput = hasMultimodalContent(prompt);
1044
+ const { systemMessage, messages } = convertToBuiltInAIMessages(prompt);
1045
+ const promptOptions = {};
1046
+ if (responseFormat?.type === "json") {
1047
+ promptOptions.responseConstraint = responseFormat.schema;
1048
+ }
1049
+ if (temperature !== void 0) {
1050
+ promptOptions.temperature = temperature;
1051
+ }
1052
+ if (topK !== void 0) {
1053
+ promptOptions.topK = topK;
1054
+ }
1055
+ return {
1056
+ systemMessage,
1057
+ messages,
1058
+ warnings,
1059
+ promptOptions,
1060
+ hasMultiModalInput,
1061
+ expectedInputs: hasMultiModalInput ? getExpectedInputs(prompt) : void 0,
1062
+ functionTools
1063
+ };
1064
+ }
1065
+ /**
1066
+ * Generates a complete text response using the browser's built-in Prompt API
1067
+ * @param options
1068
+ * @returns Promise resolving to the generated content with finish reason, usage stats, and any warnings
1069
+ * @throws {LoadSettingError} When the Prompt API is not available or model needs to be downloaded
1070
+ * @throws {UnsupportedFunctionalityError} When unsupported features like file input are used
1071
+ */
1072
+ async doGenerate(options) {
1073
+ const converted = this.getArgs(options);
1074
+ const {
1075
+ systemMessage,
1076
+ messages,
1077
+ warnings,
1078
+ promptOptions,
1079
+ expectedInputs,
1080
+ functionTools
1081
+ } = converted;
1082
+ const session = await this.getSession(void 0, expectedInputs, void 0);
1083
+ const systemPrompt = await buildJsonToolSystemPrompt(
1084
+ systemMessage,
1085
+ functionTools,
1086
+ {
1087
+ allowParallelToolCalls: false
1088
+ }
1089
+ );
1090
+ const promptMessages = prependSystemPromptToMessages(
1091
+ messages,
1092
+ systemPrompt
1093
+ );
1094
+ const rawResponse = await session.prompt(promptMessages, promptOptions);
1095
+ const { toolCalls, textContent } = parseJsonFunctionCalls(rawResponse);
1096
+ if (toolCalls.length > 0) {
1097
+ const toolCallsToEmit = toolCalls.slice(0, 1);
1098
+ const parts = [];
1099
+ if (textContent) {
1100
+ parts.push({
1101
+ type: "text",
1102
+ text: textContent
1103
+ });
1104
+ }
1105
+ for (const call of toolCallsToEmit) {
1106
+ parts.push({
1107
+ type: "tool-call",
1108
+ toolCallId: call.toolCallId,
1109
+ toolName: call.toolName,
1110
+ input: JSON.stringify(call.args ?? {})
1111
+ });
1112
+ }
1113
+ return {
1114
+ content: parts,
1115
+ finishReason: "tool-calls",
1116
+ usage: {
1117
+ inputTokens: void 0,
1118
+ outputTokens: void 0,
1119
+ totalTokens: void 0
1120
+ },
1121
+ request: { body: { messages: promptMessages, options: promptOptions } },
1122
+ warnings
1123
+ };
1124
+ }
1125
+ const content = [
1126
+ {
1127
+ type: "text",
1128
+ text: textContent || rawResponse
1129
+ }
1130
+ ];
1131
+ return {
1132
+ content,
1133
+ finishReason: "stop",
1134
+ usage: {
1135
+ inputTokens: void 0,
1136
+ outputTokens: void 0,
1137
+ totalTokens: void 0
1138
+ },
1139
+ request: { body: { messages: promptMessages, options: promptOptions } },
1140
+ warnings
1141
+ };
1142
+ }
1143
+ /**
1144
+ * Check the availability of the built-in AI model
1145
+ * @returns Promise resolving to "unavailable", "available", or "available-after-download"
1146
+ */
1147
+ async availability() {
1148
+ return this.sessionManager.checkAvailability();
1149
+ }
1150
+ /**
1151
+ * Creates a session with download progress monitoring.
1152
+ *
1153
+ * @example
1154
+ * ```typescript
1155
+ * const session = await model.createSessionWithProgress(
1156
+ * (progress) => {
1157
+ * console.log(`Download progress: ${Math.round(progress * 100)}%`);
1158
+ * }
1159
+ * );
1160
+ * ```
1161
+ *
1162
+ * @param onDownloadProgress Optional callback receiving progress values 0-1 during model download
1163
+ * @returns Promise resolving to a configured LanguageModel session
1164
+ * @throws {LoadSettingError} When the Prompt API is not available or model is unavailable
1165
+ */
1166
+ async createSessionWithProgress(onDownloadProgress) {
1167
+ return this.sessionManager.createSessionWithProgress(onDownloadProgress);
1168
+ }
1169
  /**
   * Generates a streaming text response using the browser's built-in Prompt API.
   *
   * Plain model output is forwarded as text-delta parts. When a fenced
   * tool-call block is detected in the stream, its content is parsed and
   * emitted as tool-input-start / tool-input-delta / tool-input-end /
   * tool-call parts instead of text, and the underlying prompt stream is
   * cancelled.
   *
   * @param options
   * @returns Promise resolving to a readable stream of text chunks and request metadata
   * @throws {LoadSettingError} When the Prompt API is not available or model needs to be downloaded
   * @throws {UnsupportedFunctionalityError} When unsupported features like file input are used
   */
  async doStream(options) {
    const converted = this.getArgs(options);
    const {
      systemMessage,
      messages,
      warnings,
      promptOptions,
      expectedInputs,
      functionTools
    } = converted;
    const session = await this.getSession(void 0, expectedInputs, void 0);
    // Tool definitions are injected through the system prompt (JSON
    // tool-calling polyfill); parallel tool calls are disabled.
    const systemPrompt = await buildJsonToolSystemPrompt(
      systemMessage,
      functionTools,
      {
        allowParallelToolCalls: false
      }
    );
    const promptMessages = prependSystemPromptToMessages(
      messages,
      systemPrompt
    );
    const streamOptions = {
      ...promptOptions,
      signal: options.abortSignal
    };
    const conversationHistory = [...promptMessages];
    // Single text part id reused for every text-start/-delta/-end part.
    const textId = "text-0";
    const stream = new ReadableStream({
      start: async (controller) => {
        controller.enqueue({
          type: "stream-start",
          warnings
        });
        // Stream state flags.
        let textStarted = false; // a text-start part has been emitted
        let finished = false; // finish part emitted and controller closed
        let aborted = false; // options.abortSignal fired
        let currentReader = null; // reader of the in-flight prompt stream
        // Emits text-start exactly once before the first text delta.
        const ensureTextStart = () => {
          if (!textStarted) {
            controller.enqueue({
              type: "text-start",
              id: textId
            });
            textStarted = true;
          }
        };
        // Emits a text-delta part; empty deltas are skipped.
        const emitTextDelta = (delta) => {
          if (!delta) return;
          ensureTextStart();
          controller.enqueue({
            type: "text-delta",
            id: textId,
            delta
          });
        };
        // Closes the open text part, if any.
        const emitTextEndIfNeeded = () => {
          if (!textStarted) return;
          controller.enqueue({
            type: "text-end",
            id: textId
          });
          textStarted = false;
        };
        // Emits the terminal finish part and closes the stream (idempotent).
        // inputTokens is taken from session.inputUsage; output counts are
        // not reported by the Prompt API.
        const finishStream = (finishReason) => {
          if (finished) return;
          finished = true;
          emitTextEndIfNeeded();
          controller.enqueue({
            type: "finish",
            finishReason,
            usage: {
              inputTokens: session.inputUsage,
              outputTokens: void 0,
              totalTokens: void 0
            }
          });
          controller.close();
        };
        // Cancels the in-flight read and finishes the stream on abort.
        const abortHandler = () => {
          if (aborted) {
            return;
          }
          aborted = true;
          if (currentReader) {
            currentReader.cancel().catch(() => void 0);
          }
          finishStream("stop");
        };
        if (options.abortSignal) {
          options.abortSignal.addEventListener("abort", abortHandler);
        }
        // NOTE(review): every path through the loop body below ends in a
        // return, so at most one iteration runs; the cap looks vestigial —
        // confirm against the intended tool-execution loop design.
        const maxIterations = 10;
        let iteration = 0;
        try {
          const fenceDetector = new ToolCallFenceDetector();
          while (iteration < maxIterations && !aborted && !finished) {
            iteration += 1;
            const promptStream = session.promptStreaming(
              conversationHistory,
              streamOptions
            );
            currentReader = promptStream.getReader();
            // Per-attempt fence-parsing state.
            let toolCalls = [];
            let toolBlockDetected = false;
            let trailingTextAfterBlock = "";
            let currentToolCallId = null; // id minted when a fence opens
            let toolInputStartEmitted = false;
            let accumulatedFenceContent = ""; // raw text seen inside the fence
            let streamedArgumentsLength = 0; // args chars already emitted as deltas
            let insideFence = false;
            // Outer read loop: feed raw chunks into the fence detector.
            while (!aborted) {
              const { done, value } = await currentReader.read();
              if (done) {
                break;
              }
              fenceDetector.addChunk(value);
              // Inner drain loop: consume detector output until no progress.
              while (fenceDetector.hasContent()) {
                const wasInsideFence = insideFence;
                const result = fenceDetector.detectStreamingFence();
                insideFence = result.inFence;
                let madeProgress = false;
                // Transition: fence just opened. Flush any text before it
                // and mint a fresh tool call id.
                if (!wasInsideFence && result.inFence) {
                  if (result.safeContent) {
                    emitTextDelta(result.safeContent);
                    madeProgress = true;
                  }
                  currentToolCallId = `call_${Date.now()}_${Math.random().toString(36).slice(2, 9)}`;
                  toolInputStartEmitted = false;
                  accumulatedFenceContent = "";
                  streamedArgumentsLength = 0;
                  insideFence = true;
                  continue;
                }
                // A complete fenced block is available: parse it.
                if (result.completeFence) {
                  madeProgress = true;
                  if (result.safeContent) {
                    accumulatedFenceContent += result.safeContent;
                  }
                  // Flush any argument characters not yet streamed as deltas.
                  if (toolInputStartEmitted && currentToolCallId) {
                    const argsContent = extractArgumentsContent(
                      accumulatedFenceContent
                    );
                    if (argsContent.length > streamedArgumentsLength) {
                      const delta = argsContent.slice(streamedArgumentsLength);
                      streamedArgumentsLength = argsContent.length;
                      if (delta.length > 0) {
                        controller.enqueue({
                          type: "tool-input-delta",
                          id: currentToolCallId,
                          delta
                        });
                      }
                    }
                  }
                  const parsed = parseJsonFunctionCalls(result.completeFence);
                  const parsedToolCalls = parsed.toolCalls;
                  // Only one call is honored (no parallel tool calls).
                  const selectedToolCalls = parsedToolCalls.slice(0, 1);
                  // Fence did not contain a valid tool call: treat the whole
                  // block as plain text and reset fence state.
                  if (selectedToolCalls.length === 0) {
                    toolCalls = [];
                    toolBlockDetected = false;
                    emitTextDelta(result.completeFence);
                    if (result.textAfterFence) {
                      emitTextDelta(result.textAfterFence);
                    }
                    currentToolCallId = null;
                    toolInputStartEmitted = false;
                    accumulatedFenceContent = "";
                    streamedArgumentsLength = 0;
                    insideFence = false;
                    continue;
                  }
                  // Keep the id already streamed in tool-input-* parts.
                  if (selectedToolCalls.length > 0 && currentToolCallId) {
                    selectedToolCalls[0].toolCallId = currentToolCallId;
                  }
                  toolCalls = selectedToolCalls;
                  toolBlockDetected = toolCalls.length > 0;
                  for (const [index, call] of toolCalls.entries()) {
                    const toolCallId = index === 0 && currentToolCallId ? currentToolCallId : call.toolCallId;
                    const toolName = call.toolName;
                    const argsJson = JSON.stringify(call.args ?? {});
                    if (toolCallId === currentToolCallId) {
                      // This call was (partially) streamed already; only emit
                      // what is still missing.
                      if (!toolInputStartEmitted) {
                        controller.enqueue({
                          type: "tool-input-start",
                          id: toolCallId,
                          toolName
                        });
                        toolInputStartEmitted = true;
                      }
                      const argsContent = extractArgumentsContent(
                        accumulatedFenceContent
                      );
                      if (argsContent.length > streamedArgumentsLength) {
                        const delta = argsContent.slice(
                          streamedArgumentsLength
                        );
                        streamedArgumentsLength = argsContent.length;
                        if (delta.length > 0) {
                          controller.enqueue({
                            type: "tool-input-delta",
                            id: toolCallId,
                            delta
                          });
                        }
                      }
                    } else {
                      // Call was never streamed: emit its input in one delta.
                      controller.enqueue({
                        type: "tool-input-start",
                        id: toolCallId,
                        toolName
                      });
                      if (argsJson.length > 0) {
                        controller.enqueue({
                          type: "tool-input-delta",
                          id: toolCallId,
                          delta: argsJson
                        });
                      }
                    }
                    controller.enqueue({
                      type: "tool-input-end",
                      id: toolCallId
                    });
                    controller.enqueue({
                      type: "tool-call",
                      toolCallId,
                      toolName,
                      input: argsJson,
                      providerExecuted: false
                    });
                  }
                  trailingTextAfterBlock += result.textAfterFence;
                  madeProgress = true;
                  // Stop reading once a tool block was found.
                  if (toolBlockDetected && currentReader) {
                    await currentReader.cancel().catch(() => void 0);
                    break;
                  }
                  currentToolCallId = null;
                  toolInputStartEmitted = false;
                  accumulatedFenceContent = "";
                  streamedArgumentsLength = 0;
                  insideFence = false;
                  continue;
                }
                // Still inside an open fence: accumulate content and stream
                // tool name / argument deltas as soon as they can be parsed.
                if (insideFence) {
                  if (result.safeContent) {
                    accumulatedFenceContent += result.safeContent;
                    madeProgress = true;
                    const toolName = extractToolName(accumulatedFenceContent);
                    if (toolName && !toolInputStartEmitted && currentToolCallId) {
                      controller.enqueue({
                        type: "tool-input-start",
                        id: currentToolCallId,
                        toolName
                      });
                      toolInputStartEmitted = true;
                    }
                    if (toolInputStartEmitted && currentToolCallId) {
                      const argsContent = extractArgumentsContent(
                        accumulatedFenceContent
                      );
                      if (argsContent.length > streamedArgumentsLength) {
                        const delta = argsContent.slice(
                          streamedArgumentsLength
                        );
                        streamedArgumentsLength = argsContent.length;
                        if (delta.length > 0) {
                          controller.enqueue({
                            type: "tool-input-delta",
                            id: currentToolCallId,
                            delta
                          });
                        }
                      }
                    }
                  }
                  continue;
                }
                // Ordinary text outside any fence.
                if (!insideFence && result.safeContent) {
                  emitTextDelta(result.safeContent);
                  madeProgress = true;
                }
                // Detector holds content but produced nothing usable yet
                // (e.g. a possible fence prefix): wait for the next chunk.
                if (!madeProgress) {
                  break;
                }
              }
              if (toolBlockDetected) {
                break;
              }
            }
            currentReader = null;
            if (aborted) {
              return;
            }
            // Flush any text still buffered in the detector.
            if (!toolBlockDetected && fenceDetector.hasContent()) {
              emitTextDelta(fenceDetector.getBuffer());
              fenceDetector.clearBuffer();
            }
            if (!toolBlockDetected || toolCalls.length === 0) {
              finishStream("stop");
              return;
            }
            if (trailingTextAfterBlock) {
              emitTextDelta(trailingTextAfterBlock);
            }
            finishStream("tool-calls");
            return;
          }
          if (!finished && !aborted) {
            finishStream("other");
          }
        } catch (error) {
          controller.enqueue({ type: "error", error });
          controller.close();
        } finally {
          if (options.abortSignal) {
            options.abortSignal.removeEventListener("abort", abortHandler);
          }
        }
      }
    });
    return {
      stream,
      request: { body: { messages: promptMessages, options: promptOptions } }
    };
  }
1503
+ };
1504
+
1505
+ // src/built-in-ai-embedding-model.ts
1506
+ import { TextEmbedder } from "@mediapipe/tasks-text";
1507
var BuiltInAIEmbeddingModel = class {
  /**
   * Embedding model backed by MediaPipe's TextEmbedder (universal sentence
   * encoder) running locally.
   *
   * @param {object} [settings] - Optional overrides for `wasmLoaderPath`,
   *   `wasmBinaryPath`, `modelAssetPath`, `l2Normalize`, `quantize`, and
   *   `delegate`.
   */
  constructor(settings = {}) {
    // AI SDK embedding-model interface metadata.
    this.specificationVersion = "v2";
    this.provider = "google-mediapipe";
    this.modelId = "embedding";
    this.supportsParallelCalls = true;
    this.maxEmbeddingsPerCall = void 0;
    // Defaults point at publicly hosted wasm and model assets.
    this.settings = {
      wasmLoaderPath: "https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/text_wasm_internal.js",
      wasmBinaryPath: "https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/text_wasm_internal.wasm",
      modelAssetPath: "https://pub-ddcfe353995744e89b8002f16bf98575.r2.dev/universal_sentence_encoder.tflite",
      l2Normalize: false,
      quantize: false
    };
    // Builds the MediaPipe TextEmbedder from the configured wasm fileset and
    // the model asset stream fetched below.
    this.getTextEmbedder = async () => {
      return TextEmbedder.createFromOptions(
        {
          wasmBinaryPath: this.settings.wasmBinaryPath,
          wasmLoaderPath: this.settings.wasmLoaderPath
        },
        {
          baseOptions: {
            modelAssetBuffer: await this.modelAssetBuffer,
            delegate: this.settings.delegate
          },
          l2Normalize: this.settings.l2Normalize,
          quantize: this.settings.quantize
        }
      );
    };
    /**
     * Embeds each input text via the shared embedder instance.
     * @param options - `{ values, abortSignal? }`
     * @returns embeddings (one float vector per input, `[]` when the embedder
     *   returns no embedding) plus minimal raw-response metadata
     * @throws {Error} when the abort signal is already aborted
     */
    this.doEmbed = async (options) => {
      if (options.abortSignal?.aborted) {
        throw new Error("Operation was aborted");
      }
      const embedder = await this.textEmbedder;
      const embeddings = options.values.map((text) => {
        const embedderResult = embedder.embed(text);
        const [embedding] = embedderResult.embeddings;
        return embedding?.floatEmbedding ?? [];
      });
      return {
        embeddings,
        rawResponse: {
          model: "universal_sentence_encoder",
          provider: "google-mediapipe",
          processed_texts: options.values.length
        }
      };
    };
    this.settings = { ...this.settings, ...settings };
    // Fix: fail fast on HTTP errors instead of handing an error-page body
    // (or a null body) to the embedder as model data.
    this.modelAssetBuffer = fetch(this.settings.modelAssetPath).then(
      (response) => {
        if (!response.ok) {
          throw new Error(
            `Failed to fetch model asset from ${this.settings.modelAssetPath}: ${response.status} ${response.statusText}`
          );
        }
        if (!response.body) {
          throw new Error("Model asset response has no readable body");
        }
        return response.body.getReader();
      }
    );
    this.textEmbedder = this.getTextEmbedder();
  }
};
1563
+
1564
+ // src/built-in-ai-provider.ts
1565
+ import {
1566
+ NoSuchModelError
1567
+ } from "@ai-sdk/provider";
1568
/**
 * Creates a built-in AI provider exposing chat and embedding model factories.
 * The returned value is itself callable as a shorthand for `languageModel`.
 *
 * @param {object} [options] - Reserved provider options (currently unused).
 * @returns the provider function with model-factory properties attached
 */
function createBuiltInAI(options = {}) {
  const createChatModel = (modelId, settings) => new BuiltInAIChatLanguageModel(modelId, settings);
  // The embedding model has a single fixed id, so modelId is ignored.
  const createEmbeddingModel = (modelId, settings) => new BuiltInAIEmbeddingModel(settings);
  // Produces a factory that always rejects with NoSuchModelError for
  // model types this provider does not implement.
  const rejectModelType = (modelType) => (modelId) => {
    throw new NoSuchModelError({ modelId, modelType });
  };
  function provider(modelId = "text", settings) {
    // Guard: the provider is a factory function, not a constructor.
    if (new.target) {
      throw new Error(
        "The BuiltInAI model function cannot be called with the new keyword."
      );
    }
    return createChatModel(modelId, settings);
  }
  provider.languageModel = createChatModel;
  provider.chat = createChatModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.imageModel = rejectModelType("imageModel");
  provider.speechModel = rejectModelType("speechModel");
  provider.transcriptionModel = rejectModelType("transcriptionModel");
  return provider;
}
1598
// Default, ready-to-use provider instance created with no custom options.
var builtInAI = createBuiltInAI();
export {
  BuiltInAIChatLanguageModel,
  BuiltInAIEmbeddingModel,
  builtInAI,
  createBuiltInAI,
  doesBrowserSupportBuiltInAI,
  isBuiltInAIModelAvailable
};
1607
+ //# sourceMappingURL=index.mjs.map