opencodekit 0.15.18 → 0.15.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. package/dist/index.js +16 -16
  2. package/dist/template/.opencode/memory/observations/2026-01-30-decision-github-copilot-claude-routing-keep-disab.md +32 -0
  3. package/dist/template/.opencode/memory/observations/2026-01-30-discovery-context-management-research-critical-gap.md +14 -0
  4. package/dist/template/.opencode/memory/observations/2026-01-31-decision-copilot-auth-plugin-updated-with-baseurl.md +63 -0
  5. package/dist/template/.opencode/memory/observations/2026-01-31-learning-opencode-copilot-auth-comparison-finding.md +61 -0
  6. package/dist/template/.opencode/memory/observations/2026-01-31-learning-opencode-copilot-reasoning-architecture-.md +66 -0
  7. package/dist/template/.opencode/memory/observations/2026-01-31-warning-copilot-claude-v1-endpoint-returns-404-c.md +48 -0
  8. package/dist/template/.opencode/memory/research/context-management-analysis.md +685 -0
  9. package/dist/template/.opencode/opencode.json +52 -156
  10. package/dist/template/.opencode/package.json +1 -1
  11. package/dist/template/.opencode/plugins/copilot-auth.ts +286 -29
  12. package/dist/template/.opencode/plugins/sdk/copilot/chat/convert-to-openai-compatible-chat-messages.ts +181 -0
  13. package/dist/template/.opencode/plugins/sdk/copilot/chat/get-response-metadata.ts +15 -0
  14. package/dist/template/.opencode/plugins/sdk/copilot/chat/map-openai-compatible-finish-reason.ts +19 -0
  15. package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-api-types.ts +72 -0
  16. package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-chat-language-model.ts +823 -0
  17. package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-chat-options.ts +30 -0
  18. package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-metadata-extractor.ts +48 -0
  19. package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-prepare-tools.ts +92 -0
  20. package/dist/template/.opencode/plugins/sdk/copilot/copilot-provider.ts +94 -0
  21. package/dist/template/.opencode/plugins/sdk/copilot/index.ts +5 -0
  22. package/dist/template/.opencode/plugins/sdk/copilot/openai-compatible-error.ts +30 -0
  23. package/dist/template/.opencode/skills/notebooklm/SKILL.md +272 -0
  24. package/dist/template/.opencode/skills/notebooklm/references/setup.md +353 -0
  25. package/dist/template/.opencode/tools/notebooklm.ts +488 -0
  26. package/package.json +1 -1
package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-chat-language-model.ts
@@ -0,0 +1,823 @@
+ import {
+   type APICallError,
+   InvalidResponseDataError,
+   type LanguageModelV2,
+   type LanguageModelV2CallWarning,
+   type LanguageModelV2Content,
+   type LanguageModelV2FinishReason,
+   type LanguageModelV2StreamPart,
+   type SharedV2ProviderMetadata,
+ } from "@ai-sdk/provider";
+ import {
+   type FetchFunction,
+   type ParseResult,
+   type ResponseHandler,
+   combineHeaders,
+   createEventSourceResponseHandler,
+   createJsonErrorResponseHandler,
+   createJsonResponseHandler,
+   generateId,
+   isParsableJson,
+   parseProviderOptions,
+   postJsonToApi,
+ } from "@ai-sdk/provider-utils";
+ import { z } from "zod";
+ import {
+   type ProviderErrorStructure,
+   defaultOpenAICompatibleErrorStructure,
+ } from "../openai-compatible-error.js";
+ import { convertToOpenAICompatibleChatMessages } from "./convert-to-openai-compatible-chat-messages.js";
+ import { getResponseMetadata } from "./get-response-metadata.js";
+ import { mapOpenAICompatibleFinishReason } from "./map-openai-compatible-finish-reason.js";
+ import {
+   type OpenAICompatibleChatModelId,
+   openaiCompatibleProviderOptions,
+ } from "./openai-compatible-chat-options.js";
+ import type { MetadataExtractor } from "./openai-compatible-metadata-extractor.js";
+ import { prepareTools } from "./openai-compatible-prepare-tools.js";
+
+ export type OpenAICompatibleChatConfig = {
+   provider: string;
+   headers: () => Record<string, string | undefined>;
+   url: (options: { modelId: string; path: string }) => string;
+   fetch?: FetchFunction;
+   includeUsage?: boolean;
+   errorStructure?: ProviderErrorStructure<any>;
+   metadataExtractor?: MetadataExtractor;
+
+   /**
+    * Whether the model supports structured outputs.
+    */
+   supportsStructuredOutputs?: boolean;
+
+   /**
+    * The supported URLs for the model.
+    */
+   supportedUrls?: () => LanguageModelV2["supportedUrls"];
+ };
+
+ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
+   readonly specificationVersion = "v2";
+
+   readonly supportsStructuredOutputs: boolean;
+
+   readonly modelId: OpenAICompatibleChatModelId;
+   private readonly config: OpenAICompatibleChatConfig;
+   private readonly failedResponseHandler: ResponseHandler<APICallError>;
+   private readonly chunkSchema; // type inferred via constructor
+
+   constructor(
+     modelId: OpenAICompatibleChatModelId,
+     config: OpenAICompatibleChatConfig,
+   ) {
+     this.modelId = modelId;
+     this.config = config;
+
+     // initialize error handling:
+     const errorStructure =
+       config.errorStructure ?? defaultOpenAICompatibleErrorStructure;
+     this.chunkSchema = createOpenAICompatibleChatChunkSchema(
+       errorStructure.errorSchema,
+     );
+     this.failedResponseHandler = createJsonErrorResponseHandler(errorStructure);
+
+     this.supportsStructuredOutputs = config.supportsStructuredOutputs ?? false;
+   }
+
+   get provider(): string {
+     return this.config.provider;
+   }
+
+   private get providerOptionsName(): string {
+     return this.config.provider.split(".")[0].trim();
+   }
+
+   get supportedUrls() {
+     return this.config.supportedUrls?.() ?? {};
+   }
+
+   private async getArgs({
+     prompt,
+     maxOutputTokens,
+     temperature,
+     topP,
+     topK,
+     frequencyPenalty,
+     presencePenalty,
+     providerOptions,
+     stopSequences,
+     responseFormat,
+     seed,
+     toolChoice,
+     tools,
+   }: Parameters<LanguageModelV2["doGenerate"]>[0]) {
+     const warnings: LanguageModelV2CallWarning[] = [];
+
+     // Parse provider options
+     const compatibleOptions = Object.assign(
+       (await parseProviderOptions({
+         provider: "copilot",
+         providerOptions,
+         schema: openaiCompatibleProviderOptions,
+       })) ?? {},
+       (await parseProviderOptions({
+         provider: this.providerOptionsName,
+         providerOptions,
+         schema: openaiCompatibleProviderOptions,
+       })) ?? {},
+     );
+
+     if (topK != null) {
+       warnings.push({ type: "unsupported-setting", setting: "topK" });
+     }
+
+     if (
+       responseFormat?.type === "json" &&
+       responseFormat.schema != null &&
+       !this.supportsStructuredOutputs
+     ) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "responseFormat",
+         details:
+           "JSON response format schema is only supported with structuredOutputs",
+       });
+     }
+
+     const {
+       tools: openaiTools,
+       toolChoice: openaiToolChoice,
+       toolWarnings,
+     } = prepareTools({
+       tools,
+       toolChoice,
+     });
+
+     return {
+       args: {
+         // model id:
+         model: this.modelId,
+
+         // model specific settings:
+         user: compatibleOptions.user,
+
+         // standardized settings:
+         max_tokens: maxOutputTokens,
+         temperature,
+         top_p: topP,
+         frequency_penalty: frequencyPenalty,
+         presence_penalty: presencePenalty,
+         response_format:
+           responseFormat?.type === "json"
+             ? this.supportsStructuredOutputs === true &&
+               responseFormat.schema != null
+               ? {
+                   type: "json_schema",
+                   json_schema: {
+                     schema: responseFormat.schema,
+                     name: responseFormat.name ?? "response",
+                     description: responseFormat.description,
+                   },
+                 }
+               : { type: "json_object" }
+             : undefined,
+
+         stop: stopSequences,
+         seed,
+         ...Object.fromEntries(
+           Object.entries(
+             providerOptions?.[this.providerOptionsName] ?? {},
+           ).filter(
+             ([key]) =>
+               !Object.keys(openaiCompatibleProviderOptions.shape).includes(key),
+           ),
+         ),
+
+         reasoning_effort: compatibleOptions.reasoningEffort,
+         verbosity: compatibleOptions.textVerbosity,
+
+         // messages:
+         messages: convertToOpenAICompatibleChatMessages(prompt),
+
+         // tools:
+         tools: openaiTools,
+         tool_choice: openaiToolChoice,
+
+         // thinking_budget for Claude models on Copilot
+         thinking_budget: compatibleOptions.thinking_budget,
+       },
+       warnings: [...warnings, ...toolWarnings],
+     };
+   }
+
+   async doGenerate(
+     options: Parameters<LanguageModelV2["doGenerate"]>[0],
+   ): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
+     const { args, warnings } = await this.getArgs({ ...options });
+
+     const body = JSON.stringify(args);
+
+     const {
+       responseHeaders,
+       value: responseBody,
+       rawValue: rawResponse,
+     } = await postJsonToApi({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId,
+       }),
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body: args,
+       failedResponseHandler: this.failedResponseHandler,
+       successfulResponseHandler: createJsonResponseHandler(
+         OpenAICompatibleChatResponseSchema,
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch,
+     });
+
+     const choice = responseBody.choices[0];
+     const content: Array<LanguageModelV2Content> = [];
+
+     // text content:
+     const text = choice.message.content;
+     if (text != null && text.length > 0) {
+       content.push({ type: "text", text });
+     }
+
+     // reasoning content (Copilot uses reasoning_text):
+     const reasoning = choice.message.reasoning_text;
+     if (reasoning != null && reasoning.length > 0) {
+       content.push({
+         type: "reasoning",
+         text: reasoning,
+         // Include reasoning_opaque for Copilot multi-turn reasoning
+         providerMetadata: choice.message.reasoning_opaque
+           ? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
+           : undefined,
+       });
+     }
+
+     // tool calls:
+     if (choice.message.tool_calls != null) {
+       for (const toolCall of choice.message.tool_calls) {
+         content.push({
+           type: "tool-call",
+           toolCallId: toolCall.id ?? generateId(),
+           toolName: toolCall.function.name,
+           input: toolCall.function.arguments!,
+         });
+       }
+     }
+
+     // provider metadata:
+     const providerMetadata: SharedV2ProviderMetadata = {
+       [this.providerOptionsName]: {},
+       ...(await this.config.metadataExtractor?.extractMetadata?.({
+         parsedBody: rawResponse,
+       })),
+     };
+     const completionTokenDetails =
+       responseBody.usage?.completion_tokens_details;
+     if (completionTokenDetails?.accepted_prediction_tokens != null) {
+       providerMetadata[this.providerOptionsName].acceptedPredictionTokens =
+         completionTokenDetails?.accepted_prediction_tokens;
+     }
+     if (completionTokenDetails?.rejected_prediction_tokens != null) {
+       providerMetadata[this.providerOptionsName].rejectedPredictionTokens =
+         completionTokenDetails?.rejected_prediction_tokens;
+     }
+
+     return {
+       content,
+       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
+       usage: {
+         inputTokens: responseBody.usage?.prompt_tokens ?? undefined,
+         outputTokens: responseBody.usage?.completion_tokens ?? undefined,
+         totalTokens: responseBody.usage?.total_tokens ?? undefined,
+         reasoningTokens:
+           responseBody.usage?.completion_tokens_details?.reasoning_tokens ??
+           undefined,
+         cachedInputTokens:
+           responseBody.usage?.prompt_tokens_details?.cached_tokens ?? undefined,
+       },
+       providerMetadata,
+       request: { body },
+       response: {
+         ...getResponseMetadata(responseBody),
+         headers: responseHeaders,
+         body: rawResponse,
+       },
+       warnings,
+     };
+   }
+
+   async doStream(
+     options: Parameters<LanguageModelV2["doStream"]>[0],
+   ): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
+     const { args, warnings } = await this.getArgs({ ...options });
+
+     const body = {
+       ...args,
+       stream: true,
+
+       // only include stream_options when in strict compatibility mode:
+       stream_options: this.config.includeUsage
+         ? { include_usage: true }
+         : undefined,
+     };
+
+     const metadataExtractor =
+       this.config.metadataExtractor?.createStreamExtractor();
+
+     const { responseHeaders, value: response } = await postJsonToApi({
+       url: this.config.url({
+         path: "/chat/completions",
+         modelId: this.modelId,
+       }),
+       headers: combineHeaders(this.config.headers(), options.headers),
+       body,
+       failedResponseHandler: this.failedResponseHandler,
+       successfulResponseHandler: createEventSourceResponseHandler(
+         this.chunkSchema,
+       ),
+       abortSignal: options.abortSignal,
+       fetch: this.config.fetch,
+     });
+
+     const toolCalls: Array<{
+       id: string;
+       type: "function";
+       function: {
+         name: string;
+         arguments: string;
+       };
+       hasFinished: boolean;
+     }> = [];
+
+     let finishReason: LanguageModelV2FinishReason = "unknown";
+     const usage: {
+       completionTokens: number | undefined;
+       completionTokensDetails: {
+         reasoningTokens: number | undefined;
+         acceptedPredictionTokens: number | undefined;
+         rejectedPredictionTokens: number | undefined;
+       };
+       promptTokens: number | undefined;
+       promptTokensDetails: {
+         cachedTokens: number | undefined;
+       };
+       totalTokens: number | undefined;
+     } = {
+       completionTokens: undefined,
+       completionTokensDetails: {
+         reasoningTokens: undefined,
+         acceptedPredictionTokens: undefined,
+         rejectedPredictionTokens: undefined,
+       },
+       promptTokens: undefined,
+       promptTokensDetails: {
+         cachedTokens: undefined,
+       },
+       totalTokens: undefined,
+     };
+     let isFirstChunk = true;
+     const providerOptionsName = this.providerOptionsName;
+     let isActiveReasoning = false;
+     let isActiveText = false;
+     let reasoningOpaque: string | undefined;
+
+     return {
+       stream: response.pipeThrough(
+         new TransformStream<
+           ParseResult<z.infer<typeof this.chunkSchema>>,
+           LanguageModelV2StreamPart
+         >({
+           start(controller) {
+             controller.enqueue({ type: "stream-start", warnings });
+           },
+
+           transform(chunk, controller) {
+             // Emit raw chunk if requested (before anything else)
+             if (options.includeRawChunks) {
+               controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+             }
+
+             // handle failed chunk parsing / validation:
+             if (!chunk.success) {
+               finishReason = "error";
+               controller.enqueue({ type: "error", error: chunk.error });
+               return;
+             }
+             const value = chunk.value;
+
+             metadataExtractor?.processChunk(chunk.rawValue);
+
+             // handle error chunks:
+             if ("error" in value) {
+               finishReason = "error";
+               controller.enqueue({
+                 type: "error",
+                 error: (value as any).error.message,
+               });
+               return;
+             }
+
+             if (isFirstChunk) {
+               isFirstChunk = false;
+
+               controller.enqueue({
+                 type: "response-metadata",
+                 ...getResponseMetadata(value),
+               });
+             }
+
+             if ((value as any).usage != null) {
+               const {
+                 prompt_tokens,
+                 completion_tokens,
+                 total_tokens,
+                 prompt_tokens_details,
+                 completion_tokens_details,
+               } = (value as any).usage;
+
+               usage.promptTokens = prompt_tokens ?? undefined;
+               usage.completionTokens = completion_tokens ?? undefined;
+               usage.totalTokens = total_tokens ?? undefined;
+               if (completion_tokens_details?.reasoning_tokens != null) {
+                 usage.completionTokensDetails.reasoningTokens =
+                   completion_tokens_details?.reasoning_tokens;
+               }
+               if (
+                 completion_tokens_details?.accepted_prediction_tokens != null
+               ) {
+                 usage.completionTokensDetails.acceptedPredictionTokens =
+                   completion_tokens_details?.accepted_prediction_tokens;
+               }
+               if (
+                 completion_tokens_details?.rejected_prediction_tokens != null
+               ) {
+                 usage.completionTokensDetails.rejectedPredictionTokens =
+                   completion_tokens_details?.rejected_prediction_tokens;
+               }
+               if (prompt_tokens_details?.cached_tokens != null) {
+                 usage.promptTokensDetails.cachedTokens =
+                   prompt_tokens_details?.cached_tokens;
+               }
+             }
+
+             const choice = (value as any).choices[0];
+
+             if (choice?.finish_reason != null) {
+               finishReason = mapOpenAICompatibleFinishReason(
+                 choice.finish_reason,
+               );
+             }
+
+             if (choice?.delta == null) {
+               return;
+             }
+
+             const delta = choice.delta;
+
+             // Capture reasoning_opaque for Copilot multi-turn reasoning
+             if (delta.reasoning_opaque) {
+               if (reasoningOpaque != null) {
+                 throw new InvalidResponseDataError({
+                   data: delta,
+                   message:
+                     "Multiple reasoning_opaque values received in a single response. Only one thinking part per response is supported.",
+                 });
+               }
+               reasoningOpaque = delta.reasoning_opaque;
+             }
+
+             // enqueue reasoning before text deltas (Copilot uses reasoning_text):
+             const reasoningContent = delta.reasoning_text;
+             if (reasoningContent) {
+               if (!isActiveReasoning) {
+                 controller.enqueue({
+                   type: "reasoning-start",
+                   id: "reasoning-0",
+                 });
+                 isActiveReasoning = true;
+               }
+
+               controller.enqueue({
+                 type: "reasoning-delta",
+                 id: "reasoning-0",
+                 delta: reasoningContent,
+               });
+             }
+
+             if (delta.content) {
+               // If reasoning was active and we're starting text, end reasoning first
+               if (isActiveReasoning && !isActiveText) {
+                 controller.enqueue({
+                   type: "reasoning-end",
+                   id: "reasoning-0",
+                   providerMetadata: reasoningOpaque
+                     ? { copilot: { reasoningOpaque } }
+                     : undefined,
+                 });
+                 isActiveReasoning = false;
+               }
+
+               if (!isActiveText) {
+                 controller.enqueue({ type: "text-start", id: "txt-0" });
+                 isActiveText = true;
+               }
+
+               controller.enqueue({
+                 type: "text-delta",
+                 id: "txt-0",
+                 delta: delta.content,
+               });
+             }
+
+             if (delta.tool_calls != null) {
+               // If reasoning was active and we're starting tool calls, end reasoning first
+               if (isActiveReasoning) {
+                 controller.enqueue({
+                   type: "reasoning-end",
+                   id: "reasoning-0",
+                   providerMetadata: reasoningOpaque
+                     ? { copilot: { reasoningOpaque } }
+                     : undefined,
+                 });
+                 isActiveReasoning = false;
+               }
+               for (const toolCallDelta of delta.tool_calls) {
+                 const index = toolCallDelta.index;
+
+                 if (toolCalls[index] == null) {
+                   if (toolCallDelta.id == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'id' to be a string.`,
+                     });
+                   }
+
+                   if (toolCallDelta.function?.name == null) {
+                     throw new InvalidResponseDataError({
+                       data: toolCallDelta,
+                       message: `Expected 'function.name' to be a string.`,
+                     });
+                   }
+
+                   controller.enqueue({
+                     type: "tool-input-start",
+                     id: toolCallDelta.id,
+                     toolName: toolCallDelta.function.name,
+                   });
+
+                   toolCalls[index] = {
+                     id: toolCallDelta.id,
+                     type: "function",
+                     function: {
+                       name: toolCallDelta.function.name,
+                       arguments: toolCallDelta.function.arguments ?? "",
+                     },
+                     hasFinished: false,
+                   };
+
+                   const toolCall = toolCalls[index];
+
+                   if (
+                     toolCall.function?.name != null &&
+                     toolCall.function?.arguments != null
+                   ) {
+                     // send delta if the argument text has already started:
+                     if (toolCall.function.arguments.length > 0) {
+                       controller.enqueue({
+                         type: "tool-input-delta",
+                         id: toolCall.id,
+                         delta: toolCall.function.arguments,
+                       });
+                     }
+
+                     // check if tool call is complete
+                     if (isParsableJson(toolCall.function.arguments)) {
+                       controller.enqueue({
+                         type: "tool-input-end",
+                         id: toolCall.id,
+                       });
+
+                       controller.enqueue({
+                         type: "tool-call",
+                         toolCallId: toolCall.id ?? generateId(),
+                         toolName: toolCall.function.name,
+                         input: toolCall.function.arguments,
+                       });
+                       toolCall.hasFinished = true;
+                     }
+                   }
+
+                   continue;
+                 }
+
+                 // existing tool call, merge if not finished
+                 const toolCall = toolCalls[index];
+
+                 if (toolCall.hasFinished) {
+                   continue;
+                 }
+
+                 if (toolCallDelta.function?.arguments != null) {
+                   toolCall.function!.arguments +=
+                     toolCallDelta.function?.arguments ?? "";
+                 }
+
+                 // send delta
+                 controller.enqueue({
+                   type: "tool-input-delta",
+                   id: toolCall.id,
+                   delta: toolCallDelta.function.arguments ?? "",
+                 });
+
+                 // check if tool call is complete
+                 if (
+                   toolCall.function?.name != null &&
+                   toolCall.function?.arguments != null &&
+                   isParsableJson(toolCall.function.arguments)
+                 ) {
+                   controller.enqueue({
+                     type: "tool-input-end",
+                     id: toolCall.id,
+                   });
+
+                   controller.enqueue({
+                     type: "tool-call",
+                     toolCallId: toolCall.id ?? generateId(),
+                     toolName: toolCall.function.name,
+                     input: toolCall.function.arguments,
+                   });
+                   toolCall.hasFinished = true;
+                 }
+               }
+             }
+           },
+
+           flush(controller) {
+             if (isActiveReasoning) {
+               controller.enqueue({
+                 type: "reasoning-end",
+                 id: "reasoning-0",
+                 providerMetadata: reasoningOpaque
+                   ? { copilot: { reasoningOpaque } }
+                   : undefined,
+               });
+             }
+
+             if (isActiveText) {
+               controller.enqueue({ type: "text-end", id: "txt-0" });
+             }
+
+             // go through all tool calls and send the ones that are not finished
+             for (const toolCall of toolCalls.filter(
+               (toolCall) => !toolCall.hasFinished,
+             )) {
+               controller.enqueue({
+                 type: "tool-input-end",
+                 id: toolCall.id,
+               });
+
+               controller.enqueue({
+                 type: "tool-call",
+                 toolCallId: toolCall.id ?? generateId(),
+                 toolName: toolCall.function.name,
+                 input: toolCall.function.arguments,
+               });
+             }
+
+             const providerMetadata: SharedV2ProviderMetadata = {
+               [providerOptionsName]: {},
+               ...(reasoningOpaque ? { copilot: { reasoningOpaque } } : {}),
+               ...metadataExtractor?.buildMetadata(),
+             };
+             if (
+               usage.completionTokensDetails.acceptedPredictionTokens != null
+             ) {
+               providerMetadata[providerOptionsName].acceptedPredictionTokens =
+                 usage.completionTokensDetails.acceptedPredictionTokens;
+             }
+             if (
+               usage.completionTokensDetails.rejectedPredictionTokens != null
+             ) {
+               providerMetadata[providerOptionsName].rejectedPredictionTokens =
+                 usage.completionTokensDetails.rejectedPredictionTokens;
+             }
+
+             controller.enqueue({
+               type: "finish",
+               finishReason,
+               usage: {
+                 inputTokens: usage.promptTokens ?? undefined,
+                 outputTokens: usage.completionTokens ?? undefined,
+                 totalTokens: usage.totalTokens ?? undefined,
+                 reasoningTokens:
+                   usage.completionTokensDetails.reasoningTokens ?? undefined,
+                 cachedInputTokens:
+                   usage.promptTokensDetails.cachedTokens ?? undefined,
+               },
+               providerMetadata,
+             });
+           },
+         }),
+       ),
+       request: { body: JSON.stringify(body) },
+       response: { headers: responseHeaders },
+     };
+   }
+ }
+
+ const openaiCompatibleTokenUsageSchema = z
+   .object({
+     prompt_tokens: z.number().nullish(),
+     completion_tokens: z.number().nullish(),
+     total_tokens: z.number().nullish(),
+     prompt_tokens_details: z
+       .object({
+         cached_tokens: z.number().nullish(),
+       })
+       .nullish(),
+     completion_tokens_details: z
+       .object({
+         reasoning_tokens: z.number().nullish(),
+         accepted_prediction_tokens: z.number().nullish(),
+         rejected_prediction_tokens: z.number().nullish(),
+       })
+       .nullish(),
+   })
+   .nullish();
+
+ // limited version of the schema, focussed on what is needed for the implementation
+ const OpenAICompatibleChatResponseSchema = z.object({
+   id: z.string().nullish(),
+   created: z.number().nullish(),
+   model: z.string().nullish(),
+   choices: z.array(
+     z.object({
+       message: z.object({
+         role: z.literal("assistant").nullish(),
+         content: z.string().nullish(),
+         // Copilot-specific reasoning fields
+         reasoning_text: z.string().nullish(),
+         reasoning_opaque: z.string().nullish(),
+         tool_calls: z
+           .array(
+             z.object({
+               id: z.string().nullish(),
+               function: z.object({
+                 name: z.string(),
+                 arguments: z.string(),
+               }),
+             }),
+           )
+           .nullish(),
+       }),
+       finish_reason: z.string().nullish(),
+     }),
+   ),
+   usage: openaiCompatibleTokenUsageSchema,
+ });
+
+ // limited version of the schema, focussed on what is needed for the implementation
+ const createOpenAICompatibleChatChunkSchema = <ERROR_SCHEMA extends z.ZodType>(
+   errorSchema: ERROR_SCHEMA,
+ ) =>
+   z.union([
+     z.object({
+       id: z.string().nullish(),
+       created: z.number().nullish(),
+       model: z.string().nullish(),
+       choices: z.array(
+         z.object({
+           delta: z
+             .object({
+               role: z.enum(["assistant"]).nullish(),
+               content: z.string().nullish(),
+               // Copilot-specific reasoning fields
+               reasoning_text: z.string().nullish(),
+               reasoning_opaque: z.string().nullish(),
+               tool_calls: z
+                 .array(
+                   z.object({
+                     index: z.number(),
+                     id: z.string().nullish(),
+                     function: z.object({
+                       name: z.string().nullish(),
+                       arguments: z.string().nullish(),
+                     }),
+                   }),
+                 )
+                 .nullish(),
+             })
+             .nullish(),
+           finish_reason: z.string().nullish(),
+         }),
+       ),
+       usage: openaiCompatibleTokenUsageSchema,
+     }),
+     errorSchema,
+   ]);
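
For orientation, here is a minimal usage sketch of the class added above. It is not part of the package: the model id, endpoint URL, environment variable, and the exact `thinking_budget` option key are assumptions read off the code in this diff; the real wiring lives in copilot-auth.ts and copilot-provider.ts, which this release also adds.

// Hypothetical sketch — assumptions are marked; runnable on Node 18+.
import { OpenAICompatibleChatLanguageModel } from "./openai-compatible-chat-language-model.js";

async function main() {
  const model = new OpenAICompatibleChatLanguageModel(
    "claude-sonnet-4", // assumed model id
    {
      provider: "copilot.chat", // providerOptionsName strips ".chat" -> "copilot"
      // assumed base URL and auth header; copilot-auth.ts resolves the real values
      url: ({ path }) => `https://api.githubcopilot.com${path}`,
      headers: () => ({ Authorization: `Bearer ${process.env.COPILOT_TOKEN}` }),
      includeUsage: true, // opt in to stream_options usage accounting
    },
  );

  const { stream } = await model.doStream({
    prompt: [{ role: "user", content: [{ type: "text", text: "Hi" }] }],
    // forwarded to Claude models by getArgs; assumes the options schema in
    // openai-compatible-chat-options.ts admits this key
    providerOptions: { copilot: { thinking_budget: 4096 } },
  });

  // reasoning-* parts arrive before text; reasoning-end carries the opaque
  // blob Copilot needs back on a later turn to continue the reasoning chain
  for await (const part of stream as unknown as AsyncIterable<any>) {
    if (part.type === "reasoning-end") {
      const opaque = part.providerMetadata?.copilot?.reasoningOpaque;
      // persist `opaque` alongside the assistant message for the next request
    }
  }
}

main().catch(console.error);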