opencodekit 0.15.19 → 0.15.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. package/dist/index.js +1 -1
  2. package/dist/template/.opencode/memory/observations/2026-01-30-discovery-context-management-research-critical-gap.md +14 -0
  3. package/dist/template/.opencode/memory/observations/2026-01-31-decision-copilot-auth-plugin-updated-with-baseurl.md +63 -0
  4. package/dist/template/.opencode/memory/observations/2026-01-31-learning-opencode-copilot-auth-comparison-finding.md +61 -0
  5. package/dist/template/.opencode/memory/observations/2026-01-31-learning-opencode-copilot-reasoning-architecture-.md +66 -0
  6. package/dist/template/.opencode/memory/observations/2026-01-31-warning-copilot-claude-v1-endpoint-returns-404-c.md +48 -0
  7. package/dist/template/.opencode/memory/research/context-management-analysis.md +685 -0
  8. package/dist/template/.opencode/opencode.json +52 -156
  9. package/dist/template/.opencode/package.json +1 -1
  10. package/dist/template/.opencode/plugins/copilot-auth.ts +289 -24
  11. package/dist/template/.opencode/plugins/sdk/copilot/chat/convert-to-openai-compatible-chat-messages.ts +181 -0
  12. package/dist/template/.opencode/plugins/sdk/copilot/chat/get-response-metadata.ts +15 -0
  13. package/dist/template/.opencode/plugins/sdk/copilot/chat/map-openai-compatible-finish-reason.ts +19 -0
  14. package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-api-types.ts +72 -0
  15. package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-chat-language-model.ts +823 -0
  16. package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-chat-options.ts +30 -0
  17. package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-metadata-extractor.ts +48 -0
  18. package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-prepare-tools.ts +92 -0
  19. package/dist/template/.opencode/plugins/sdk/copilot/copilot-provider.ts +94 -0
  20. package/dist/template/.opencode/plugins/sdk/copilot/index.ts +5 -0
  21. package/dist/template/.opencode/plugins/sdk/copilot/openai-compatible-error.ts +30 -0
  22. package/package.json +1 -1
package/dist/template/.opencode/plugins/copilot-auth.ts
@@ -1,12 +1,43 @@
 /**
  * GitHub Copilot Auth Plugin
  * Simplified auth provider without token expiration checks
+ *
+ * Claude Reasoning Support:
+ * This plugin adds `thinking_budget` to the request body for Claude models.
+ * The Copilot API accepts this parameter and returns reasoning in the response.
+ *
+ * NOTE: Response parsing for reasoning_text/reasoning_opaque is handled by
+ * the @ai-sdk/github-copilot SDK if it supports it, or will need additional
+ * response transformation.
  */
 
 import type { Plugin } from "@opencode-ai/plugin";
 
 const CLIENT_ID = "Ov23li8tweQw6odWQebz";
 
+// Logger function that will be set by the plugin
+let log: (
+  level: "debug" | "info" | "warn" | "error",
+  message: string,
+  extra?: Record<string, any>,
+) => void = () => {};
+
+/**
+ * Set the logger function from the plugin context
+ */
+function setLogger(client: any) {
+  log = (level, message, extra) => {
+    client.app
+      .log({
+        service: "copilot-auth",
+        level,
+        message,
+        extra,
+      })
+      .catch(() => {}); // Fire and forget, don't block on logging
+  };
+}
+
 // Add a small safety buffer when polling to avoid hitting the server
 // slightly too early due to clock skew / timer drift.
 const OAUTH_POLLING_SAFETY_MARGIN_MS = 3000; // 3 seconds
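As a reading aid for the logger wiring above: a minimal sketch of how `setLogger` and `log` are meant to be used, assuming the `setLogger`/`log` definitions from this hunk are in scope. The stub client below is invented for illustration; only the `client.app.log({ service, level, message, extra })` call shape comes from the diff.

```ts
// Illustrative stand-in for the opencode SDK client handed to the plugin.
const stubClient = {
  app: {
    log: async (entry: {
      service: string;
      level: string;
      message: string;
      extra?: Record<string, any>;
    }) => {
      console.error(`[${entry.service}] ${entry.level}: ${entry.message}`);
    },
  },
};

setLogger(stubClient);
// Fire-and-forget: any rejection from client.app.log is swallowed by .catch(() => {}).
log("info", "copilot-auth plugin initialized", { version: "0.15.20" });
```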
@@ -70,7 +101,155 @@ function calculateRetryDelay(attempt: number): number {
   return Math.round(delay);
 }
 
-export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
+/**
+ * Transform a streaming SSE response to convert reasoning_text/reasoning_opaque
+ * to proper reasoning content parts that the AI SDK can understand.
+ *
+ * Copilot returns reasoning in these fields:
+ * - reasoning_text: The actual reasoning content (visible thinking)
+ * - reasoning_opaque: Encrypted/opaque reasoning data
+ *
+ * We convert these to OpenAI-style content parts with type "reasoning"
+ */
+function transformStreamingResponse(
+  body: ReadableStream<Uint8Array>,
+): ReadableStream<Uint8Array> {
+  const reader = body.getReader();
+  const encoder = new TextEncoder();
+  const decoder = new TextDecoder();
+
+  let buffer = "";
+
+  return new ReadableStream({
+    async pull(controller) {
+      try {
+        const { done, value } = await reader.read();
+
+        if (done) {
+          // Process any remaining buffer
+          if (buffer.trim()) {
+            const transformed = transformSSEChunk(buffer);
+            if (transformed) {
+              controller.enqueue(encoder.encode(transformed));
+            }
+          }
+          controller.close();
+          return;
+        }
+
+        // Decode and add to buffer
+        buffer += decoder.decode(value, { stream: true });
+
+        // Process complete SSE events (separated by double newlines)
+        const events = buffer.split("\n\n");
+        buffer = events.pop() || ""; // Keep incomplete event in buffer
+
+        for (const event of events) {
+          if (event.trim()) {
+            const transformed = transformSSEChunk(event);
+            if (transformed) {
+              controller.enqueue(encoder.encode(transformed + "\n\n"));
+            }
+          }
+        }
+      } catch (error) {
+        controller.error(error);
+      }
+    },
+    cancel() {
+      reader.cancel();
+    },
+  });
+}
+
+/**
+ * Transform a single SSE chunk to include reasoning content
+ */
+function transformSSEChunk(chunk: string): string {
+  // Parse the SSE event
+  const lines = chunk.split("\n");
+  let eventType = "";
+  let data = "";
+
+  for (const line of lines) {
+    if (line.startsWith("event:")) {
+      eventType = line.slice(6).trim();
+    } else if (line.startsWith("data:")) {
+      data = line.slice(5).trim();
+    }
+  }
+
+  // Skip non-data events or [DONE] marker
+  if (!data || data === "[DONE]") {
+    return chunk;
+  }
+
+  try {
+    const json = JSON.parse(data);
+
+    // Check if this chunk has reasoning data
+    if (json.choices && json.choices.length > 0) {
+      const choice = json.choices[0];
+      const delta = choice.delta || {};
+
+      // Extract reasoning fields from Copilot response
+      const reasoningText = delta.reasoning_text || json.reasoning_text;
+
+      if (reasoningText) {
+        log("debug", "Found reasoning_text in streaming response");
+
+        // The AI SDK expects "reasoning" to be a plain string, not an object
+        delta.reasoning = reasoningText;
+
+        // Remove the original reasoning_text field to avoid confusion
+        delete delta.reasoning_text;
+        delete json.reasoning_text;
+
+        // Rebuild the SSE event
+        const newData = JSON.stringify(json);
+        return eventType
+          ? `event: ${eventType}\ndata: ${newData}`
+          : `data: ${newData}`;
+      }
+    }
+  } catch {
+    // Not valid JSON, return as-is
+  }
+
+  return chunk;
+}
+
+/**
+ * Transform a non-streaming JSON response to include reasoning content
+ */
+function transformNonStreamingResponse(json: any): any {
+  if (!json.choices || json.choices.length === 0) {
+    return json;
+  }
+
+  // Check for reasoning in the response
+  const reasoningText = json.reasoning_text;
+
+  if (reasoningText) {
+    log("debug", "Found reasoning_text in non-streaming response");
+
+    // Add reasoning as a plain string to the message
+    const choice = json.choices[0];
+    if (choice.message) {
+      choice.message.reasoning = reasoningText;
+    }
+
+    // Remove original field
+    delete json.reasoning_text;
+  }
+
+  return json;
+}
+
+export const CopilotAuthPlugin: Plugin = async ({ client: sdk }) => {
+  // Initialize logger with the SDK client
+  setLogger(sdk);
+
   return {
     auth: {
       provider: "github-copilot",
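To make the SSE rewrite above concrete, here is a hedged before/after sketch for a single chunk passed through `transformSSEChunk` (assuming the function from this hunk is in scope); the chunk contents are invented.

```ts
// Illustrative input: one SSE event as the Copilot API might stream it for a Claude model.
const incoming =
  'data: {"choices":[{"delta":{"reasoning_text":"Let me check the docs first."}}]}';

// transformSSEChunk parses the JSON payload, moves reasoning_text onto delta.reasoning,
// deletes the original field, and re-serializes the event.
const outgoing = transformSSEChunk(incoming);
// Roughly: 'data: {"choices":[{"delta":{"reasoning":"Let me check the docs first."}}]}'
```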
@@ -78,8 +257,14 @@ export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
         const info = await getAuth();
         if (!info || info.type !== "oauth") return {};
 
+        // Enterprise URL support for baseURL
+        const enterpriseUrl = (info as any).enterpriseUrl;
+        const baseURL = enterpriseUrl
+          ? `https://copilot-api.${normalizeDomain(enterpriseUrl)}`
+          : undefined;
+
         if (provider && provider.models) {
-          for (const model of Object.values(provider.models)) {
+          for (const [modelId, model] of Object.entries(provider.models)) {
             model.cost = {
               input: 0,
               output: 0,
@@ -89,23 +274,16 @@ export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
               },
             };
 
-            // TODO: re-enable once messages api has higher rate limits
-            // Claude routing is disabled to avoid "Not Found" errors and rate limits
-            // const base = baseURL ?? model.api.url ?? "https://api.githubcopilot.com";
-            // const isClaude = model.id.includes("claude");
-            // let url = base;
-            // if (isClaude) {
-            //   if (!url.endsWith("/v1")) {
-            //     url = url.endsWith("/") ? `${url}v1` : `${url}/v1`;
-            //   }
-            // }
-            // model.api.url = url;
-            // model.api.npm = isClaude ? "@ai-sdk/anthropic" : "@ai-sdk/github-copilot";
+            // All models use the standard github-copilot SDK
+            // Reasoning support for Claude models is handled via:
+            // 1. The fetch wrapper adds thinking_budget to request body
+            // 2. Response parsing extracts reasoning_text/reasoning_opaque fields
             model.api.npm = "@ai-sdk/github-copilot";
           }
         }
 
         return {
+          baseURL,
           apiKey: "",
           async fetch(input, init) {
             const info = await getAuth();
@@ -113,6 +291,9 @@ export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
 
             let isAgentCall = false;
             let isVisionRequest = false;
+            let modifiedBody: any = undefined;
+            let isClaudeModel = false;
+
             try {
               const body =
                 typeof init?.body === "string"
@@ -121,6 +302,10 @@ export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
 
               const url = input.toString();
 
+              // Check if this is a Claude model request
+              const modelId = body?.model || "";
+              isClaudeModel = modelId.toLowerCase().includes("claude");
+
               // Completions API
               if (body?.messages && url.includes("completions")) {
                 // Keep local logic: detect if any message is assistant/tool
@@ -132,6 +317,21 @@ export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
                     Array.isArray(msg.content) &&
                     msg.content.some((part: any) => part.type === "image_url"),
                 );
+
+                // For Claude models, add thinking_budget to enable reasoning
+                // The Copilot API accepts this parameter and returns reasoning_text/reasoning_opaque
+                if (isClaudeModel) {
+                  // Use configured thinking_budget from model options, or default to 10000
+                  const thinkingBudget = body.thinking_budget || 10000;
+                  modifiedBody = {
+                    ...body,
+                    thinking_budget: thinkingBudget,
+                  };
+                  log("info", `Adding thinking_budget for Claude model`, {
+                    model: modelId,
+                    thinking_budget: thinkingBudget,
+                  });
+                }
               }
 
               // Responses API
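A small sketch of the body rewrite the Claude branch above performs; the model id and message are placeholders, and `||` mirrors the fallback used in the diff.

```ts
// Illustrative: the completions body before and after the Claude branch runs.
const body: Record<string, any> = {
  model: "claude-sonnet-4", // placeholder id; any model id containing "claude" triggers the branch
  messages: [{ role: "user", content: "hi" }],
  stream: true,
};

const modifiedBody = {
  ...body,
  thinking_budget: body.thinking_budget || 10000, // configured value, or the 10000 default
};
// Later, finalInit serializes modifiedBody with JSON.stringify before fetch is called.
```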
@@ -189,6 +389,13 @@ export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
             delete headers["x-api-key"];
             delete headers["authorization"];
 
+            // Prepare the final init object with potentially modified body
+            const finalInit = {
+              ...init,
+              headers,
+              ...(modifiedBody ? { body: JSON.stringify(modifiedBody) } : {}),
+            };
+
             // Retry logic with exponential backoff for rate limiting
             let lastError: Error | undefined;
             for (
@@ -197,10 +404,7 @@ export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
               attempt++
             ) {
               try {
-                const response = await fetch(input, {
-                  ...init,
-                  headers,
-                });
+                const response = await fetch(input, finalInit);
 
                 // If we get a 429, retry with backoff
                 if (
@@ -208,13 +412,43 @@ export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
                   attempt < RATE_LIMIT_CONFIG.maxRetries
                 ) {
                   const delay = calculateRetryDelay(attempt);
-                  console.log(
-                    `[Copilot] Rate limited (429), retrying in ${delay}ms... (attempt ${attempt + 1}/${RATE_LIMIT_CONFIG.maxRetries})`,
-                  );
+                  log("warn", `Rate limited (429), retrying`, {
+                    delay_ms: delay,
+                    attempt: attempt + 1,
+                    max_retries: RATE_LIMIT_CONFIG.maxRetries,
+                  });
                   await sleep(delay);
                   continue;
                 }
 
+                // For Claude models with streaming, transform the response to include reasoning
+                if (isClaudeModel && response.ok && response.body) {
+                  const contentType =
+                    response.headers.get("content-type") || "";
+                  const isStreaming = contentType.includes("text/event-stream");
+
+                  if (isStreaming) {
+                    // Transform streaming response to convert reasoning_text to reasoning content
+                    const transformedBody = transformStreamingResponse(
+                      response.body,
+                    );
+                    return new Response(transformedBody, {
+                      status: response.status,
+                      statusText: response.statusText,
+                      headers: response.headers,
+                    });
+                  } else {
+                    // Non-streaming: transform JSON response
+                    const json = await response.json();
+                    const transformedJson = transformNonStreamingResponse(json);
+                    return new Response(JSON.stringify(transformedJson), {
+                      status: response.status,
+                      statusText: response.statusText,
+                      headers: response.headers,
+                    });
+                  }
+                }
+
                 return response;
               } catch (error) {
                 lastError = error as Error;
@@ -222,9 +456,12 @@ export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
                 // Network errors might be transient, retry
                 if (attempt < RATE_LIMIT_CONFIG.maxRetries) {
                   const delay = calculateRetryDelay(attempt);
-                  console.log(
-                    `[Copilot] Request failed, retrying in ${delay}ms... (attempt ${attempt + 1}/${RATE_LIMIT_CONFIG.maxRetries})`,
-                  );
+                  log("warn", `Request failed, retrying`, {
+                    delay_ms: delay,
+                    attempt: attempt + 1,
+                    max_retries: RATE_LIMIT_CONFIG.maxRetries,
+                    error: lastError.message,
+                  });
                   await sleep(delay);
                   continue;
                 }
@@ -408,5 +645,33 @@ export const CopilotAuthPlugin: Plugin = async ({ client: _client }) => {
         },
       ],
     },
+    // Hook to add custom headers for Claude reasoning support
+    "chat.headers": async (input: any, output: any) => {
+      // Only apply to GitHub Copilot provider
+      if (!input.model?.providerID?.includes("github-copilot")) return;
+
+      // Add Anthropic beta header for interleaved thinking (extended reasoning)
+      // This is required for Claude models to return thinking blocks
+      if (input.model?.api?.npm === "@ai-sdk/anthropic") {
+        output.headers["anthropic-beta"] = "interleaved-thinking-2025-05-14";
+      }
+
+      // Mark subagent sessions as agent-initiated (matching standard Copilot tools)
+      try {
+        const session = await sdk.session
+          .get({
+            path: {
+              id: input.sessionID,
+            },
+            throwOnError: true,
+          })
+          .catch(() => undefined);
+        if (session?.data?.parentID) {
+          output.headers["x-initiator"] = "agent";
+        }
+      } catch {
+        // Ignore errors from session lookup
+      }
+    },
   };
 };
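For the new "chat.headers" hook, a hedged sketch of the headers a Claude subagent request would end up carrying; the header names and values are taken from the hunk above, while the surrounding objects are invented for illustration.

```ts
// Illustrative: what output.headers looks like after the hook runs for a Claude model
// that is routed through @ai-sdk/anthropic inside a subagent (child) session.
const output = { headers: {} as Record<string, string> };

// Added when input.model.api.npm === "@ai-sdk/anthropic":
output.headers["anthropic-beta"] = "interleaved-thinking-2025-05-14";

// Added when the session lookup returns a parentID, i.e. a subagent session:
output.headers["x-initiator"] = "agent";
```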
package/dist/template/.opencode/plugins/sdk/copilot/chat/convert-to-openai-compatible-chat-messages.ts
@@ -0,0 +1,181 @@
+import {
+  type LanguageModelV2Prompt,
+  type SharedV2ProviderMetadata,
+  UnsupportedFunctionalityError,
+} from "@ai-sdk/provider";
+import { convertToBase64 } from "@ai-sdk/provider-utils";
+import type { OpenAICompatibleChatPrompt } from "./openai-compatible-api-types.js";
+
+function getOpenAIMetadata(message: {
+  providerOptions?: SharedV2ProviderMetadata;
+}) {
+  return message?.providerOptions?.copilot ?? {};
+}
+
+export function convertToOpenAICompatibleChatMessages(
+  prompt: LanguageModelV2Prompt,
+): OpenAICompatibleChatPrompt {
+  const messages: OpenAICompatibleChatPrompt = [];
+  for (const { role, content, ...message } of prompt) {
+    const metadata = getOpenAIMetadata({ ...message });
+    switch (role) {
+      case "system": {
+        messages.push({
+          role: "system",
+          content: [
+            {
+              type: "text",
+              text: content,
+            },
+          ],
+          ...metadata,
+        });
+        break;
+      }
+
+      case "user": {
+        if (content.length === 1 && content[0].type === "text") {
+          messages.push({
+            role: "user",
+            content: content[0].text,
+            ...getOpenAIMetadata(content[0]),
+          });
+          break;
+        }
+
+        messages.push({
+          role: "user",
+          content: content.map((part) => {
+            const partMetadata = getOpenAIMetadata(part);
+            switch (part.type) {
+              case "text": {
+                return {
+                  type: "text" as const,
+                  text: part.text,
+                  ...partMetadata,
+                };
+              }
+              case "file": {
+                if (part.mediaType.startsWith("image/")) {
+                  const mediaType =
+                    part.mediaType === "image/*"
+                      ? "image/jpeg"
+                      : part.mediaType;
+
+                  return {
+                    type: "image_url" as const,
+                    image_url: {
+                      url:
+                        part.data instanceof URL
+                          ? part.data.toString()
+                          : `data:${mediaType};base64,${convertToBase64(part.data)}`,
+                    },
+                    ...partMetadata,
+                  };
+                } else {
+                  throw new UnsupportedFunctionalityError({
+                    functionality: `file part media type ${part.mediaType}`,
+                  });
+                }
+              }
+            }
+          }),
+          ...metadata,
+        });
+
+        break;
+      }
+
+      case "assistant": {
+        let text = "";
+        let reasoningText: string | undefined;
+        let reasoningOpaque: string | undefined;
+        const toolCalls: Array<{
+          id: string;
+          type: "function";
+          function: { name: string; arguments: string };
+        }> = [];
+
+        for (const part of content) {
+          const partMetadata = getOpenAIMetadata(part);
+          // Check for reasoningOpaque on any part (may be attached to text/tool-call)
+          const partOpaque = (
+            part.providerOptions as { copilot?: { reasoningOpaque?: string } }
+          )?.copilot?.reasoningOpaque;
+          if (partOpaque && !reasoningOpaque) {
+            reasoningOpaque = partOpaque;
+          }
+
+          switch (part.type) {
+            case "text": {
+              text += part.text;
+              break;
+            }
+            case "reasoning": {
+              reasoningText = part.text;
+              break;
+            }
+            case "tool-call": {
+              toolCalls.push({
+                id: part.toolCallId,
+                type: "function",
+                function: {
+                  name: part.toolName,
+                  arguments: JSON.stringify(part.input),
+                },
+                ...partMetadata,
+              });
+              break;
+            }
+          }
+        }
+
+        messages.push({
+          role: "assistant",
+          content: text || null,
+          tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
+          reasoning_text: reasoningText,
+          reasoning_opaque: reasoningOpaque,
+          ...metadata,
+        });
+
+        break;
+      }
+
+      case "tool": {
+        for (const toolResponse of content) {
+          const output = toolResponse.output;
+
+          let contentValue: string;
+          switch (output.type) {
+            case "text":
+            case "error-text":
+              contentValue = output.value;
+              break;
+            case "content":
+            case "json":
+            case "error-json":
+              contentValue = JSON.stringify(output.value);
+              break;
+          }
+
+          const toolResponseMetadata = getOpenAIMetadata(toolResponse);
+          messages.push({
+            role: "tool",
+            tool_call_id: toolResponse.toolCallId,
+            content: contentValue,
+            ...toolResponseMetadata,
+          });
+        }
+        break;
+      }
+
+      default: {
+        const _exhaustiveCheck: never = role;
+        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+      }
+    }
+  }
+
+  return messages;
+}
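A hedged usage sketch for the converter above: the input prompt is a made-up LanguageModelV2Prompt, the expected output is paraphrased rather than taken from a real run, and the import path assumes a file sitting next to this module in the sdk/copilot/chat directory.

```ts
import { convertToOpenAICompatibleChatMessages } from "./convert-to-openai-compatible-chat-messages.js";

// Illustrative prompt: one system turn and one assistant turn that carries reasoning.
const messages = convertToOpenAICompatibleChatMessages([
  { role: "system", content: "You are terse." },
  {
    role: "assistant",
    content: [
      { type: "reasoning", text: "The user wants short answers." },
      { type: "text", text: "Understood." },
    ],
  },
]);

// Roughly:
// [
//   { role: "system", content: [{ type: "text", text: "You are terse." }] },
//   { role: "assistant", content: "Understood.", tool_calls: undefined,
//     reasoning_text: "The user wants short answers.", reasoning_opaque: undefined },
// ]
```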
package/dist/template/.opencode/plugins/sdk/copilot/chat/get-response-metadata.ts
@@ -0,0 +1,15 @@
+export function getResponseMetadata({
+  id,
+  model,
+  created,
+}: {
+  id?: string | undefined | null;
+  created?: number | undefined | null;
+  model?: string | undefined | null;
+}) {
+  return {
+    id: id ?? undefined,
+    modelId: model ?? undefined,
+    timestamp: created != null ? new Date(created * 1000) : undefined,
+  };
+}
package/dist/template/.opencode/plugins/sdk/copilot/chat/map-openai-compatible-finish-reason.ts
@@ -0,0 +1,19 @@
+import type { LanguageModelV2FinishReason } from "@ai-sdk/provider";
+
+export function mapOpenAICompatibleFinishReason(
+  finishReason: string | null | undefined,
+): LanguageModelV2FinishReason {
+  switch (finishReason) {
+    case "stop":
+      return "stop";
+    case "length":
+      return "length";
+    case "content_filter":
+      return "content-filter";
+    case "function_call":
+    case "tool_calls":
+      return "tool-calls";
+    default:
+      return "unknown";
+  }
+}
package/dist/template/.opencode/plugins/sdk/copilot/chat/openai-compatible-api-types.ts
@@ -0,0 +1,72 @@
+import type { JSONValue } from "@ai-sdk/provider";
+
+export type OpenAICompatibleChatPrompt = Array<OpenAICompatibleMessage>;
+
+export type OpenAICompatibleMessage =
+  | OpenAICompatibleSystemMessage
+  | OpenAICompatibleUserMessage
+  | OpenAICompatibleAssistantMessage
+  | OpenAICompatibleToolMessage;
+
+// Allow for arbitrary additional properties for general purpose
+// provider-metadata-specific extensibility.
+type JsonRecord<T = never> = Record<
+  string,
+  JSONValue | JSONValue[] | T | T[] | undefined
+>;
+
+export interface OpenAICompatibleSystemMessage
+  extends JsonRecord<OpenAICompatibleSystemContentPart> {
+  role: "system";
+  content: string | Array<OpenAICompatibleSystemContentPart>;
+}
+
+export interface OpenAICompatibleSystemContentPart extends JsonRecord {
+  type: "text";
+  text: string;
+}
+
+export interface OpenAICompatibleUserMessage
+  extends JsonRecord<OpenAICompatibleContentPart> {
+  role: "user";
+  content: string | Array<OpenAICompatibleContentPart>;
+}
+
+export type OpenAICompatibleContentPart =
+  | OpenAICompatibleContentPartText
+  | OpenAICompatibleContentPartImage;
+
+export interface OpenAICompatibleContentPartImage extends JsonRecord {
+  type: "image_url";
+  image_url: { url: string };
+}
+
+export interface OpenAICompatibleContentPartText extends JsonRecord {
+  type: "text";
+  text: string;
+}
+
+export interface OpenAICompatibleAssistantMessage
+  extends JsonRecord<OpenAICompatibleMessageToolCall> {
+  role: "assistant";
+  content?: string | null;
+  tool_calls?: Array<OpenAICompatibleMessageToolCall>;
+  // Copilot-specific reasoning fields
+  reasoning_text?: string;
+  reasoning_opaque?: string;
+}
+
+export interface OpenAICompatibleMessageToolCall extends JsonRecord {
+  type: "function";
+  id: string;
+  function: {
+    arguments: string;
+    name: string;
+  };
+}
+
+export interface OpenAICompatibleToolMessage extends JsonRecord {
+  role: "tool";
+  content: string;
+  tool_call_id: string;
+}
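To show how the Copilot-specific reasoning fields ride along on an otherwise standard OpenAI-style message, here is a small illustrative value typed against the interfaces above; the strings are invented, and the import path assumes a neighbouring module in the same directory.

```ts
import type { OpenAICompatibleAssistantMessage } from "./openai-compatible-api-types.js";

// Illustrative: an assistant turn that echoes reasoning back to the Copilot API.
// The JsonRecord base type is what lets extra provider-metadata keys pass through.
const assistantTurn: OpenAICompatibleAssistantMessage = {
  role: "assistant",
  content: "Understood.",
  reasoning_text: "The user wants short answers.",
  reasoning_opaque: "<opaque blob returned by a previous response>",
};
```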