@copilotkit/runtime 1.52.0 → 1.52.1-next.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/CHANGELOG.md +8 -0
  2. package/dist/index.cjs +2 -0
  3. package/dist/index.d.cts +2 -1
  4. package/dist/index.d.mts +2 -1
  5. package/dist/index.mjs +2 -1
  6. package/dist/lib/runtime/copilot-runtime.cjs +4 -1
  7. package/dist/lib/runtime/copilot-runtime.cjs.map +1 -1
  8. package/dist/lib/runtime/copilot-runtime.d.cts.map +1 -1
  9. package/dist/lib/runtime/copilot-runtime.d.mts.map +1 -1
  10. package/dist/lib/runtime/copilot-runtime.mjs +4 -1
  11. package/dist/lib/runtime/copilot-runtime.mjs.map +1 -1
  12. package/dist/package.cjs +4 -1
  13. package/dist/package.mjs +4 -1
  14. package/dist/service-adapters/anthropic/anthropic-adapter.cjs +12 -0
  15. package/dist/service-adapters/anthropic/anthropic-adapter.cjs.map +1 -1
  16. package/dist/service-adapters/anthropic/anthropic-adapter.d.cts +2 -0
  17. package/dist/service-adapters/anthropic/anthropic-adapter.d.cts.map +1 -1
  18. package/dist/service-adapters/anthropic/anthropic-adapter.d.mts +2 -0
  19. package/dist/service-adapters/anthropic/anthropic-adapter.d.mts.map +1 -1
  20. package/dist/service-adapters/anthropic/anthropic-adapter.mjs +12 -0
  21. package/dist/service-adapters/anthropic/anthropic-adapter.mjs.map +1 -1
  22. package/dist/service-adapters/groq/groq-adapter.cjs +13 -0
  23. package/dist/service-adapters/groq/groq-adapter.cjs.map +1 -1
  24. package/dist/service-adapters/groq/groq-adapter.d.cts +2 -0
  25. package/dist/service-adapters/groq/groq-adapter.d.cts.map +1 -1
  26. package/dist/service-adapters/groq/groq-adapter.d.mts +2 -0
  27. package/dist/service-adapters/groq/groq-adapter.d.mts.map +1 -1
  28. package/dist/service-adapters/groq/groq-adapter.mjs +13 -0
  29. package/dist/service-adapters/groq/groq-adapter.mjs.map +1 -1
  30. package/dist/service-adapters/index.d.cts +1 -0
  31. package/dist/service-adapters/index.d.mts +1 -0
  32. package/dist/service-adapters/openai/openai-adapter.cjs +14 -0
  33. package/dist/service-adapters/openai/openai-adapter.cjs.map +1 -1
  34. package/dist/service-adapters/openai/openai-adapter.d.cts +2 -0
  35. package/dist/service-adapters/openai/openai-adapter.d.cts.map +1 -1
  36. package/dist/service-adapters/openai/openai-adapter.d.mts +2 -0
  37. package/dist/service-adapters/openai/openai-adapter.d.mts.map +1 -1
  38. package/dist/service-adapters/openai/openai-adapter.mjs +14 -0
  39. package/dist/service-adapters/openai/openai-adapter.mjs.map +1 -1
  40. package/dist/service-adapters/service-adapter.d.cts +8 -0
  41. package/dist/service-adapters/service-adapter.d.cts.map +1 -1
  42. package/dist/service-adapters/service-adapter.d.mts +8 -0
  43. package/dist/service-adapters/service-adapter.d.mts.map +1 -1
  44. package/dist/service-adapters/shared/index.d.mts +2 -1
  45. package/dist/service-adapters/shared/sdk-client-utils.cjs +17 -0
  46. package/dist/service-adapters/shared/sdk-client-utils.cjs.map +1 -0
  47. package/dist/service-adapters/shared/sdk-client-utils.d.cts +14 -0
  48. package/dist/service-adapters/shared/sdk-client-utils.d.cts.map +1 -0
  49. package/dist/service-adapters/shared/sdk-client-utils.d.mts +14 -0
  50. package/dist/service-adapters/shared/sdk-client-utils.d.mts.map +1 -0
  51. package/dist/service-adapters/shared/sdk-client-utils.mjs +16 -0
  52. package/dist/service-adapters/shared/sdk-client-utils.mjs.map +1 -0
  53. package/package.json +7 -4
  54. package/src/lib/runtime/copilot-runtime.ts +4 -3
  55. package/src/service-adapters/anthropic/anthropic-adapter.ts +15 -1
  56. package/src/service-adapters/groq/groq-adapter.ts +16 -1
  57. package/src/service-adapters/openai/openai-adapter.ts +17 -1
  58. package/src/service-adapters/service-adapter.ts +9 -0
  59. package/src/service-adapters/shared/index.ts +1 -0
  60. package/src/service-adapters/shared/sdk-client-utils.ts +19 -0
  61. package/tests/service-adapters/anthropic/anthropic-adapter-language-model.test.ts +101 -0
  62. package/tests/service-adapters/groq/groq-adapter-language-model.test.ts +102 -0
  63. package/tests/service-adapters/openai/openai-adapter-language-model.test.ts +122 -0
  64. package/tests/service-adapters/shared/sdk-client-utils.test.ts +36 -0
@@ -1,5 +1,6 @@
1
1
 
2
2
  import { CopilotRuntimeChatCompletionRequest, CopilotRuntimeChatCompletionResponse, CopilotServiceAdapter } from "../service-adapter.cjs";
3
+ import { LanguageModel } from "ai";
3
4
  import OpenAI from "openai";
4
5
 
5
6
  //#region src/service-adapters/openai/openai-adapter.d.ts
@@ -39,6 +40,7 @@ declare class OpenAIAdapter implements CopilotServiceAdapter {
39
40
  get openai(): OpenAI;
40
41
  get name(): string;
41
42
  constructor(params?: OpenAIAdapterParams);
43
+ getLanguageModel(): LanguageModel;
42
44
  private ensureOpenAI;
43
45
  process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
44
46
  }
@@ -1 +1 @@
1
- {"version":3,"file":"openai-adapter.d.cts","names":[],"sources":["../../../src/service-adapters/openai/openai-adapter.ts"],"mappings":";;;;;UAkEiB,mBAAA;;;;;EAKf,MAAA,GAAS,MAAA;;;;EAKT,KAAA;;;;;;;;;EAUA,wBAAA;;;;;;;EAQA,cAAA;AAAA;AAAA,cAGW,aAAA,YAAyB,qBAAA;EAC7B,KAAA;EACA,QAAA;EAAA,QAEC,wBAAA;EAAA,QACA,OAAA;EAAA,QACA,cAAA;EAAA,IAEG,MAAA,CAAA,GAAU,MAAA;EAAA,IAGV,IAAA,CAAA;cAIC,MAAA,GAAS,mBAAA;EAAA,QAab,YAAA;EASF,OAAA,CACJ,OAAA,EAAS,mCAAA,GACR,OAAA,CAAQ,oCAAA;AAAA"}
1
+ {"version":3,"file":"openai-adapter.d.cts","names":[],"sources":["../../../src/service-adapters/openai/openai-adapter.ts"],"mappings":";;;;;;UAoEiB,mBAAA;;;;;EAKf,MAAA,GAAS,MAAA;;;;EAKT,KAAA;;;;;;;;;EAUA,wBAAA;;;;;;;EAQA,cAAA;AAAA;AAAA,cAGW,aAAA,YAAyB,qBAAA;EAC7B,KAAA;EACA,QAAA;EAAA,QAEC,wBAAA;EAAA,QACA,OAAA;EAAA,QACA,cAAA;EAAA,IAEG,MAAA,CAAA,GAAU,MAAA;EAAA,IAGV,IAAA,CAAA;cAIC,MAAA,GAAS,mBAAA;EAarB,gBAAA,CAAA,GAAoB,aAAA;EAAA,QAcZ,YAAA;EASF,OAAA,CACJ,OAAA,EAAS,mCAAA,GACR,OAAA,CAAQ,oCAAA;AAAA"}
@@ -1,6 +1,7 @@
1
1
  import "reflect-metadata";
2
2
  import { CopilotRuntimeChatCompletionRequest, CopilotRuntimeChatCompletionResponse, CopilotServiceAdapter } from "../service-adapter.mjs";
3
3
  import OpenAI from "openai";
4
+ import { LanguageModel } from "ai";
4
5
 
5
6
  //#region src/service-adapters/openai/openai-adapter.d.ts
6
7
  interface OpenAIAdapterParams {
@@ -39,6 +40,7 @@ declare class OpenAIAdapter implements CopilotServiceAdapter {
39
40
  get openai(): OpenAI;
40
41
  get name(): string;
41
42
  constructor(params?: OpenAIAdapterParams);
43
+ getLanguageModel(): LanguageModel;
42
44
  private ensureOpenAI;
43
45
  process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
44
46
  }
@@ -1 +1 @@
1
- {"version":3,"file":"openai-adapter.d.mts","names":[],"sources":["../../../src/service-adapters/openai/openai-adapter.ts"],"mappings":";;;;;UAkEiB,mBAAA;;;;;EAKf,MAAA,GAAS,MAAA;;;;EAKT,KAAA;;;;;;;;;EAUA,wBAAA;;;;;;;EAQA,cAAA;AAAA;AAAA,cAGW,aAAA,YAAyB,qBAAA;EAC7B,KAAA;EACA,QAAA;EAAA,QAEC,wBAAA;EAAA,QACA,OAAA;EAAA,QACA,cAAA;EAAA,IAEG,MAAA,CAAA,GAAU,MAAA;EAAA,IAGV,IAAA,CAAA;cAIC,MAAA,GAAS,mBAAA;EAAA,QAab,YAAA;EASF,OAAA,CACJ,OAAA,EAAS,mCAAA,GACR,OAAA,CAAQ,oCAAA;AAAA"}
1
+ {"version":3,"file":"openai-adapter.d.mts","names":[],"sources":["../../../src/service-adapters/openai/openai-adapter.ts"],"mappings":";;;;;;UAoEiB,mBAAA;;;;;EAKf,MAAA,GAAS,MAAA;;;;EAKT,KAAA;;;;;;;;;EAUA,wBAAA;;;;;;;EAQA,cAAA;AAAA;AAAA,cAGW,aAAA,YAAyB,qBAAA;EAC7B,KAAA;EACA,QAAA;EAAA,QAEC,wBAAA;EAAA,QACA,OAAA;EAAA,QACA,cAAA;EAAA,IAEG,MAAA,CAAA,GAAU,MAAA;EAAA,IAGV,IAAA,CAAA;cAIC,MAAA,GAAS,mBAAA;EAarB,gBAAA,CAAA,GAAoB,aAAA;EAAA,QAcZ,YAAA;EASF,OAAA,CACJ,OAAA,EAAS,mCAAA,GACR,OAAA,CAAQ,oCAAA;AAAA"}
@@ -2,6 +2,8 @@ import "reflect-metadata";
2
2
  import { __require } from "../../_virtual/_rolldown/runtime.mjs";
3
3
  import { convertActionInputToOpenAITool, convertMessageToOpenAIMessage, limitMessagesToTokenCount } from "./utils.mjs";
4
4
  import { convertServiceAdapterError } from "../shared/error-utils.mjs";
5
+ import { getSdkClientOptions } from "../shared/sdk-client-utils.mjs";
6
+ import { createOpenAI } from "@ai-sdk/openai";
5
7
  import { randomUUID } from "@copilotkit/shared";
6
8
 
7
9
  //#region src/service-adapters/openai/openai-adapter.ts
@@ -23,6 +25,18 @@ var OpenAIAdapter = class {
23
25
  this.disableParallelToolCalls = params?.disableParallelToolCalls || false;
24
26
  this.keepSystemRole = params?.keepSystemRole ?? false;
25
27
  }
28
+ getLanguageModel() {
29
+ const openai = this.ensureOpenAI();
30
+ const options = getSdkClientOptions(openai);
31
+ return createOpenAI({
32
+ baseURL: openai.baseURL,
33
+ apiKey: openai.apiKey,
34
+ organization: openai.organization ?? void 0,
35
+ project: openai.project ?? void 0,
36
+ headers: options.defaultHeaders,
37
+ fetch: options.fetch
38
+ })(this.model);
39
+ }
26
40
  ensureOpenAI() {
27
41
  if (!this._openai) {
28
42
  const OpenAI = __require("openai").default;
@@ -1 +1 @@
1
- {"version":3,"file":"openai-adapter.mjs","names":[],"sources":["../../../src/service-adapters/openai/openai-adapter.ts"],"sourcesContent":["/**\n * Copilot Runtime adapter for OpenAI.\n *\n * ## Example\n *\n * ```ts\n * import { CopilotRuntime, OpenAIAdapter } from \"@copilotkit/runtime\";\n * import OpenAI from \"openai\";\n *\n * const copilotKit = new CopilotRuntime();\n *\n * const openai = new OpenAI({\n * organization: \"<your-organization-id>\", // optional\n * apiKey: \"<your-api-key>\",\n * });\n *\n * return new OpenAIAdapter({ openai });\n * ```\n *\n * ## Example with Azure OpenAI\n *\n * ```ts\n * import { CopilotRuntime, OpenAIAdapter } from \"@copilotkit/runtime\";\n * import OpenAI from \"openai\";\n *\n * // The name of your Azure OpenAI Instance.\n * // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource\n * const instance = \"<your instance name>\";\n *\n * // Corresponds to your Model deployment within your OpenAI resource, e.g. 
my-gpt35-16k-deployment\n * // Navigate to the Azure OpenAI Studio to deploy a model.\n * const model = \"<your model>\";\n *\n * const apiKey = process.env[\"AZURE_OPENAI_API_KEY\"];\n * if (!apiKey) {\n * throw new Error(\"The AZURE_OPENAI_API_KEY environment variable is missing or empty.\");\n * }\n *\n * const copilotKit = new CopilotRuntime();\n *\n * const openai = new OpenAI({\n * apiKey,\n * baseURL: `https://${instance}.openai.azure.com/openai/deployments/${model}`,\n * defaultQuery: { \"api-version\": \"2024-04-01-preview\" },\n * defaultHeaders: { \"api-key\": apiKey },\n * });\n *\n * return new OpenAIAdapter({ openai });\n * ```\n */\nimport type OpenAI from \"openai\";\nimport {\n CopilotServiceAdapter,\n CopilotRuntimeChatCompletionRequest,\n CopilotRuntimeChatCompletionResponse,\n} from \"../service-adapter\";\nimport {\n convertActionInputToOpenAITool,\n convertMessageToOpenAIMessage,\n limitMessagesToTokenCount,\n} from \"./utils\";\nimport { randomUUID } from \"@copilotkit/shared\";\nimport { convertServiceAdapterError } from \"../shared\";\n\nconst DEFAULT_MODEL = \"gpt-4o\";\n\nexport interface OpenAIAdapterParams {\n /**\n * An optional OpenAI instance to use. If not provided, a new instance will be\n * created.\n */\n openai?: OpenAI;\n\n /**\n * The model to use.\n */\n model?: string;\n\n /**\n * Whether to disable parallel tool calls.\n * You can disable parallel tool calls to force the model to execute tool calls sequentially.\n * This is useful if you want to execute tool calls in a specific order so that the state changes\n * introduced by one tool call are visible to the next tool call. (i.e. 
new actions or readables)\n *\n * @default false\n */\n disableParallelToolCalls?: boolean;\n\n /**\n * Whether to keep the role in system messages as \"System\".\n * By default, it is converted to \"developer\", which is used by newer OpenAI models\n *\n * @default false\n */\n keepSystemRole?: boolean;\n}\n\nexport class OpenAIAdapter implements CopilotServiceAdapter {\n public model: string = DEFAULT_MODEL;\n public provider = \"openai\";\n\n private disableParallelToolCalls: boolean = false;\n private _openai: OpenAI;\n private keepSystemRole: boolean = false;\n\n public get openai(): OpenAI {\n return this._openai;\n }\n public get name() {\n return \"OpenAIAdapter\";\n }\n\n constructor(params?: OpenAIAdapterParams) {\n if (params?.openai) {\n this._openai = params.openai;\n }\n // If no instance provided, we'll lazy-load in ensureOpenAI()\n\n if (params?.model) {\n this.model = params.model;\n }\n this.disableParallelToolCalls = params?.disableParallelToolCalls || false;\n this.keepSystemRole = params?.keepSystemRole ?? false;\n }\n\n private ensureOpenAI(): OpenAI {\n if (!this._openai) {\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n const OpenAI = require(\"openai\").default;\n this._openai = new OpenAI();\n }\n return this._openai;\n }\n\n async process(\n request: CopilotRuntimeChatCompletionRequest,\n ): Promise<CopilotRuntimeChatCompletionResponse> {\n const {\n threadId: threadIdFromRequest,\n model = this.model,\n messages,\n actions,\n eventSource,\n forwardedParameters,\n } = request;\n const tools = actions.map(convertActionInputToOpenAITool);\n const threadId = threadIdFromRequest ?? 
randomUUID();\n\n // ALLOWLIST APPROACH: Only include tool_result messages that correspond to valid tool_calls\n // Step 1: Extract valid tool_call IDs\n const validToolUseIds = new Set<string>();\n\n for (const message of messages) {\n if (message.isActionExecutionMessage()) {\n validToolUseIds.add(message.id);\n }\n }\n\n // Step 2: Filter messages, keeping only those with valid tool_call IDs\n const filteredMessages = messages.filter((message) => {\n if (message.isResultMessage()) {\n // Skip if there's no corresponding tool_call\n if (!validToolUseIds.has(message.actionExecutionId)) {\n return false;\n }\n\n // Remove this ID from valid IDs so we don't process duplicates\n validToolUseIds.delete(message.actionExecutionId);\n return true;\n }\n\n // Keep all non-tool-result messages\n return true;\n });\n\n let openaiMessages = filteredMessages.map((m) =>\n convertMessageToOpenAIMessage(m, { keepSystemRole: this.keepSystemRole }),\n );\n openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);\n\n let toolChoice: any = forwardedParameters?.toolChoice;\n if (forwardedParameters?.toolChoice === \"function\") {\n toolChoice = {\n type: \"function\",\n function: { name: forwardedParameters.toolChoiceFunctionName },\n };\n }\n\n try {\n const openai = this.ensureOpenAI();\n const stream = openai.beta.chat.completions.stream({\n model: model,\n stream: true,\n messages: openaiMessages,\n ...(tools.length > 0 && { tools }),\n ...(forwardedParameters?.maxTokens && {\n max_completion_tokens: forwardedParameters.maxTokens,\n }),\n ...(forwardedParameters?.stop && { stop: forwardedParameters.stop }),\n ...(toolChoice && { tool_choice: toolChoice }),\n ...(this.disableParallelToolCalls && { parallel_tool_calls: false }),\n ...(forwardedParameters?.temperature && {\n temperature: forwardedParameters.temperature,\n }),\n });\n\n eventSource.stream(async (eventStream$) => {\n let mode: \"function\" | \"message\" | null = null;\n let currentMessageId: string;\n 
let currentToolCallId: string;\n\n try {\n for await (const chunk of stream) {\n if (chunk.choices.length === 0) {\n continue;\n }\n\n const toolCall = chunk.choices[0].delta.tool_calls?.[0];\n const content = chunk.choices[0].delta.content;\n\n // When switching from message to function or vice versa,\n // send the respective end event.\n // If toolCall?.id is defined, it means a new tool call starts.\n if (mode === \"message\" && toolCall?.id) {\n mode = null;\n eventStream$.sendTextMessageEnd({ messageId: currentMessageId });\n } else if (\n mode === \"function\" &&\n (toolCall === undefined || toolCall?.id)\n ) {\n mode = null;\n eventStream$.sendActionExecutionEnd({\n actionExecutionId: currentToolCallId,\n });\n }\n\n // If we send a new message type, send the appropriate start event.\n if (mode === null) {\n if (toolCall?.id) {\n mode = \"function\";\n currentToolCallId = toolCall!.id;\n eventStream$.sendActionExecutionStart({\n actionExecutionId: currentToolCallId,\n parentMessageId: chunk.id,\n actionName: toolCall!.function!.name,\n });\n } else if (content) {\n mode = \"message\";\n currentMessageId = chunk.id;\n eventStream$.sendTextMessageStart({\n messageId: currentMessageId,\n });\n }\n }\n\n // send the content events\n if (mode === \"message\" && content) {\n eventStream$.sendTextMessageContent({\n messageId: currentMessageId,\n content: content,\n });\n } else if (mode === \"function\" && toolCall?.function?.arguments) {\n eventStream$.sendActionExecutionArgs({\n actionExecutionId: currentToolCallId,\n args: toolCall.function.arguments,\n });\n }\n }\n\n // send the end events\n if (mode === \"message\") {\n eventStream$.sendTextMessageEnd({ messageId: currentMessageId });\n } else if (mode === \"function\") {\n eventStream$.sendActionExecutionEnd({\n actionExecutionId: currentToolCallId,\n });\n }\n } catch (error) {\n console.error(\"[OpenAI] Error during API call:\", error);\n throw convertServiceAdapterError(error, \"OpenAI\");\n }\n\n 
eventStream$.complete();\n });\n } catch (error) {\n console.error(\"[OpenAI] Error during API call:\", error);\n throw convertServiceAdapterError(error, \"OpenAI\");\n }\n\n return {\n threadId,\n };\n }\n}\n"],"mappings":";;;;;;;AAgEA,MAAM,gBAAgB;AAiCtB,IAAa,gBAAb,MAA4D;CAQ1D,IAAW,SAAiB;AAC1B,SAAO,KAAK;;CAEd,IAAW,OAAO;AAChB,SAAO;;CAGT,YAAY,QAA8B;eAdnB;kBACL;kCAE0B;wBAEV;AAUhC,MAAI,QAAQ,OACV,MAAK,UAAU,OAAO;AAIxB,MAAI,QAAQ,MACV,MAAK,QAAQ,OAAO;AAEtB,OAAK,2BAA2B,QAAQ,4BAA4B;AACpE,OAAK,iBAAiB,QAAQ,kBAAkB;;CAGlD,AAAQ,eAAuB;AAC7B,MAAI,CAAC,KAAK,SAAS;GAEjB,MAAM,mBAAiB,SAAS,CAAC;AACjC,QAAK,UAAU,IAAI,QAAQ;;AAE7B,SAAO,KAAK;;CAGd,MAAM,QACJ,SAC+C;EAC/C,MAAM,EACJ,UAAU,qBACV,QAAQ,KAAK,OACb,UACA,SACA,aACA,wBACE;EACJ,MAAM,QAAQ,QAAQ,IAAI,+BAA+B;EACzD,MAAM,WAAW,uBAAuB,YAAY;EAIpD,MAAM,kCAAkB,IAAI,KAAa;AAEzC,OAAK,MAAM,WAAW,SACpB,KAAI,QAAQ,0BAA0B,CACpC,iBAAgB,IAAI,QAAQ,GAAG;EAqBnC,IAAI,iBAhBqB,SAAS,QAAQ,YAAY;AACpD,OAAI,QAAQ,iBAAiB,EAAE;AAE7B,QAAI,CAAC,gBAAgB,IAAI,QAAQ,kBAAkB,CACjD,QAAO;AAIT,oBAAgB,OAAO,QAAQ,kBAAkB;AACjD,WAAO;;AAIT,UAAO;IACP,CAEoC,KAAK,MACzC,8BAA8B,GAAG,EAAE,gBAAgB,KAAK,gBAAgB,CAAC,CAC1E;AACD,mBAAiB,0BAA0B,gBAAgB,OAAO,MAAM;EAExE,IAAI,aAAkB,qBAAqB;AAC3C,MAAI,qBAAqB,eAAe,WACtC,cAAa;GACX,MAAM;GACN,UAAU,EAAE,MAAM,oBAAoB,wBAAwB;GAC/D;AAGH,MAAI;GAEF,MAAM,SADS,KAAK,cAAc,CACZ,KAAK,KAAK,YAAY,OAAO;IAC1C;IACP,QAAQ;IACR,UAAU;IACV,GAAI,MAAM,SAAS,KAAK,EAAE,OAAO;IACjC,GAAI,qBAAqB,aAAa,EACpC,uBAAuB,oBAAoB,WAC5C;IACD,GAAI,qBAAqB,QAAQ,EAAE,MAAM,oBAAoB,MAAM;IACnE,GAAI,cAAc,EAAE,aAAa,YAAY;IAC7C,GAAI,KAAK,4BAA4B,EAAE,qBAAqB,OAAO;IACnE,GAAI,qBAAqB,eAAe,EACtC,aAAa,oBAAoB,aAClC;IACF,CAAC;AAEF,eAAY,OAAO,OAAO,iBAAiB;IACzC,IAAI,OAAsC;IAC1C,IAAI;IACJ,IAAI;AAEJ,QAAI;AACF,gBAAW,MAAM,SAAS,QAAQ;AAChC,UAAI,MAAM,QAAQ,WAAW,EAC3B;MAGF,MAAM,WAAW,MAAM,QAAQ,GAAG,MAAM,aAAa;MACrD,MAAM,UAAU,MAAM,QAAQ,GAAG,MAAM;AAKvC,UAAI,SAAS,aAAa,UAAU,IAAI;AACtC,cAAO;AACP,oBAAa,mBAAmB,EAAE,WAAW,kBAAkB,CAAC;iBAEhE,SAAS,eACR,aAAa,UAAa,UAAU,KACrC;AACA,cAAO;AACP,oBAAa,uBAAuB,EAClC,mBAAmB,mBACpB,CAAC;;AAIJ,UAAI,SAAS,MACX;WAAI,UAAU,
IAAI;AAChB,eAAO;AACP,4BAAoB,SAAU;AAC9B,qBAAa,yBAAyB;SACpC,mBAAmB;SACnB,iBAAiB,MAAM;SACvB,YAAY,SAAU,SAAU;SACjC,CAAC;kBACO,SAAS;AAClB,eAAO;AACP,2BAAmB,MAAM;AACzB,qBAAa,qBAAqB,EAChC,WAAW,kBACZ,CAAC;;;AAKN,UAAI,SAAS,aAAa,QACxB,cAAa,uBAAuB;OAClC,WAAW;OACF;OACV,CAAC;eACO,SAAS,cAAc,UAAU,UAAU,UACpD,cAAa,wBAAwB;OACnC,mBAAmB;OACnB,MAAM,SAAS,SAAS;OACzB,CAAC;;AAKN,SAAI,SAAS,UACX,cAAa,mBAAmB,EAAE,WAAW,kBAAkB,CAAC;cACvD,SAAS,WAClB,cAAa,uBAAuB,EAClC,mBAAmB,mBACpB,CAAC;aAEG,OAAO;AACd,aAAQ,MAAM,mCAAmC,MAAM;AACvD,WAAM,2BAA2B,OAAO,SAAS;;AAGnD,iBAAa,UAAU;KACvB;WACK,OAAO;AACd,WAAQ,MAAM,mCAAmC,MAAM;AACvD,SAAM,2BAA2B,OAAO,SAAS;;AAGnD,SAAO,EACL,UACD"}
1
+ {"version":3,"file":"openai-adapter.mjs","names":[],"sources":["../../../src/service-adapters/openai/openai-adapter.ts"],"sourcesContent":["/**\n * Copilot Runtime adapter for OpenAI.\n *\n * ## Example\n *\n * ```ts\n * import { CopilotRuntime, OpenAIAdapter } from \"@copilotkit/runtime\";\n * import OpenAI from \"openai\";\n *\n * const copilotKit = new CopilotRuntime();\n *\n * const openai = new OpenAI({\n * organization: \"<your-organization-id>\", // optional\n * apiKey: \"<your-api-key>\",\n * });\n *\n * return new OpenAIAdapter({ openai });\n * ```\n *\n * ## Example with Azure OpenAI\n *\n * ```ts\n * import { CopilotRuntime, OpenAIAdapter } from \"@copilotkit/runtime\";\n * import OpenAI from \"openai\";\n *\n * // The name of your Azure OpenAI Instance.\n * // https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource\n * const instance = \"<your instance name>\";\n *\n * // Corresponds to your Model deployment within your OpenAI resource, e.g. 
my-gpt35-16k-deployment\n * // Navigate to the Azure OpenAI Studio to deploy a model.\n * const model = \"<your model>\";\n *\n * const apiKey = process.env[\"AZURE_OPENAI_API_KEY\"];\n * if (!apiKey) {\n * throw new Error(\"The AZURE_OPENAI_API_KEY environment variable is missing or empty.\");\n * }\n *\n * const copilotKit = new CopilotRuntime();\n *\n * const openai = new OpenAI({\n * apiKey,\n * baseURL: `https://${instance}.openai.azure.com/openai/deployments/${model}`,\n * defaultQuery: { \"api-version\": \"2024-04-01-preview\" },\n * defaultHeaders: { \"api-key\": apiKey },\n * });\n *\n * return new OpenAIAdapter({ openai });\n * ```\n */\nimport type { LanguageModel } from \"ai\";\nimport { createOpenAI } from \"@ai-sdk/openai\";\nimport type OpenAI from \"openai\";\nimport {\n CopilotServiceAdapter,\n CopilotRuntimeChatCompletionRequest,\n CopilotRuntimeChatCompletionResponse,\n} from \"../service-adapter\";\nimport {\n convertActionInputToOpenAITool,\n convertMessageToOpenAIMessage,\n limitMessagesToTokenCount,\n} from \"./utils\";\nimport { randomUUID } from \"@copilotkit/shared\";\nimport { convertServiceAdapterError, getSdkClientOptions } from \"../shared\";\n\nconst DEFAULT_MODEL = \"gpt-4o\";\n\nexport interface OpenAIAdapterParams {\n /**\n * An optional OpenAI instance to use. If not provided, a new instance will be\n * created.\n */\n openai?: OpenAI;\n\n /**\n * The model to use.\n */\n model?: string;\n\n /**\n * Whether to disable parallel tool calls.\n * You can disable parallel tool calls to force the model to execute tool calls sequentially.\n * This is useful if you want to execute tool calls in a specific order so that the state changes\n * introduced by one tool call are visible to the next tool call. (i.e. 
new actions or readables)\n *\n * @default false\n */\n disableParallelToolCalls?: boolean;\n\n /**\n * Whether to keep the role in system messages as \"System\".\n * By default, it is converted to \"developer\", which is used by newer OpenAI models\n *\n * @default false\n */\n keepSystemRole?: boolean;\n}\n\nexport class OpenAIAdapter implements CopilotServiceAdapter {\n public model: string = DEFAULT_MODEL;\n public provider = \"openai\";\n\n private disableParallelToolCalls: boolean = false;\n private _openai: OpenAI;\n private keepSystemRole: boolean = false;\n\n public get openai(): OpenAI {\n return this._openai;\n }\n public get name() {\n return \"OpenAIAdapter\";\n }\n\n constructor(params?: OpenAIAdapterParams) {\n if (params?.openai) {\n this._openai = params.openai;\n }\n // If no instance provided, we'll lazy-load in ensureOpenAI()\n\n if (params?.model) {\n this.model = params.model;\n }\n this.disableParallelToolCalls = params?.disableParallelToolCalls || false;\n this.keepSystemRole = params?.keepSystemRole ?? false;\n }\n\n getLanguageModel(): LanguageModel {\n const openai = this.ensureOpenAI();\n const options = getSdkClientOptions(openai);\n const provider = createOpenAI({\n baseURL: openai.baseURL,\n apiKey: openai.apiKey,\n organization: openai.organization ?? undefined,\n project: openai.project ?? 
undefined,\n headers: options.defaultHeaders,\n fetch: options.fetch,\n });\n return provider(this.model);\n }\n\n private ensureOpenAI(): OpenAI {\n if (!this._openai) {\n // eslint-disable-next-line @typescript-eslint/no-var-requires\n const OpenAI = require(\"openai\").default;\n this._openai = new OpenAI();\n }\n return this._openai;\n }\n\n async process(\n request: CopilotRuntimeChatCompletionRequest,\n ): Promise<CopilotRuntimeChatCompletionResponse> {\n const {\n threadId: threadIdFromRequest,\n model = this.model,\n messages,\n actions,\n eventSource,\n forwardedParameters,\n } = request;\n const tools = actions.map(convertActionInputToOpenAITool);\n const threadId = threadIdFromRequest ?? randomUUID();\n\n // ALLOWLIST APPROACH: Only include tool_result messages that correspond to valid tool_calls\n // Step 1: Extract valid tool_call IDs\n const validToolUseIds = new Set<string>();\n\n for (const message of messages) {\n if (message.isActionExecutionMessage()) {\n validToolUseIds.add(message.id);\n }\n }\n\n // Step 2: Filter messages, keeping only those with valid tool_call IDs\n const filteredMessages = messages.filter((message) => {\n if (message.isResultMessage()) {\n // Skip if there's no corresponding tool_call\n if (!validToolUseIds.has(message.actionExecutionId)) {\n return false;\n }\n\n // Remove this ID from valid IDs so we don't process duplicates\n validToolUseIds.delete(message.actionExecutionId);\n return true;\n }\n\n // Keep all non-tool-result messages\n return true;\n });\n\n let openaiMessages = filteredMessages.map((m) =>\n convertMessageToOpenAIMessage(m, { keepSystemRole: this.keepSystemRole }),\n );\n openaiMessages = limitMessagesToTokenCount(openaiMessages, tools, model);\n\n let toolChoice: any = forwardedParameters?.toolChoice;\n if (forwardedParameters?.toolChoice === \"function\") {\n toolChoice = {\n type: \"function\",\n function: { name: forwardedParameters.toolChoiceFunctionName },\n };\n }\n\n try {\n const openai = 
this.ensureOpenAI();\n const stream = openai.beta.chat.completions.stream({\n model: model,\n stream: true,\n messages: openaiMessages,\n ...(tools.length > 0 && { tools }),\n ...(forwardedParameters?.maxTokens && {\n max_completion_tokens: forwardedParameters.maxTokens,\n }),\n ...(forwardedParameters?.stop && { stop: forwardedParameters.stop }),\n ...(toolChoice && { tool_choice: toolChoice }),\n ...(this.disableParallelToolCalls && { parallel_tool_calls: false }),\n ...(forwardedParameters?.temperature && {\n temperature: forwardedParameters.temperature,\n }),\n });\n\n eventSource.stream(async (eventStream$) => {\n let mode: \"function\" | \"message\" | null = null;\n let currentMessageId: string;\n let currentToolCallId: string;\n\n try {\n for await (const chunk of stream) {\n if (chunk.choices.length === 0) {\n continue;\n }\n\n const toolCall = chunk.choices[0].delta.tool_calls?.[0];\n const content = chunk.choices[0].delta.content;\n\n // When switching from message to function or vice versa,\n // send the respective end event.\n // If toolCall?.id is defined, it means a new tool call starts.\n if (mode === \"message\" && toolCall?.id) {\n mode = null;\n eventStream$.sendTextMessageEnd({ messageId: currentMessageId });\n } else if (\n mode === \"function\" &&\n (toolCall === undefined || toolCall?.id)\n ) {\n mode = null;\n eventStream$.sendActionExecutionEnd({\n actionExecutionId: currentToolCallId,\n });\n }\n\n // If we send a new message type, send the appropriate start event.\n if (mode === null) {\n if (toolCall?.id) {\n mode = \"function\";\n currentToolCallId = toolCall!.id;\n eventStream$.sendActionExecutionStart({\n actionExecutionId: currentToolCallId,\n parentMessageId: chunk.id,\n actionName: toolCall!.function!.name,\n });\n } else if (content) {\n mode = \"message\";\n currentMessageId = chunk.id;\n eventStream$.sendTextMessageStart({\n messageId: currentMessageId,\n });\n }\n }\n\n // send the content events\n if (mode === \"message\" && 
content) {\n eventStream$.sendTextMessageContent({\n messageId: currentMessageId,\n content: content,\n });\n } else if (mode === \"function\" && toolCall?.function?.arguments) {\n eventStream$.sendActionExecutionArgs({\n actionExecutionId: currentToolCallId,\n args: toolCall.function.arguments,\n });\n }\n }\n\n // send the end events\n if (mode === \"message\") {\n eventStream$.sendTextMessageEnd({ messageId: currentMessageId });\n } else if (mode === \"function\") {\n eventStream$.sendActionExecutionEnd({\n actionExecutionId: currentToolCallId,\n });\n }\n } catch (error) {\n console.error(\"[OpenAI] Error during API call:\", error);\n throw convertServiceAdapterError(error, \"OpenAI\");\n }\n\n eventStream$.complete();\n });\n } catch (error) {\n console.error(\"[OpenAI] Error during API call:\", error);\n throw convertServiceAdapterError(error, \"OpenAI\");\n }\n\n return {\n threadId,\n };\n }\n}\n"],"mappings":";;;;;;;;;AAkEA,MAAM,gBAAgB;AAiCtB,IAAa,gBAAb,MAA4D;CAQ1D,IAAW,SAAiB;AAC1B,SAAO,KAAK;;CAEd,IAAW,OAAO;AAChB,SAAO;;CAGT,YAAY,QAA8B;eAdnB;kBACL;kCAE0B;wBAEV;AAUhC,MAAI,QAAQ,OACV,MAAK,UAAU,OAAO;AAIxB,MAAI,QAAQ,MACV,MAAK,QAAQ,OAAO;AAEtB,OAAK,2BAA2B,QAAQ,4BAA4B;AACpE,OAAK,iBAAiB,QAAQ,kBAAkB;;CAGlD,mBAAkC;EAChC,MAAM,SAAS,KAAK,cAAc;EAClC,MAAM,UAAU,oBAAoB,OAAO;AAS3C,SARiB,aAAa;GAC5B,SAAS,OAAO;GAChB,QAAQ,OAAO;GACf,cAAc,OAAO,gBAAgB;GACrC,SAAS,OAAO,WAAW;GAC3B,SAAS,QAAQ;GACjB,OAAO,QAAQ;GAChB,CAAC,CACc,KAAK,MAAM;;CAG7B,AAAQ,eAAuB;AAC7B,MAAI,CAAC,KAAK,SAAS;GAEjB,MAAM,mBAAiB,SAAS,CAAC;AACjC,QAAK,UAAU,IAAI,QAAQ;;AAE7B,SAAO,KAAK;;CAGd,MAAM,QACJ,SAC+C;EAC/C,MAAM,EACJ,UAAU,qBACV,QAAQ,KAAK,OACb,UACA,SACA,aACA,wBACE;EACJ,MAAM,QAAQ,QAAQ,IAAI,+BAA+B;EACzD,MAAM,WAAW,uBAAuB,YAAY;EAIpD,MAAM,kCAAkB,IAAI,KAAa;AAEzC,OAAK,MAAM,WAAW,SACpB,KAAI,QAAQ,0BAA0B,CACpC,iBAAgB,IAAI,QAAQ,GAAG;EAqBnC,IAAI,iBAhBqB,SAAS,QAAQ,YAAY;AACpD,OAAI,QAAQ,iBAAiB,EAAE;AAE7B,QAAI,CAAC,gBAAgB,IAAI,QAAQ,kBAAkB,CACjD,QAAO;AAIT,oBAAgB,OAAO,QAAQ,kBAAkB;AACjD,WAAO;;AAIT,UAAO;IACP,CAEoC,KAAK,MACzC,8BAA8B,GAAG,EAAE,g
BAAgB,KAAK,gBAAgB,CAAC,CAC1E;AACD,mBAAiB,0BAA0B,gBAAgB,OAAO,MAAM;EAExE,IAAI,aAAkB,qBAAqB;AAC3C,MAAI,qBAAqB,eAAe,WACtC,cAAa;GACX,MAAM;GACN,UAAU,EAAE,MAAM,oBAAoB,wBAAwB;GAC/D;AAGH,MAAI;GAEF,MAAM,SADS,KAAK,cAAc,CACZ,KAAK,KAAK,YAAY,OAAO;IAC1C;IACP,QAAQ;IACR,UAAU;IACV,GAAI,MAAM,SAAS,KAAK,EAAE,OAAO;IACjC,GAAI,qBAAqB,aAAa,EACpC,uBAAuB,oBAAoB,WAC5C;IACD,GAAI,qBAAqB,QAAQ,EAAE,MAAM,oBAAoB,MAAM;IACnE,GAAI,cAAc,EAAE,aAAa,YAAY;IAC7C,GAAI,KAAK,4BAA4B,EAAE,qBAAqB,OAAO;IACnE,GAAI,qBAAqB,eAAe,EACtC,aAAa,oBAAoB,aAClC;IACF,CAAC;AAEF,eAAY,OAAO,OAAO,iBAAiB;IACzC,IAAI,OAAsC;IAC1C,IAAI;IACJ,IAAI;AAEJ,QAAI;AACF,gBAAW,MAAM,SAAS,QAAQ;AAChC,UAAI,MAAM,QAAQ,WAAW,EAC3B;MAGF,MAAM,WAAW,MAAM,QAAQ,GAAG,MAAM,aAAa;MACrD,MAAM,UAAU,MAAM,QAAQ,GAAG,MAAM;AAKvC,UAAI,SAAS,aAAa,UAAU,IAAI;AACtC,cAAO;AACP,oBAAa,mBAAmB,EAAE,WAAW,kBAAkB,CAAC;iBAEhE,SAAS,eACR,aAAa,UAAa,UAAU,KACrC;AACA,cAAO;AACP,oBAAa,uBAAuB,EAClC,mBAAmB,mBACpB,CAAC;;AAIJ,UAAI,SAAS,MACX;WAAI,UAAU,IAAI;AAChB,eAAO;AACP,4BAAoB,SAAU;AAC9B,qBAAa,yBAAyB;SACpC,mBAAmB;SACnB,iBAAiB,MAAM;SACvB,YAAY,SAAU,SAAU;SACjC,CAAC;kBACO,SAAS;AAClB,eAAO;AACP,2BAAmB,MAAM;AACzB,qBAAa,qBAAqB,EAChC,WAAW,kBACZ,CAAC;;;AAKN,UAAI,SAAS,aAAa,QACxB,cAAa,uBAAuB;OAClC,WAAW;OACF;OACV,CAAC;eACO,SAAS,cAAc,UAAU,UAAU,UACpD,cAAa,wBAAwB;OACnC,mBAAmB;OACnB,MAAM,SAAS,SAAS;OACzB,CAAC;;AAKN,SAAI,SAAS,UACX,cAAa,mBAAmB,EAAE,WAAW,kBAAkB,CAAC;cACvD,SAAS,WAClB,cAAa,uBAAuB,EAClC,mBAAmB,mBACpB,CAAC;aAEG,OAAO;AACd,aAAQ,MAAM,mCAAmC,MAAM;AACvD,WAAM,2BAA2B,OAAO,SAAS;;AAGnD,iBAAa,UAAU;KACvB;WACK,OAAO;AACd,WAAQ,MAAM,mCAAmC,MAAM;AACvD,SAAM,2BAA2B,OAAO,SAAS;;AAGnD,SAAO,EACL,UACD"}
@@ -7,6 +7,7 @@ import { ForwardedParametersInput } from "../graphql/inputs/forwarded-parameters
7
7
  import { ExtensionsInput } from "../graphql/inputs/extensions.input.cjs";
8
8
  import { AgentSessionInput } from "../graphql/inputs/agent-session.input.cjs";
9
9
  import { AgentStateInput } from "../graphql/inputs/agent-state.input.cjs";
10
+ import { LanguageModel } from "ai";
10
11
 
11
12
  //#region src/service-adapters/service-adapter.d.ts
12
13
  interface CopilotRuntimeChatCompletionRequest {
@@ -31,6 +32,13 @@ interface CopilotServiceAdapter {
31
32
  model?: string;
32
33
  process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
33
34
  name?: string;
35
+ /**
36
+ * Returns a pre-configured LanguageModel for use with BuiltInAgent.
37
+ * Adapters that support custom provider configurations (e.g., Azure OpenAI
38
+ * with custom baseURL/apiKey) should implement this to ensure the
39
+ * configuration is propagated to the agent layer.
40
+ */
41
+ getLanguageModel?(): LanguageModel;
34
42
  }
35
43
  //#endregion
36
44
  export { CopilotRuntimeChatCompletionRequest, CopilotRuntimeChatCompletionResponse, CopilotServiceAdapter };
@@ -1 +1 @@
1
- {"version":3,"file":"service-adapter.d.cts","names":[],"sources":["../../src/service-adapters/service-adapter.ts"],"mappings":";;;;;;;;;;;UAciB,mCAAA;EACf,WAAA,EAAa,kBAAA;EACb,QAAA,EAAU,OAAA;EACV,OAAA,EAAS,WAAA;EACT,KAAA;EACA,QAAA;EACA,KAAA;EACA,mBAAA,GAAsB,wBAAA;EACtB,UAAA,GAAa,eAAA;EACb,YAAA,GAAe,iBAAA;EACf,WAAA,GAAc,eAAA;AAAA;AAAA,UAGC,oCAAA;EACf,QAAA;EACA,KAAA;EACA,UAAA,GAAa,kBAAA;AAAA;AAAA,UAGE,qBAAA;EACf,QAAA;EACA,KAAA;EACA,OAAA,CACE,OAAA,EAAS,mCAAA,GACR,OAAA,CAAQ,oCAAA;EACX,IAAA;AAAA"}
1
+ {"version":3,"file":"service-adapter.d.cts","names":[],"sources":["../../src/service-adapters/service-adapter.ts"],"mappings":";;;;;;;;;;;;UAeiB,mCAAA;EACf,WAAA,EAAa,kBAAA;EACb,QAAA,EAAU,OAAA;EACV,OAAA,EAAS,WAAA;EACT,KAAA;EACA,QAAA;EACA,KAAA;EACA,mBAAA,GAAsB,wBAAA;EACtB,UAAA,GAAa,eAAA;EACb,YAAA,GAAe,iBAAA;EACf,WAAA,GAAc,eAAA;AAAA;AAAA,UAGC,oCAAA;EACf,QAAA;EACA,KAAA;EACA,UAAA,GAAa,kBAAA;AAAA;AAAA,UAGE,qBAAA;EACf,QAAA;EACA,KAAA;EACA,OAAA,CACE,OAAA,EAAS,mCAAA,GACR,OAAA,CAAQ,oCAAA;EACX,IAAA;EAjBa;;;;;;EAyBb,gBAAA,KAAqB,aAAA;AAAA"}
@@ -7,6 +7,7 @@ import { ForwardedParametersInput } from "../graphql/inputs/forwarded-parameters
7
7
  import { ExtensionsInput } from "../graphql/inputs/extensions.input.mjs";
8
8
  import { AgentSessionInput } from "../graphql/inputs/agent-session.input.mjs";
9
9
  import { AgentStateInput } from "../graphql/inputs/agent-state.input.mjs";
10
+ import { LanguageModel } from "ai";
10
11
 
11
12
  //#region src/service-adapters/service-adapter.d.ts
12
13
  interface CopilotRuntimeChatCompletionRequest {
@@ -31,6 +32,13 @@ interface CopilotServiceAdapter {
31
32
  model?: string;
32
33
  process(request: CopilotRuntimeChatCompletionRequest): Promise<CopilotRuntimeChatCompletionResponse>;
33
34
  name?: string;
35
+ /**
36
+ * Returns a pre-configured LanguageModel for use with BuiltInAgent.
37
+ * Adapters that support custom provider configurations (e.g., Azure OpenAI
38
+ * with custom baseURL/apiKey) should implement this to ensure the
39
+ * configuration is propagated to the agent layer.
40
+ */
41
+ getLanguageModel?(): LanguageModel;
34
42
  }
35
43
  //#endregion
36
44
  export { CopilotRuntimeChatCompletionRequest, CopilotRuntimeChatCompletionResponse, CopilotServiceAdapter };
@@ -1 +1 @@
1
- {"version":3,"file":"service-adapter.d.mts","names":[],"sources":["../../src/service-adapters/service-adapter.ts"],"mappings":";;;;;;;;;;;UAciB,mCAAA;EACf,WAAA,EAAa,kBAAA;EACb,QAAA,EAAU,OAAA;EACV,OAAA,EAAS,WAAA;EACT,KAAA;EACA,QAAA;EACA,KAAA;EACA,mBAAA,GAAsB,wBAAA;EACtB,UAAA,GAAa,eAAA;EACb,YAAA,GAAe,iBAAA;EACf,WAAA,GAAc,eAAA;AAAA;AAAA,UAGC,oCAAA;EACf,QAAA;EACA,KAAA;EACA,UAAA,GAAa,kBAAA;AAAA;AAAA,UAGE,qBAAA;EACf,QAAA;EACA,KAAA;EACA,OAAA,CACE,OAAA,EAAS,mCAAA,GACR,OAAA,CAAQ,oCAAA;EACX,IAAA;AAAA"}
1
+ {"version":3,"file":"service-adapter.d.mts","names":[],"sources":["../../src/service-adapters/service-adapter.ts"],"mappings":";;;;;;;;;;;;UAeiB,mCAAA;EACf,WAAA,EAAa,kBAAA;EACb,QAAA,EAAU,OAAA;EACV,OAAA,EAAS,WAAA;EACT,KAAA;EACA,QAAA;EACA,KAAA;EACA,mBAAA,GAAsB,wBAAA;EACtB,UAAA,GAAa,eAAA;EACb,YAAA,GAAe,iBAAA;EACf,WAAA,GAAc,eAAA;AAAA;AAAA,UAGC,oCAAA;EACf,QAAA;EACA,KAAA;EACA,UAAA,GAAa,kBAAA;AAAA;AAAA,UAGE,qBAAA;EACf,QAAA;EACA,KAAA;EACA,OAAA,CACE,OAAA,EAAS,mCAAA,GACR,OAAA,CAAQ,oCAAA;EACX,IAAA;EAjBa;;;;;;EAyBb,gBAAA,KAAqB,aAAA;AAAA"}
@@ -1,2 +1,3 @@
1
1
  import "reflect-metadata";
2
- import { convertServiceAdapterError } from "./error-utils.mjs";
2
+ import { convertServiceAdapterError } from "./error-utils.mjs";
3
+ import { getSdkClientOptions } from "./sdk-client-utils.mjs";
@@ -0,0 +1,17 @@
1
+ require("reflect-metadata");
2
+
3
+ //#region src/service-adapters/shared/sdk-client-utils.ts
4
+ /**
5
+ * SDK clients (OpenAI, Anthropic, Groq) store constructor options like
6
+ * `defaultHeaders` and `fetch` in a private/protected `_options` field
7
+ * with no public accessor. This extracts them with a narrow type assertion.
8
+ */
9
+ function getSdkClientOptions(client) {
10
+ const options = client._options;
11
+ if (options != null && typeof options === "object") return options;
12
+ return {};
13
+ }
14
+
15
+ //#endregion
16
+ exports.getSdkClientOptions = getSdkClientOptions;
17
+ //# sourceMappingURL=sdk-client-utils.cjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"sdk-client-utils.cjs","names":[],"sources":["../../../src/service-adapters/shared/sdk-client-utils.ts"],"sourcesContent":["/**\n * SDK clients (OpenAI, Anthropic, Groq) store constructor options like\n * `defaultHeaders` and `fetch` in a private/protected `_options` field\n * with no public accessor. This extracts them with a narrow type assertion.\n */\nexport function getSdkClientOptions(client: object): {\n defaultHeaders?: Record<string, string>;\n fetch?: typeof globalThis.fetch;\n} {\n const rec = client as Record<string, unknown>;\n const options = rec._options;\n if (options != null && typeof options === \"object\") {\n return options as {\n defaultHeaders?: Record<string, string>;\n fetch?: typeof globalThis.fetch;\n };\n }\n return {};\n}\n"],"mappings":";;;;;;;;AAKA,SAAgB,oBAAoB,QAGlC;CAEA,MAAM,UADM,OACQ;AACpB,KAAI,WAAW,QAAQ,OAAO,YAAY,SACxC,QAAO;AAKT,QAAO,EAAE"}
@@ -0,0 +1,14 @@
1
+
2
+ //#region src/service-adapters/shared/sdk-client-utils.d.ts
3
+ /**
4
+ * SDK clients (OpenAI, Anthropic, Groq) store constructor options like
5
+ * `defaultHeaders` and `fetch` in a private/protected `_options` field
6
+ * with no public accessor. This extracts them with a narrow type assertion.
7
+ */
8
+ declare function getSdkClientOptions(client: object): {
9
+ defaultHeaders?: Record<string, string>;
10
+ fetch?: typeof globalThis.fetch;
11
+ };
12
+ //#endregion
13
+ export { getSdkClientOptions };
14
+ //# sourceMappingURL=sdk-client-utils.d.cts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"sdk-client-utils.d.cts","names":[],"sources":["../../../src/service-adapters/shared/sdk-client-utils.ts"],"mappings":";;;AAKA;;;;iBAAgB,mBAAA,CAAoB,MAAA;EAClC,cAAA,GAAiB,MAAA;EACjB,KAAA,UAAe,UAAA,CAAW,KAAA;AAAA"}
@@ -0,0 +1,14 @@
1
+ import "reflect-metadata";
2
+ //#region src/service-adapters/shared/sdk-client-utils.d.ts
3
+ /**
4
+ * SDK clients (OpenAI, Anthropic, Groq) store constructor options like
5
+ * `defaultHeaders` and `fetch` in a private/protected `_options` field
6
+ * with no public accessor. This extracts them with a narrow type assertion.
7
+ */
8
+ declare function getSdkClientOptions(client: object): {
9
+ defaultHeaders?: Record<string, string>;
10
+ fetch?: typeof globalThis.fetch;
11
+ };
12
+ //#endregion
13
+ export { getSdkClientOptions };
14
+ //# sourceMappingURL=sdk-client-utils.d.mts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"sdk-client-utils.d.mts","names":[],"sources":["../../../src/service-adapters/shared/sdk-client-utils.ts"],"mappings":";;;AAKA;;;;iBAAgB,mBAAA,CAAoB,MAAA;EAClC,cAAA,GAAiB,MAAA;EACjB,KAAA,UAAe,UAAA,CAAW,KAAA;AAAA"}
@@ -0,0 +1,16 @@
1
+ import "reflect-metadata";
2
+ //#region src/service-adapters/shared/sdk-client-utils.ts
3
+ /**
4
+ * SDK clients (OpenAI, Anthropic, Groq) store constructor options like
5
+ * `defaultHeaders` and `fetch` in a private/protected `_options` field
6
+ * with no public accessor. This extracts them with a narrow type assertion.
7
+ */
8
+ function getSdkClientOptions(client) {
9
+ const options = client._options;
10
+ if (options != null && typeof options === "object") return options;
11
+ return {};
12
+ }
13
+
14
+ //#endregion
15
+ export { getSdkClientOptions };
16
+ //# sourceMappingURL=sdk-client-utils.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"sdk-client-utils.mjs","names":[],"sources":["../../../src/service-adapters/shared/sdk-client-utils.ts"],"sourcesContent":["/**\n * SDK clients (OpenAI, Anthropic, Groq) store constructor options like\n * `defaultHeaders` and `fetch` in a private/protected `_options` field\n * with no public accessor. This extracts them with a narrow type assertion.\n */\nexport function getSdkClientOptions(client: object): {\n defaultHeaders?: Record<string, string>;\n fetch?: typeof globalThis.fetch;\n} {\n const rec = client as Record<string, unknown>;\n const options = rec._options;\n if (options != null && typeof options === \"object\") {\n return options as {\n defaultHeaders?: Record<string, string>;\n fetch?: typeof globalThis.fetch;\n };\n }\n return {};\n}\n"],"mappings":";;;;;;;AAKA,SAAgB,oBAAoB,QAGlC;CAEA,MAAM,UADM,OACQ;AACpB,KAAI,WAAW,QAAQ,OAAO,YAAY,SACxC,QAAO;AAKT,QAAO,EAAE"}
package/package.json CHANGED
@@ -9,7 +9,7 @@
9
9
  "publishConfig": {
10
10
  "access": "public"
11
11
  },
12
- "version": "1.52.0",
12
+ "version": "1.52.1-next.0",
13
13
  "sideEffects": [
14
14
  "./dist/index.mjs",
15
15
  "./dist/index.cjs",
@@ -52,6 +52,8 @@
52
52
  "@ag-ui/client": "^0.0.45",
53
53
  "@ag-ui/core": "^0.0.45",
54
54
  "@ag-ui/langgraph": "^0.0.24",
55
+ "@ai-sdk/anthropic": "^2.0.22",
56
+ "@ai-sdk/openai": "^2.0.42",
55
57
  "@graphql-yoga/plugin-defer-stream": "^3.3.1",
56
58
  "@hono/node-server": "^1.13.5",
57
59
  "@scarf/scarf": "^1.3.0",
@@ -60,6 +62,7 @@
60
62
  "graphql": "^16.8.1",
61
63
  "graphql-scalars": "^1.23.0",
62
64
  "graphql-yoga": "^5.3.1",
65
+ "ai": "^5.0.92",
63
66
  "hono": "^4.11.4",
64
67
  "openai": "^4.85.1",
65
68
  "partial-json": "^0.1.7",
@@ -69,9 +72,9 @@
69
72
  "rxjs": "7.8.1",
70
73
  "type-graphql": "2.0.0-rc.1",
71
74
  "zod": "^3.23.3",
72
- "@copilotkitnext/agent": "1.52.0",
73
- "@copilotkitnext/runtime": "1.52.0",
74
- "@copilotkit/shared": "1.52.0"
75
+ "@copilotkit/shared": "1.52.1-next.0",
76
+ "@copilotkitnext/agent": "1.52.1-next.0",
77
+ "@copilotkitnext/runtime": "1.52.1-next.0"
75
78
  },
76
79
  "peerDependencies": {
77
80
  "@anthropic-ai/sdk": "^0.57.0",
@@ -416,9 +416,10 @@ export class CopilotRuntime<const T extends Parameter[] | [] = []> {
416
416
  }
417
417
 
418
418
  if (isAgentsListEmpty) {
419
- agentsList.default = new BuiltInAgent({
420
- model: `${serviceAdapter.provider}/${serviceAdapter.model}`,
421
- });
419
+ const model =
420
+ serviceAdapter.getLanguageModel?.() ??
421
+ `${serviceAdapter.provider}/${serviceAdapter.model}`;
422
+ agentsList.default = new BuiltInAgent({ model });
422
423
  }
423
424
 
424
425
  const actions = this.params?.actions;
@@ -22,6 +22,8 @@
22
22
  * });
23
23
  * ```
24
24
  */
25
+ import type { LanguageModel } from "ai";
26
+ import { createAnthropic } from "@ai-sdk/anthropic";
25
27
  import type Anthropic from "@anthropic-ai/sdk";
26
28
  import {
27
29
  CopilotServiceAdapter,
@@ -35,7 +37,7 @@ import {
35
37
  } from "./utils";
36
38
 
37
39
  import { randomId, randomUUID } from "@copilotkit/shared";
38
- import { convertServiceAdapterError } from "../shared";
40
+ import { convertServiceAdapterError, getSdkClientOptions } from "../shared";
39
41
 
40
42
  const DEFAULT_MODEL = "claude-3-5-sonnet-latest";
41
43
 
@@ -94,6 +96,18 @@ export class AnthropicAdapter implements CopilotServiceAdapter {
94
96
  this.promptCaching = params?.promptCaching || { enabled: false };
95
97
  }
96
98
 
99
+ getLanguageModel(): LanguageModel {
100
+ const anthropic = this.ensureAnthropic();
101
+ const options = getSdkClientOptions(anthropic);
102
+ const provider = createAnthropic({
103
+ baseURL: anthropic.baseURL,
104
+ apiKey: anthropic.apiKey,
105
+ headers: options.defaultHeaders,
106
+ fetch: options.fetch,
107
+ });
108
+ return provider(this.model);
109
+ }
110
+
97
111
  private ensureAnthropic(): Anthropic {
98
112
  if (!this._anthropic) {
99
113
  // eslint-disable-next-line @typescript-eslint/no-var-requires
@@ -14,6 +14,8 @@
14
14
  * return new GroqAdapter({ groq, model: "<model-name>" });
15
15
  * ```
16
16
  */
17
+ import type { LanguageModel } from "ai";
18
+ import { createOpenAI } from "@ai-sdk/openai";
17
19
  import type { Groq } from "groq-sdk";
18
20
  import type { ChatCompletionMessageParam } from "groq-sdk/resources/chat";
19
21
  import {
@@ -27,7 +29,7 @@ import {
27
29
  limitMessagesToTokenCount,
28
30
  } from "../openai/utils";
29
31
  import { randomUUID } from "@copilotkit/shared";
30
- import { convertServiceAdapterError } from "../shared";
32
+ import { convertServiceAdapterError, getSdkClientOptions } from "../shared";
31
33
 
32
34
  const DEFAULT_MODEL = "llama-3.3-70b-versatile";
33
35
 
@@ -77,6 +79,19 @@ export class GroqAdapter implements CopilotServiceAdapter {
77
79
  this.disableParallelToolCalls = params?.disableParallelToolCalls || false;
78
80
  }
79
81
 
82
+ getLanguageModel(): LanguageModel {
83
+ const groq = this.ensureGroq();
84
+ const options = getSdkClientOptions(groq);
85
+ const provider = createOpenAI({
86
+ baseURL: groq.baseURL,
87
+ apiKey: groq.apiKey,
88
+ headers: options.defaultHeaders,
89
+ fetch: options.fetch,
90
+ name: "groq",
91
+ });
92
+ return provider(this.model);
93
+ }
94
+
80
95
  private ensureGroq(): Groq {
81
96
  if (!this._groq) {
82
97
  // eslint-disable-next-line @typescript-eslint/no-var-requires
@@ -48,6 +48,8 @@
48
48
  * return new OpenAIAdapter({ openai });
49
49
  * ```
50
50
  */
51
+ import type { LanguageModel } from "ai";
52
+ import { createOpenAI } from "@ai-sdk/openai";
51
53
  import type OpenAI from "openai";
52
54
  import {
53
55
  CopilotServiceAdapter,
@@ -60,7 +62,7 @@ import {
60
62
  limitMessagesToTokenCount,
61
63
  } from "./utils";
62
64
  import { randomUUID } from "@copilotkit/shared";
63
- import { convertServiceAdapterError } from "../shared";
65
+ import { convertServiceAdapterError, getSdkClientOptions } from "../shared";
64
66
 
65
67
  const DEFAULT_MODEL = "gpt-4o";
66
68
 
@@ -123,6 +125,20 @@ export class OpenAIAdapter implements CopilotServiceAdapter {
123
125
  this.keepSystemRole = params?.keepSystemRole ?? false;
124
126
  }
125
127
 
128
+ getLanguageModel(): LanguageModel {
129
+ const openai = this.ensureOpenAI();
130
+ const options = getSdkClientOptions(openai);
131
+ const provider = createOpenAI({
132
+ baseURL: openai.baseURL,
133
+ apiKey: openai.apiKey,
134
+ organization: openai.organization ?? undefined,
135
+ project: openai.project ?? undefined,
136
+ headers: options.defaultHeaders,
137
+ fetch: options.fetch,
138
+ });
139
+ return provider(this.model);
140
+ }
141
+
126
142
  private ensureOpenAI(): OpenAI {
127
143
  if (!this._openai) {
128
144
  // eslint-disable-next-line @typescript-eslint/no-var-requires
@@ -1,3 +1,4 @@
1
+ import type { LanguageModel } from "ai";
1
2
  import { Message } from "../graphql/types/converted";
2
3
  import { RuntimeEventSource } from "./events";
3
4
  import { ActionInput } from "../graphql/inputs/action.input";
@@ -38,4 +39,12 @@ export interface CopilotServiceAdapter {
38
39
  request: CopilotRuntimeChatCompletionRequest,
39
40
  ): Promise<CopilotRuntimeChatCompletionResponse>;
40
41
  name?: string;
42
+
43
+ /**
44
+ * Returns a pre-configured LanguageModel for use with BuiltInAgent.
45
+ * Adapters that support custom provider configurations (e.g., Azure OpenAI
46
+ * with custom baseURL/apiKey) should implement this to ensure the
47
+ * configuration is propagated to the agent layer.
48
+ */
49
+ getLanguageModel?(): LanguageModel;
41
50
  }
@@ -1 +1,2 @@
1
1
  export * from "./error-utils";
2
+ export * from "./sdk-client-utils";
@@ -0,0 +1,19 @@
1
+ /**
2
+ * SDK clients (OpenAI, Anthropic, Groq) store constructor options like
3
+ * `defaultHeaders` and `fetch` in a private/protected `_options` field
4
+ * with no public accessor. This extracts them with a narrow type assertion.
5
+ */
6
+ export function getSdkClientOptions(client: object): {
7
+ defaultHeaders?: Record<string, string>;
8
+ fetch?: typeof globalThis.fetch;
9
+ } {
10
+ const rec = client as Record<string, unknown>;
11
+ const options = rec._options;
12
+ if (options != null && typeof options === "object") {
13
+ return options as {
14
+ defaultHeaders?: Record<string, string>;
15
+ fetch?: typeof globalThis.fetch;
16
+ };
17
+ }
18
+ return {};
19
+ }
@@ -0,0 +1,101 @@
1
+ import { describe, it, expect, vi, beforeEach } from "vitest";
2
+ import type { AnthropicProviderSettings } from "@ai-sdk/anthropic";
3
+ import { AnthropicAdapter } from "../../../src/service-adapters/anthropic/anthropic-adapter";
4
+ import Anthropic from "@anthropic-ai/sdk";
5
+
6
+ // Keys from AnthropicProviderSettings that we forward from the Anthropic SDK client.
7
+ type ForwardedAnthropicKeys = "baseURL" | "apiKey" | "headers" | "fetch";
8
+
9
+ // We don't set `name` or `generateId` — they're provider-internal concerns.
10
+ type ControlledAnthropicKeys = "name" | "generateId";
11
+
12
+ // Compile-time exhaustiveness check: every key in AnthropicProviderSettings
13
+ // must be accounted for. If this line errors, a new key was added.
14
+ type _exhaustive =
15
+ Exclude<
16
+ keyof AnthropicProviderSettings,
17
+ ForwardedAnthropicKeys | ControlledAnthropicKeys
18
+ > extends never
19
+ ? true
20
+ : {
21
+ error: "AnthropicProviderSettings has unhandled keys";
22
+ unhandled: Exclude<
23
+ keyof AnthropicProviderSettings,
24
+ ForwardedAnthropicKeys | ControlledAnthropicKeys
25
+ >;
26
+ };
27
+ const _check: _exhaustive = true;
28
+
29
+ const { mockProviderFn, mockCreateAnthropic } = vi.hoisted(() => {
30
+ const mockProviderFn = vi.fn().mockReturnValue({ modelId: "test-model" });
31
+ const mockCreateAnthropic = vi.fn().mockReturnValue(mockProviderFn);
32
+ return { mockProviderFn, mockCreateAnthropic };
33
+ });
34
+
35
+ vi.mock("@ai-sdk/anthropic", async (importOriginal) => {
36
+ const actual = await importOriginal<typeof import("@ai-sdk/anthropic")>();
37
+ return { ...actual, createAnthropic: mockCreateAnthropic };
38
+ });
39
+
40
+ vi.mock("@anthropic-ai/sdk", () => {
41
+ return {
42
+ default: class MockAnthropic {
43
+ baseURL: string;
44
+ apiKey: string;
45
+ _options: Record<string, any>;
46
+ messages = { create: vi.fn() };
47
+
48
+ constructor(opts: any = {}) {
49
+ this.baseURL = opts.baseURL ?? "https://api.anthropic.com/v1";
50
+ this.apiKey = opts.apiKey ?? "default-key";
51
+ this._options = {
52
+ defaultHeaders: opts.defaultHeaders,
53
+ fetch: opts.fetch,
54
+ ...opts,
55
+ };
56
+ }
57
+ },
58
+ };
59
+ });
60
+
61
+ describe("AnthropicAdapter.getLanguageModel()", () => {
62
+ beforeEach(() => {
63
+ vi.clearAllMocks();
64
+ });
65
+
66
+ it("forwards all provider-relevant options from the Anthropic SDK client", () => {
67
+ const customFetch = vi.fn();
68
+ const anthropic = new Anthropic({
69
+ apiKey: "sk-ant-test",
70
+ baseURL: "https://proxy.example.com/v1",
71
+ defaultHeaders: { "x-custom": "value" },
72
+ fetch: customFetch,
73
+ });
74
+
75
+ const adapter = new AnthropicAdapter({
76
+ anthropic,
77
+ model: "claude-3-5-sonnet-latest",
78
+ });
79
+ adapter.getLanguageModel();
80
+
81
+ expect(mockCreateAnthropic).toHaveBeenCalledOnce();
82
+ const settings = mockCreateAnthropic.mock.calls[0][0];
83
+
84
+ expect(settings.baseURL).toBe("https://proxy.example.com/v1");
85
+ expect(settings.apiKey).toBe("sk-ant-test");
86
+ expect(settings.headers).toEqual({ "x-custom": "value" });
87
+ expect(settings.fetch).toBe(customFetch);
88
+
89
+ expect(mockProviderFn).toHaveBeenCalledWith("claude-3-5-sonnet-latest");
90
+ });
91
+
92
+ it("works with default Anthropic config (no custom options)", () => {
93
+ const anthropic = new Anthropic({ apiKey: "sk-ant-default" });
94
+ const adapter = new AnthropicAdapter({ anthropic });
95
+ adapter.getLanguageModel();
96
+
97
+ const settings = mockCreateAnthropic.mock.calls[0][0];
98
+ expect(settings.baseURL).toBe("https://api.anthropic.com/v1");
99
+ expect(settings.apiKey).toBe("sk-ant-default");
100
+ });
101
+ });