@livekit/agents-plugin-openai 1.0.50 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. package/dist/index.cjs +5 -2
  2. package/dist/index.cjs.map +1 -1
  3. package/dist/index.d.cts +1 -0
  4. package/dist/index.d.ts +1 -0
  5. package/dist/index.d.ts.map +1 -1
  6. package/dist/index.js +4 -2
  7. package/dist/index.js.map +1 -1
  8. package/dist/llm.cjs +8 -0
  9. package/dist/llm.cjs.map +1 -1
  10. package/dist/llm.d.cts +1 -0
  11. package/dist/llm.d.ts +1 -0
  12. package/dist/llm.d.ts.map +1 -1
  13. package/dist/llm.js +8 -0
  14. package/dist/llm.js.map +1 -1
  15. package/dist/llm.test.cjs +31 -16
  16. package/dist/llm.test.cjs.map +1 -1
  17. package/dist/llm.test.js +32 -17
  18. package/dist/llm.test.js.map +1 -1
  19. package/dist/realtime/api_proto.cjs.map +1 -1
  20. package/dist/realtime/api_proto.d.cts +7 -3
  21. package/dist/realtime/api_proto.d.ts +7 -3
  22. package/dist/realtime/api_proto.d.ts.map +1 -1
  23. package/dist/realtime/api_proto.js.map +1 -1
  24. package/dist/realtime/realtime_model.cjs +46 -22
  25. package/dist/realtime/realtime_model.cjs.map +1 -1
  26. package/dist/realtime/realtime_model.d.cts +2 -1
  27. package/dist/realtime/realtime_model.d.ts +2 -1
  28. package/dist/realtime/realtime_model.d.ts.map +1 -1
  29. package/dist/realtime/realtime_model.js +46 -22
  30. package/dist/realtime/realtime_model.js.map +1 -1
  31. package/dist/realtime/realtime_model.test.cjs +104 -14
  32. package/dist/realtime/realtime_model.test.cjs.map +1 -1
  33. package/dist/realtime/realtime_model.test.js +104 -14
  34. package/dist/realtime/realtime_model.test.js.map +1 -1
  35. package/dist/realtime/realtime_model_beta.cjs +40 -22
  36. package/dist/realtime/realtime_model_beta.cjs.map +1 -1
  37. package/dist/realtime/realtime_model_beta.d.ts.map +1 -1
  38. package/dist/realtime/realtime_model_beta.js +40 -22
  39. package/dist/realtime/realtime_model_beta.js.map +1 -1
  40. package/dist/responses/llm.cjs +71 -16
  41. package/dist/responses/llm.cjs.map +1 -1
  42. package/dist/responses/llm.d.cts +10 -25
  43. package/dist/responses/llm.d.ts +10 -25
  44. package/dist/responses/llm.d.ts.map +1 -1
  45. package/dist/responses/llm.js +71 -14
  46. package/dist/responses/llm.js.map +1 -1
  47. package/dist/responses/llm.test.cjs +32 -17
  48. package/dist/responses/llm.test.cjs.map +1 -1
  49. package/dist/responses/llm.test.js +33 -18
  50. package/dist/responses/llm.test.js.map +1 -1
  51. package/dist/stt.cjs +18 -3
  52. package/dist/stt.cjs.map +1 -1
  53. package/dist/stt.d.cts +2 -0
  54. package/dist/stt.d.ts +2 -0
  55. package/dist/stt.d.ts.map +1 -1
  56. package/dist/stt.js +19 -4
  57. package/dist/stt.js.map +1 -1
  58. package/dist/stt.test.cjs +11 -3
  59. package/dist/stt.test.cjs.map +1 -1
  60. package/dist/stt.test.js +12 -4
  61. package/dist/stt.test.js.map +1 -1
  62. package/dist/tts.cjs +11 -0
  63. package/dist/tts.cjs.map +1 -1
  64. package/dist/tts.d.cts +2 -0
  65. package/dist/tts.d.ts +2 -0
  66. package/dist/tts.d.ts.map +1 -1
  67. package/dist/tts.js +11 -0
  68. package/dist/tts.js.map +1 -1
  69. package/dist/tts.test.cjs +11 -3
  70. package/dist/tts.test.cjs.map +1 -1
  71. package/dist/tts.test.js +12 -4
  72. package/dist/tts.test.js.map +1 -1
  73. package/dist/ws/index.cjs +29 -0
  74. package/dist/ws/index.cjs.map +1 -0
  75. package/dist/ws/index.d.cts +3 -0
  76. package/dist/ws/index.d.ts +3 -0
  77. package/dist/ws/index.d.ts.map +1 -0
  78. package/dist/ws/index.js +5 -0
  79. package/dist/ws/index.js.map +1 -0
  80. package/dist/ws/llm.cjs +502 -0
  81. package/dist/ws/llm.cjs.map +1 -0
  82. package/dist/ws/llm.d.cts +74 -0
  83. package/dist/ws/llm.d.ts +74 -0
  84. package/dist/ws/llm.d.ts.map +1 -0
  85. package/dist/ws/llm.js +485 -0
  86. package/dist/ws/llm.js.map +1 -0
  87. package/dist/ws/llm.test.cjs +26 -0
  88. package/dist/ws/llm.test.cjs.map +1 -0
  89. package/dist/ws/llm.test.d.cts +2 -0
  90. package/dist/ws/llm.test.d.ts +2 -0
  91. package/dist/ws/llm.test.d.ts.map +1 -0
  92. package/dist/ws/llm.test.js +25 -0
  93. package/dist/ws/llm.test.js.map +1 -0
  94. package/dist/ws/types.cjs +128 -0
  95. package/dist/ws/types.cjs.map +1 -0
  96. package/dist/ws/types.d.cts +167 -0
  97. package/dist/ws/types.d.ts +167 -0
  98. package/dist/ws/types.d.ts.map +1 -0
  99. package/dist/ws/types.js +95 -0
  100. package/dist/ws/types.js.map +1 -0
  101. package/package.json +6 -5
  102. package/src/index.ts +1 -0
  103. package/src/llm.test.ts +31 -17
  104. package/src/llm.ts +9 -0
  105. package/src/realtime/api_proto.ts +8 -2
  106. package/src/realtime/realtime_model.test.ts +129 -14
  107. package/src/realtime/realtime_model.ts +51 -26
  108. package/src/realtime/realtime_model_beta.ts +42 -25
  109. package/src/responses/llm.test.ts +32 -18
  110. package/src/responses/llm.ts +105 -19
  111. package/src/stt.test.ts +12 -4
  112. package/src/stt.ts +21 -4
  113. package/src/tts.test.ts +12 -4
  114. package/src/tts.ts +13 -0
  115. package/src/ws/index.ts +17 -0
  116. package/src/ws/llm.test.ts +30 -0
  117. package/src/ws/llm.ts +665 -0
  118. package/src/ws/types.ts +131 -0
@@ -0,0 +1,74 @@
1
+ import type { APIConnectOptions } from '@livekit/agents';
2
+ import { ConnectionPool, llm, stream } from '@livekit/agents';
3
+ import { WebSocket } from 'ws';
4
+ import type { ChatModels } from '../models.js';
5
+ import type { WsResponseCreateEvent, WsServerEvent } from './types.js';
6
+ export declare class ResponsesWebSocket {
7
+ #private;
8
+ constructor(ws: WebSocket);
9
+ /**
10
+ * Send a response.create event. Returns a typed `StreamChannel<WsServerEvent>`
11
+ * that yields validated server events until the response terminates.
12
+ */
13
+ sendRequest(payload: WsResponseCreateEvent): stream.StreamChannel<WsServerEvent>;
14
+ close(): void;
15
+ }
16
+ export interface WSLLMOptions {
17
+ model: string | ChatModels;
18
+ apiKey?: string;
19
+ baseURL?: string;
20
+ temperature?: number;
21
+ parallelToolCalls?: boolean;
22
+ toolChoice?: llm.ToolChoice;
23
+ store?: boolean;
24
+ metadata?: Record<string, string>;
25
+ strictToolSchema?: boolean;
26
+ }
27
+ export declare class WSLLM extends llm.LLM {
28
+ #private;
29
+ /**
30
+ * Create a new instance of the OpenAI Responses API WebSocket LLM.
31
+ *
32
+ * @remarks
33
+ * `apiKey` must be set to your OpenAI API key, either using the argument or
34
+ * by setting the `OPENAI_API_KEY` environment variable.
35
+ *
36
+ * A persistent WebSocket connection to `/v1/responses` is maintained and
37
+ * reused across turns, reducing per-turn continuation overhead for
38
+ * tool-call-heavy workflows.
39
+ */
40
+ constructor(opts?: Partial<WSLLMOptions>);
41
+ label(): string;
42
+ get model(): string;
43
+ prewarm(): void;
44
+ close(): Promise<void>;
45
+ aclose(): Promise<void>;
46
+ /** Called by LLMStream once response.created fires to atomically persist both the
47
+ * response ID and its corresponding chat context for the next turn's diff. */
48
+ _onResponseCreated(responseId: string, chatCtx: llm.ChatContext): void;
49
+ _setPendingToolCalls(callIds: Set<string>): void;
50
+ chat({ chatCtx, toolCtx, connOptions, parallelToolCalls, toolChoice, extraKwargs, }: {
51
+ chatCtx: llm.ChatContext;
52
+ toolCtx?: llm.ToolContext;
53
+ connOptions?: APIConnectOptions;
54
+ parallelToolCalls?: boolean;
55
+ toolChoice?: llm.ToolChoice;
56
+ extraKwargs?: Record<string, unknown>;
57
+ }): WSLLMStream;
58
+ }
59
+ export declare class WSLLMStream extends llm.LLMStream {
60
+ #private;
61
+ constructor(llm: WSLLM, { pool, model, chatCtx, fullChatCtx, toolCtx, connOptions, modelOptions, prevResponseId, strictToolSchema, }: {
62
+ pool: ConnectionPool<ResponsesWebSocket>;
63
+ model: string | ChatModels;
64
+ chatCtx: llm.ChatContext;
65
+ fullChatCtx: llm.ChatContext;
66
+ toolCtx?: llm.ToolContext;
67
+ connOptions: APIConnectOptions;
68
+ modelOptions: Record<string, unknown>;
69
+ prevResponseId?: string;
70
+ strictToolSchema: boolean;
71
+ });
72
+ protected run(): Promise<void>;
73
+ }
74
+ //# sourceMappingURL=llm.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"llm.d.ts","sourceRoot":"","sources":["../../src/ws/llm.ts"],"names":[],"mappings":"AAGA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,iBAAiB,CAAC;AACzD,OAAO,EAIL,cAAc,EAEd,GAAG,EACH,MAAM,EAEP,MAAM,iBAAiB,CAAC;AAEzB,OAAO,EAAE,SAAS,EAAE,MAAM,IAAI,CAAC;AAC/B,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,cAAc,CAAC;AAC/C,OAAO,KAAK,EAIV,qBAAqB,EAGrB,aAAa,EACd,MAAM,YAAY,CAAC;AAmBpB,qBAAa,kBAAkB;;gBAKjB,EAAE,EAAE,SAAS;IAoDzB;;;OAGG;IACH,WAAW,CAAC,OAAO,EAAE,qBAAqB,GAAG,MAAM,CAAC,aAAa,CAAC,aAAa,CAAC;IAchF,KAAK,IAAI,IAAI;CAQd;AAMD,MAAM,WAAW,YAAY;IAC3B,KAAK,EAAE,MAAM,GAAG,UAAU,CAAC;IAC3B,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,iBAAiB,CAAC,EAAE,OAAO,CAAC;IAC5B,UAAU,CAAC,EAAE,GAAG,CAAC,UAAU,CAAC;IAC5B,KAAK,CAAC,EAAE,OAAO,CAAC;IAChB,QAAQ,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAClC,gBAAgB,CAAC,EAAE,OAAO,CAAC;CAC5B;AAYD,qBAAa,KAAM,SAAQ,GAAG,CAAC,GAAG;;IAOhC;;;;;;;;;;OAUG;gBACS,IAAI,GAAE,OAAO,CAAC,YAAY,CAAqB;IAuB3D,KAAK,IAAI,MAAM;IAIf,IAAI,KAAK,IAAI,MAAM,CAElB;IAED,OAAO,IAAI,IAAI;IAIT,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAIb,MAAM,IAAI,OAAO,CAAC,IAAI,CAAC;IAItC;mFAC+E;IAC/E,kBAAkB,CAAC,UAAU,EAAE,MAAM,EAAE,OAAO,EAAE,GAAG,CAAC,WAAW,GAAG,IAAI;IAKtE,oBAAoB,CAAC,OAAO,EAAE,GAAG,CAAC,MAAM,CAAC,GAAG,IAAI;IAIhD,IAAI,CAAC,EACH,OAAO,EACP,OAAO,EACP,WAAyC,EACzC,iBAAiB,EACjB,UAAU,EACV,WAAW,GACZ,EAAE;QACD,OAAO,EAAE,GAAG,CAAC,WAAW,CAAC;QACzB,OAAO,CAAC,EAAE,GAAG,CAAC,WAAW,CAAC;QAC1B,WAAW,CAAC,EAAE,iBAAiB,CAAC;QAChC,iBAAiB,CAAC,EAAE,OAAO,CAAC;QAC5B,UAAU,CAAC,EAAE,GAAG,CAAC,UAAU,CAAC;QAC5B,WAAW,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;KACvC,GAAG,WAAW;CA8EhB;AAMD,qBAAa,WAAY,SAAQ,GAAG,CAAC,SAAS;;gBAa1C,GAAG,EAAE,KAAK,EACV,EACE,IAAI,EACJ,KAAK,EACL,OAAO,EACP,WAAW,EACX,OAAO,EACP,WAAW,EACX,YAAY,EACZ,cAAc,EACd,gBAAgB,GACjB,EAAE;QACD,IAAI,EAAE,cAAc,CAAC,kBAAkB,CAAC,CAAC;QACzC,KAAK,EAAE,MAAM,GAAG,UAAU,CAAC;QAC3B,OAAO,EAAE,GAAG,CAAC,WAAW,CAAC;QACzB,WAAW,EAAE,GAAG,CAAC,WAAW,CAAC;QAC7B,OAAO,CAAC,EAAE,GAAG,CAAC,WAAW,CAAC;QAC1B,WAAW,EAAE,iBAAiB,CAAC;QAC/B,YAAY,EAAE,M
AAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;QACtC,cAAc,CAAC,EAAE,MAAM,CAAC;QACxB,gBAAgB,EAAE,OAAO,CAAC;KAC3B;cAYa,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;CAmOrC"}
package/dist/ws/llm.js ADDED
@@ -0,0 +1,485 @@
1
+ import {
2
+ APIConnectionError,
3
+ APIStatusError,
4
+ APITimeoutError,
5
+ ConnectionPool,
6
+ DEFAULT_API_CONNECT_OPTIONS,
7
+ llm,
8
+ stream,
9
+ toError
10
+ } from "@livekit/agents";
11
+ import { WebSocket } from "ws";
12
+ import { wsServerEventSchema } from "./types.js";
13
+ const OPENAI_RESPONSES_WS_URL = "wss://api.openai.com/v1/responses";
14
+ const WS_MAX_SESSION_DURATION = 36e5;
15
+ class ResponsesWebSocket {
16
+ #ws;
17
+ // FIFO queue: the front entry receives validated WsServerEvents for the in-flight response.
18
+ #outputQueue = [];
19
+ constructor(ws) {
20
+ this.#ws = ws;
21
+ ws.on("message", (data) => {
22
+ const current = this.#outputQueue[0];
23
+ if (!current) return;
24
+ let raw;
25
+ try {
26
+ raw = JSON.parse(data.toString());
27
+ } catch {
28
+ return;
29
+ }
30
+ const parsed = wsServerEventSchema.safeParse(raw);
31
+ if (!parsed.success) return;
32
+ const event = parsed.data;
33
+ void current.write(event);
34
+ if (event.type === "response.completed" || event.type === "response.failed" || event.type === "error") {
35
+ void current.close();
36
+ this.#outputQueue.shift();
37
+ }
38
+ });
39
+ ws.on("close", () => {
40
+ for (const current of this.#outputQueue) {
41
+ if (!current.closed) {
42
+ const closeError = {
43
+ type: "error",
44
+ error: {
45
+ code: "websocket_closed",
46
+ message: "OpenAI Responses WebSocket closed unexpectedly"
47
+ }
48
+ };
49
+ void current.write(closeError).finally(() => current.close());
50
+ }
51
+ }
52
+ this.#outputQueue = [];
53
+ });
54
+ }
55
+ /**
56
+ * Send a response.create event. Returns a typed `StreamChannel<WsServerEvent>`
57
+ * that yields validated server events until the response terminates.
58
+ */
59
+ sendRequest(payload) {
60
+ if (this.#ws.readyState !== WebSocket.OPEN) {
61
+ throw new APIConnectionError({
62
+ message: `OpenAI Responses WebSocket is not open (state ${getWebSocketStateLabel(this.#ws.readyState)})`,
63
+ options: { retryable: true }
64
+ });
65
+ }
66
+ const channel = stream.createStreamChannel();
67
+ this.#outputQueue.push(channel);
68
+ this.#ws.send(JSON.stringify(payload));
69
+ return channel;
70
+ }
71
+ close() {
72
+ for (const ch of this.#outputQueue) {
73
+ void ch.close();
74
+ }
75
+ this.#outputQueue = [];
76
+ this.#ws.close();
77
+ }
78
+ }
79
+ const defaultLLMOptions = {
80
+ model: "gpt-4.1",
81
+ apiKey: process.env.OPENAI_API_KEY,
82
+ strictToolSchema: true
83
+ };
84
+ class WSLLM extends llm.LLM {
85
+ #opts;
86
+ #pool;
87
+ #prevResponseId = "";
88
+ #prevChatCtx = null;
89
+ #pendingToolCalls = /* @__PURE__ */ new Set();
90
+ /**
91
+ * Create a new instance of the OpenAI Responses API WebSocket LLM.
92
+ *
93
+ * @remarks
94
+ * `apiKey` must be set to your OpenAI API key, either using the argument or
95
+ * by setting the `OPENAI_API_KEY` environment variable.
96
+ *
97
+ * A persistent WebSocket connection to `/v1/responses` is maintained and
98
+ * reused across turns, reducing per-turn continuation overhead for
99
+ * tool-call-heavy workflows.
100
+ */
101
+ constructor(opts = defaultLLMOptions) {
102
+ super();
103
+ this.#opts = { ...defaultLLMOptions, ...opts };
104
+ if (!this.#opts.apiKey) {
105
+ throw new Error("OpenAI API key is required, whether as an argument or as $OPENAI_API_KEY");
106
+ }
107
+ this.#pool = new ConnectionPool({
108
+ maxSessionDuration: WS_MAX_SESSION_DURATION,
109
+ connectCb: async (timeoutMs) => {
110
+ const wsUrl = this.#opts.baseURL ? `${this.#opts.baseURL.replace(/^https?/, "wss").replace(/\/+$/, "")}/responses` : OPENAI_RESPONSES_WS_URL;
111
+ const ws = await connectWs(wsUrl, this.#opts.apiKey, timeoutMs);
112
+ return new ResponsesWebSocket(ws);
113
+ },
114
+ closeCb: async (conn) => {
115
+ conn.close();
116
+ }
117
+ });
118
+ }
119
+ label() {
120
+ return "openai.ws.LLM";
121
+ }
122
+ get model() {
123
+ return this.#opts.model;
124
+ }
125
+ prewarm() {
126
+ this.#pool.prewarm();
127
+ }
128
+ async close() {
129
+ await this.#pool.close();
130
+ }
131
+ async aclose() {
132
+ await this.close();
133
+ }
134
+ /** Called by LLMStream once response.created fires to atomically persist both the
135
+ * response ID and its corresponding chat context for the next turn's diff. */
136
+ _onResponseCreated(responseId, chatCtx) {
137
+ this.#prevResponseId = responseId;
138
+ this.#prevChatCtx = chatCtx;
139
+ }
140
+ _setPendingToolCalls(callIds) {
141
+ this.#pendingToolCalls = callIds;
142
+ }
143
+ chat({
144
+ chatCtx,
145
+ toolCtx,
146
+ connOptions = DEFAULT_API_CONNECT_OPTIONS,
147
+ parallelToolCalls,
148
+ toolChoice,
149
+ extraKwargs
150
+ }) {
151
+ var _a;
152
+ const modelOptions = { ...extraKwargs ?? {} };
153
+ parallelToolCalls = parallelToolCalls !== void 0 ? parallelToolCalls : this.#opts.parallelToolCalls;
154
+ if (toolCtx && Object.keys(toolCtx).length > 0 && parallelToolCalls !== void 0) {
155
+ modelOptions.parallel_tool_calls = parallelToolCalls;
156
+ }
157
+ toolChoice = toolChoice !== void 0 ? toolChoice : this.#opts.toolChoice;
158
+ if (toolChoice) {
159
+ modelOptions.tool_choice = toolChoice;
160
+ }
161
+ if (this.#opts.temperature !== void 0) {
162
+ modelOptions.temperature = this.#opts.temperature;
163
+ }
164
+ if (this.#opts.store !== void 0) {
165
+ modelOptions.store = this.#opts.store;
166
+ }
167
+ if (this.#opts.metadata) {
168
+ modelOptions.metadata = this.#opts.metadata;
169
+ }
170
+ let inputChatCtx = chatCtx;
171
+ let prevResponseId;
172
+ const canUseStoredResponse = modelOptions.store !== false;
173
+ if (canUseStoredResponse && this.#prevChatCtx && this.#prevResponseId) {
174
+ const diff = llm.computeChatCtxDiff(this.#prevChatCtx, chatCtx);
175
+ const lastPrevItemId = ((_a = this.#prevChatCtx.items.at(-1)) == null ? void 0 : _a.id) ?? null;
176
+ if (diff.toRemove.length === 0 && diff.toCreate.length > 0 && diff.toCreate[0][0] === lastPrevItemId) {
177
+ const newItemIds = new Set(diff.toCreate.map(([, id]) => id));
178
+ const newItems = chatCtx.items.filter((item) => newItemIds.has(item.id));
179
+ const pendingToolCallsCompleted = this.#pendingToolCallsCompleted(newItems);
180
+ if (pendingToolCallsCompleted) {
181
+ inputChatCtx = new llm.ChatContext(newItems);
182
+ prevResponseId = this.#prevResponseId;
183
+ }
184
+ }
185
+ }
186
+ return new WSLLMStream(this, {
187
+ pool: this.#pool,
188
+ model: this.#opts.model,
189
+ chatCtx: inputChatCtx,
190
+ fullChatCtx: chatCtx,
191
+ toolCtx,
192
+ connOptions,
193
+ modelOptions,
194
+ prevResponseId,
195
+ strictToolSchema: this.#opts.strictToolSchema ?? true
196
+ });
197
+ }
198
+ #pendingToolCallsCompleted(items) {
199
+ if (this.#pendingToolCalls.size === 0) return true;
200
+ const completedCallIds = new Set(
201
+ items.filter((item) => item.type === "function_call_output").map((item) => item.callId)
202
+ );
203
+ return [...this.#pendingToolCalls].every((callId) => completedCallIds.has(callId));
204
+ }
205
+ }
206
+ class WSLLMStream extends llm.LLMStream {
207
+ #llm;
208
+ #pool;
209
+ #model;
210
+ #modelOptions;
211
+ #strictToolSchema;
212
+ #prevResponseId;
213
+ /** Full chat context — used as fallback when previous_response_id is stale. */
214
+ #fullChatCtx;
215
+ #responseId = "";
216
+ #pendingToolCalls = /* @__PURE__ */ new Set();
217
+ constructor(llm2, {
218
+ pool,
219
+ model,
220
+ chatCtx,
221
+ fullChatCtx,
222
+ toolCtx,
223
+ connOptions,
224
+ modelOptions,
225
+ prevResponseId,
226
+ strictToolSchema
227
+ }) {
228
+ super(llm2, { chatCtx, toolCtx, connOptions });
229
+ this.#llm = llm2;
230
+ this.#pool = pool;
231
+ this.#model = model;
232
+ this.#modelOptions = modelOptions;
233
+ this.#strictToolSchema = strictToolSchema;
234
+ this.#prevResponseId = prevResponseId;
235
+ this.#fullChatCtx = fullChatCtx;
236
+ }
237
+ async run() {
238
+ let retryable = true;
239
+ try {
240
+ await this.#pool.withConnection(async (conn) => {
241
+ const needsRetry = await this.#runWithConn(conn, this.chatCtx, this.#prevResponseId);
242
+ if (needsRetry) {
243
+ retryable = true;
244
+ await this.#runWithConn(conn, this.#fullChatCtx, void 0);
245
+ }
246
+ });
247
+ } catch (error) {
248
+ if (error instanceof APIStatusError || error instanceof APITimeoutError || error instanceof APIConnectionError) {
249
+ throw error;
250
+ }
251
+ throw new APIConnectionError({
252
+ message: toError(error).message,
253
+ options: { retryable }
254
+ });
255
+ }
256
+ }
257
+ /**
258
+ * Execute a single response.create round-trip on the given connection.
259
+ * Returns `true` when the caller should retry with the full chat context
260
+ * (i.e. `previous_response_not_found`), `false` otherwise.
261
+ */
262
+ async #runWithConn(conn, chatCtx, prevResponseId) {
263
+ const messages = await chatCtx.toProviderFormat(
264
+ "openai.responses"
265
+ );
266
+ const tools = this.toolCtx ? Object.entries(this.toolCtx).map(([name, func]) => {
267
+ const oaiParams = {
268
+ type: "function",
269
+ name,
270
+ description: func.description,
271
+ parameters: llm.toJsonSchema(
272
+ func.parameters,
273
+ true,
274
+ this.#strictToolSchema
275
+ )
276
+ };
277
+ if (this.#strictToolSchema) {
278
+ oaiParams.strict = true;
279
+ }
280
+ return oaiParams;
281
+ }) : void 0;
282
+ const requestOptions = { ...this.#modelOptions };
283
+ if (!tools) {
284
+ delete requestOptions.tool_choice;
285
+ }
286
+ const payload = {
287
+ type: "response.create",
288
+ model: this.#model,
289
+ input: messages,
290
+ tools: tools ?? [],
291
+ ...prevResponseId ? { previous_response_id: prevResponseId } : {},
292
+ ...requestOptions
293
+ };
294
+ let channel;
295
+ try {
296
+ channel = conn.sendRequest(payload);
297
+ } catch (error) {
298
+ if (error instanceof APIConnectionError) {
299
+ conn.close();
300
+ this.#pool.invalidate();
301
+ }
302
+ throw error;
303
+ }
304
+ const reader = channel.stream().getReader();
305
+ try {
306
+ while (true) {
307
+ const { done, value: event } = await reader.read();
308
+ if (done) break;
309
+ let chunk;
310
+ switch (event.type) {
311
+ case "error": {
312
+ const retry = this.#handleError(event, conn);
313
+ if (retry) return true;
314
+ break;
315
+ }
316
+ case "response.created":
317
+ this.#handleResponseCreated(event);
318
+ break;
319
+ case "response.output_item.done":
320
+ chunk = this.#handleOutputItemDone(event);
321
+ break;
322
+ case "response.output_text.delta":
323
+ chunk = this.#handleOutputTextDelta(event);
324
+ break;
325
+ case "response.completed":
326
+ chunk = this.#handleResponseCompleted(event);
327
+ break;
328
+ case "response.failed":
329
+ this.#handleResponseFailed(event);
330
+ break;
331
+ default:
332
+ break;
333
+ }
334
+ if (chunk) {
335
+ this.queue.put(chunk);
336
+ }
337
+ }
338
+ } finally {
339
+ reader.releaseLock();
340
+ }
341
+ return false;
342
+ }
343
+ /**
344
+ * Returns `true` when the caller should retry with full context
345
+ * (`previous_response_not_found`), throws for all other errors.
346
+ */
347
+ #handleError(event, conn) {
348
+ var _a, _b, _c;
349
+ const code = (_a = event.error) == null ? void 0 : _a.code;
350
+ if (code === "previous_response_not_found") {
351
+ return true;
352
+ }
353
+ if (code === "websocket_connection_limit_reached" || code === "websocket_closed") {
354
+ conn.close();
355
+ this.#pool.invalidate();
356
+ throw new APIConnectionError({
357
+ message: ((_b = event.error) == null ? void 0 : _b.message) ?? `WebSocket closed (${code})`,
358
+ options: { retryable: true }
359
+ });
360
+ }
361
+ throw new APIStatusError({
362
+ message: ((_c = event.error) == null ? void 0 : _c.message) ?? event.message ?? "Unknown error from OpenAI Responses WS",
363
+ options: {
364
+ statusCode: event.status ?? -1,
365
+ retryable: false
366
+ }
367
+ });
368
+ }
369
+ #handleResponseCreated(event) {
370
+ this.#responseId = event.response.id;
371
+ this.#llm._onResponseCreated(event.response.id, this.#fullChatCtx);
372
+ }
373
+ #handleOutputItemDone(event) {
374
+ if (event.item.type === "function_call") {
375
+ this.#pendingToolCalls.add(event.item.call_id);
376
+ return {
377
+ id: this.#responseId,
378
+ delta: {
379
+ role: "assistant",
380
+ content: void 0,
381
+ toolCalls: [
382
+ llm.FunctionCall.create({
383
+ callId: event.item.call_id,
384
+ name: event.item.name,
385
+ args: event.item.arguments
386
+ })
387
+ ]
388
+ }
389
+ };
390
+ }
391
+ return void 0;
392
+ }
393
+ #handleOutputTextDelta(event) {
394
+ return {
395
+ id: this.#responseId,
396
+ delta: {
397
+ role: "assistant",
398
+ content: event.delta
399
+ }
400
+ };
401
+ }
402
+ #handleResponseCompleted(event) {
403
+ this.#llm._setPendingToolCalls(this.#pendingToolCalls);
404
+ if (event.response.usage) {
405
+ return {
406
+ id: this.#responseId,
407
+ usage: {
408
+ completionTokens: event.response.usage.output_tokens,
409
+ promptTokens: event.response.usage.input_tokens,
410
+ promptCachedTokens: event.response.usage.input_tokens_details.cached_tokens,
411
+ totalTokens: event.response.usage.total_tokens
412
+ }
413
+ };
414
+ }
415
+ return void 0;
416
+ }
417
+ #handleResponseFailed(event) {
418
+ var _a, _b;
419
+ throw new APIStatusError({
420
+ message: ((_b = (_a = event.response) == null ? void 0 : _a.error) == null ? void 0 : _b.message) ?? "Response failed",
421
+ options: { statusCode: -1, retryable: false }
422
+ });
423
+ }
424
+ }
425
+ async function connectWs(url, apiKey, timeoutMs) {
426
+ return new Promise((resolve, reject) => {
427
+ const ws = new WebSocket(url, {
428
+ headers: { Authorization: `Bearer ${apiKey}` }
429
+ });
430
+ let settled = false;
431
+ const timer = setTimeout(() => {
432
+ settled = true;
433
+ ws.close();
434
+ reject(
435
+ new APIConnectionError({ message: "Timeout connecting to OpenAI Responses WebSocket" })
436
+ );
437
+ }, timeoutMs);
438
+ ws.once("open", () => {
439
+ if (settled) return;
440
+ settled = true;
441
+ clearTimeout(timer);
442
+ resolve(ws);
443
+ });
444
+ ws.once("error", (err) => {
445
+ if (settled) return;
446
+ settled = true;
447
+ clearTimeout(timer);
448
+ reject(
449
+ new APIConnectionError({
450
+ message: `Error connecting to OpenAI Responses WebSocket: ${err.message}`
451
+ })
452
+ );
453
+ });
454
+ ws.once("close", (code) => {
455
+ if (settled) return;
456
+ settled = true;
457
+ clearTimeout(timer);
458
+ reject(
459
+ new APIConnectionError({
460
+ message: `OpenAI Responses WebSocket closed unexpectedly during connect (code ${code})`
461
+ })
462
+ );
463
+ });
464
+ });
465
+ }
466
+ function getWebSocketStateLabel(readyState) {
467
+ switch (readyState) {
468
+ case WebSocket.CONNECTING:
469
+ return "CONNECTING";
470
+ case WebSocket.OPEN:
471
+ return "OPEN";
472
+ case WebSocket.CLOSING:
473
+ return "CLOSING";
474
+ case WebSocket.CLOSED:
475
+ return "CLOSED";
476
+ default:
477
+ return `UNKNOWN:${readyState}`;
478
+ }
479
+ }
480
+ export {
481
+ ResponsesWebSocket,
482
+ WSLLM,
483
+ WSLLMStream
484
+ };
485
+ //# sourceMappingURL=llm.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../src/ws/llm.ts"],"sourcesContent":["// SPDX-FileCopyrightText: 2025 LiveKit, Inc.\n//\n// SPDX-License-Identifier: Apache-2.0\nimport type { APIConnectOptions } from '@livekit/agents';\nimport {\n APIConnectionError,\n APIStatusError,\n APITimeoutError,\n ConnectionPool,\n DEFAULT_API_CONNECT_OPTIONS,\n llm,\n stream,\n toError,\n} from '@livekit/agents';\nimport type OpenAI from 'openai';\nimport { WebSocket } from 'ws';\nimport type { ChatModels } from '../models.js';\nimport type {\n WsOutputItemDoneEvent,\n WsOutputTextDeltaEvent,\n WsResponseCompletedEvent,\n WsResponseCreateEvent,\n WsResponseCreatedEvent,\n WsResponseFailedEvent,\n WsServerEvent,\n} from './types.js';\nimport { wsServerEventSchema } from './types.js';\n\nconst OPENAI_RESPONSES_WS_URL = 'wss://api.openai.com/v1/responses';\n\n// OpenAI enforces a 60-minute maximum duration on Responses WebSocket connections.\nconst WS_MAX_SESSION_DURATION = 3_600_000;\n\n// ============================================================================\n// Internal: ResponsesWebSocket\n//\n// Wraps a single raw WebSocket connection. 
Maintains a FIFO queue of\n// StreamChannels — one per outstanding response.create request — and\n// dispatches every incoming server-event to the front of the queue.\n// A response is terminated (and its channel closed) when the service sends\n// response.completed, response.failed, or error.\n//\n// ============================================================================\n\nexport class ResponsesWebSocket {\n #ws: WebSocket;\n // FIFO queue: the front entry receives validated WsServerEvents for the in-flight response.\n #outputQueue: stream.StreamChannel<WsServerEvent>[] = [];\n\n constructor(ws: WebSocket) {\n this.#ws = ws;\n\n ws.on('message', (data: Buffer) => {\n const current = this.#outputQueue[0];\n if (!current) return;\n\n let raw: unknown;\n try {\n raw = JSON.parse(data.toString());\n } catch {\n return;\n }\n\n // Validate and type-narrow with Zod at write time so readers always\n // receive a fully-typed WsServerEvent.\n const parsed = wsServerEventSchema.safeParse(raw);\n if (!parsed.success) return;\n\n const event = parsed.data;\n void current.write(event);\n\n // Close and dequeue on any terminal event.\n if (\n event.type === 'response.completed' ||\n event.type === 'response.failed' ||\n event.type === 'error'\n ) {\n void current.close();\n this.#outputQueue.shift();\n }\n });\n\n ws.on('close', () => {\n // If the WebSocket closes while requests are still in flight, synthesise\n // a typed error event so all readers can handle it cleanly.\n for (const current of this.#outputQueue) {\n if (!current.closed) {\n const closeError: WsServerEvent = {\n type: 'error',\n error: {\n code: 'websocket_closed',\n message: 'OpenAI Responses WebSocket closed unexpectedly',\n },\n };\n void current.write(closeError).finally(() => current.close());\n }\n }\n this.#outputQueue = [];\n });\n }\n\n /**\n * Send a response.create event. 
Returns a typed `StreamChannel<WsServerEvent>`\n * that yields validated server events until the response terminates.\n */\n sendRequest(payload: WsResponseCreateEvent): stream.StreamChannel<WsServerEvent> {\n if (this.#ws.readyState !== WebSocket.OPEN) {\n throw new APIConnectionError({\n message: `OpenAI Responses WebSocket is not open (state ${getWebSocketStateLabel(this.#ws.readyState)})`,\n options: { retryable: true },\n });\n }\n\n const channel = stream.createStreamChannel<WsServerEvent>();\n this.#outputQueue.push(channel);\n this.#ws.send(JSON.stringify(payload));\n return channel;\n }\n\n close(): void {\n // Drain pending channels before closing the socket.\n for (const ch of this.#outputQueue) {\n void ch.close();\n }\n this.#outputQueue = [];\n this.#ws.close();\n }\n}\n\n// ============================================================================\n// LLMOptions\n// ============================================================================\n\nexport interface WSLLMOptions {\n model: string | ChatModels;\n apiKey?: string;\n baseURL?: string;\n temperature?: number;\n parallelToolCalls?: boolean;\n toolChoice?: llm.ToolChoice;\n store?: boolean;\n metadata?: Record<string, string>;\n strictToolSchema?: boolean;\n}\n\nconst defaultLLMOptions: WSLLMOptions = {\n model: 'gpt-4.1',\n apiKey: process.env.OPENAI_API_KEY,\n strictToolSchema: true,\n};\n\n// ============================================================================\n// LLM\n// ============================================================================\n\nexport class WSLLM extends llm.LLM {\n #opts: WSLLMOptions;\n #pool: ConnectionPool<ResponsesWebSocket>;\n #prevResponseId = '';\n #prevChatCtx: llm.ChatContext | null = null;\n #pendingToolCalls = new Set<string>();\n\n /**\n * Create a new instance of the OpenAI Responses API WebSocket LLM.\n *\n * @remarks\n * `apiKey` must be set to your OpenAI API key, either using the argument or\n * by setting the `OPENAI_API_KEY` environment 
variable.\n *\n * A persistent WebSocket connection to `/v1/responses` is maintained and\n * reused across turns, reducing per-turn continuation overhead for\n * tool-call-heavy workflows.\n */\n constructor(opts: Partial<WSLLMOptions> = defaultLLMOptions) {\n super();\n\n this.#opts = { ...defaultLLMOptions, ...opts };\n if (!this.#opts.apiKey) {\n throw new Error('OpenAI API key is required, whether as an argument or as $OPENAI_API_KEY');\n }\n\n this.#pool = new ConnectionPool<ResponsesWebSocket>({\n maxSessionDuration: WS_MAX_SESSION_DURATION,\n connectCb: async (timeoutMs: number) => {\n const wsUrl = this.#opts.baseURL\n ? `${this.#opts.baseURL.replace(/^https?/, 'wss').replace(/\\/+$/, '')}/responses`\n : OPENAI_RESPONSES_WS_URL;\n const ws = await connectWs(wsUrl, this.#opts.apiKey!, timeoutMs);\n return new ResponsesWebSocket(ws);\n },\n closeCb: async (conn: ResponsesWebSocket) => {\n conn.close();\n },\n });\n }\n\n label(): string {\n return 'openai.ws.LLM';\n }\n\n get model(): string {\n return this.#opts.model;\n }\n\n prewarm(): void {\n this.#pool.prewarm();\n }\n\n async close(): Promise<void> {\n await this.#pool.close();\n }\n\n override async aclose(): Promise<void> {\n await this.close();\n }\n\n /** Called by LLMStream once response.created fires to atomically persist both the\n * response ID and its corresponding chat context for the next turn's diff. 
*/\n _onResponseCreated(responseId: string, chatCtx: llm.ChatContext): void {\n this.#prevResponseId = responseId;\n this.#prevChatCtx = chatCtx;\n }\n\n _setPendingToolCalls(callIds: Set<string>): void {\n this.#pendingToolCalls = callIds;\n }\n\n chat({\n chatCtx,\n toolCtx,\n connOptions = DEFAULT_API_CONNECT_OPTIONS,\n parallelToolCalls,\n toolChoice,\n extraKwargs,\n }: {\n chatCtx: llm.ChatContext;\n toolCtx?: llm.ToolContext;\n connOptions?: APIConnectOptions;\n parallelToolCalls?: boolean;\n toolChoice?: llm.ToolChoice;\n extraKwargs?: Record<string, unknown>;\n }): WSLLMStream {\n const modelOptions: Record<string, unknown> = { ...(extraKwargs ?? {}) };\n\n parallelToolCalls =\n parallelToolCalls !== undefined ? parallelToolCalls : this.#opts.parallelToolCalls;\n if (toolCtx && Object.keys(toolCtx).length > 0 && parallelToolCalls !== undefined) {\n modelOptions.parallel_tool_calls = parallelToolCalls;\n }\n\n toolChoice =\n toolChoice !== undefined ? toolChoice : (this.#opts.toolChoice as llm.ToolChoice | undefined);\n if (toolChoice) {\n modelOptions.tool_choice = toolChoice;\n }\n\n if (this.#opts.temperature !== undefined) {\n modelOptions.temperature = this.#opts.temperature;\n }\n\n if (this.#opts.store !== undefined) {\n modelOptions.store = this.#opts.store;\n }\n\n if (this.#opts.metadata) {\n modelOptions.metadata = this.#opts.metadata;\n }\n\n let inputChatCtx = chatCtx;\n let prevResponseId: string | undefined;\n const canUseStoredResponse = modelOptions.store !== false;\n\n if (canUseStoredResponse && this.#prevChatCtx && this.#prevResponseId) {\n const diff = llm.computeChatCtxDiff(this.#prevChatCtx, chatCtx);\n const lastPrevItemId = this.#prevChatCtx.items.at(-1)?.id ?? 
null;\n\n if (\n diff.toRemove.length === 0 &&\n diff.toCreate.length > 0 &&\n diff.toCreate[0]![0] === lastPrevItemId\n ) {\n // All new items are appended after the tail of the previous context —\n // safe to send only the incremental input with previous_response_id,\n // but only if all pending tool calls from the previous response have\n // their corresponding function_call_output in the new items.\n const newItemIds = new Set(diff.toCreate.map(([, id]) => id));\n const newItems = chatCtx.items.filter((item: llm.ChatItem) => newItemIds.has(item.id));\n const pendingToolCallsCompleted = this.#pendingToolCallsCompleted(newItems);\n if (pendingToolCallsCompleted) {\n inputChatCtx = new llm.ChatContext(newItems);\n prevResponseId = this.#prevResponseId;\n }\n }\n // Otherwise: items were removed or inserted mid-history — fall back to\n // sending the full context with no previous_response_id.\n }\n\n return new WSLLMStream(this, {\n pool: this.#pool,\n model: this.#opts.model,\n chatCtx: inputChatCtx,\n fullChatCtx: chatCtx,\n toolCtx,\n connOptions,\n modelOptions,\n prevResponseId,\n strictToolSchema: this.#opts.strictToolSchema ?? 
true,\n });\n }\n\n #pendingToolCallsCompleted(items: llm.ChatItem[]): boolean {\n if (this.#pendingToolCalls.size === 0) return true;\n const completedCallIds = new Set(\n items\n .filter((item): item is llm.FunctionCallOutput => item.type === 'function_call_output')\n .map((item) => item.callId),\n );\n return [...this.#pendingToolCalls].every((callId) => completedCallIds.has(callId));\n }\n}\n\n// ============================================================================\n// WsLLMStream\n// ============================================================================\n\nexport class WSLLMStream extends llm.LLMStream {\n #llm: WSLLM;\n #pool: ConnectionPool<ResponsesWebSocket>;\n #model: string | ChatModels;\n #modelOptions: Record<string, unknown>;\n #strictToolSchema: boolean;\n #prevResponseId?: string;\n /** Full chat context — used as fallback when previous_response_id is stale. */\n #fullChatCtx: llm.ChatContext;\n #responseId = '';\n #pendingToolCalls = new Set<string>();\n\n constructor(\n llm: WSLLM,\n {\n pool,\n model,\n chatCtx,\n fullChatCtx,\n toolCtx,\n connOptions,\n modelOptions,\n prevResponseId,\n strictToolSchema,\n }: {\n pool: ConnectionPool<ResponsesWebSocket>;\n model: string | ChatModels;\n chatCtx: llm.ChatContext;\n fullChatCtx: llm.ChatContext;\n toolCtx?: llm.ToolContext;\n connOptions: APIConnectOptions;\n modelOptions: Record<string, unknown>;\n prevResponseId?: string;\n strictToolSchema: boolean;\n },\n ) {\n super(llm, { chatCtx, toolCtx, connOptions });\n this.#llm = llm;\n this.#pool = pool;\n this.#model = model;\n this.#modelOptions = modelOptions;\n this.#strictToolSchema = strictToolSchema;\n this.#prevResponseId = prevResponseId;\n this.#fullChatCtx = fullChatCtx;\n }\n\n protected async run(): Promise<void> {\n let retryable = true;\n\n try {\n await this.#pool.withConnection(async (conn: ResponsesWebSocket) => {\n const needsRetry = await this.#runWithConn(conn, this.chatCtx, this.#prevResponseId);\n\n if (needsRetry) 
{\n // previous_response_id was evicted from the server-side cache.\n // Retry once on the same connection with the full context and no ID.\n retryable = true;\n await this.#runWithConn(conn, this.#fullChatCtx, undefined);\n }\n });\n } catch (error) {\n if (\n error instanceof APIStatusError ||\n error instanceof APITimeoutError ||\n error instanceof APIConnectionError\n ) {\n throw error;\n }\n throw new APIConnectionError({\n message: toError(error).message,\n options: { retryable },\n });\n }\n }\n\n /**\n * Execute a single response.create round-trip on the given connection.\n * Returns `true` when the caller should retry with the full chat context\n * (i.e. `previous_response_not_found`), `false` otherwise.\n */\n async #runWithConn(\n conn: ResponsesWebSocket,\n chatCtx: llm.ChatContext,\n prevResponseId: string | undefined,\n ): Promise<boolean> {\n const messages = (await chatCtx.toProviderFormat(\n 'openai.responses',\n )) as OpenAI.Responses.ResponseInputItem[];\n\n const tools = this.toolCtx\n ? Object.entries(this.toolCtx).map(([name, func]) => {\n const oaiParams = {\n type: 'function' as const,\n name,\n description: func.description,\n parameters: llm.toJsonSchema(\n func.parameters,\n true,\n this.#strictToolSchema,\n ) as unknown as OpenAI.Responses.FunctionTool['parameters'],\n } as OpenAI.Responses.FunctionTool;\n\n if (this.#strictToolSchema) {\n oaiParams.strict = true;\n }\n\n return oaiParams;\n })\n : undefined;\n\n const requestOptions: Record<string, unknown> = { ...this.#modelOptions };\n if (!tools) {\n delete requestOptions.tool_choice;\n }\n\n const payload: WsResponseCreateEvent = {\n type: 'response.create',\n model: this.#model as string,\n input: messages as unknown[],\n tools: (tools ?? []) as unknown[],\n ...(prevResponseId ? 
{ previous_response_id: prevResponseId } : {}),\n ...requestOptions,\n };\n\n let channel: stream.StreamChannel<WsServerEvent>;\n try {\n channel = conn.sendRequest(payload);\n } catch (error) {\n if (error instanceof APIConnectionError) {\n conn.close();\n this.#pool.invalidate();\n }\n throw error;\n }\n const reader = channel.stream().getReader();\n\n // Events are already Zod-validated by ResponsesWebSocket before being\n // written to the channel, so no re-parsing is needed here.\n try {\n while (true) {\n const { done, value: event } = await reader.read();\n if (done) break;\n\n let chunk: llm.ChatChunk | undefined;\n\n switch (event.type) {\n case 'error': {\n const retry = this.#handleError(event, conn);\n if (retry) return true;\n break;\n }\n case 'response.created':\n this.#handleResponseCreated(event);\n break;\n case 'response.output_item.done':\n chunk = this.#handleOutputItemDone(event);\n break;\n case 'response.output_text.delta':\n chunk = this.#handleOutputTextDelta(event);\n break;\n case 'response.completed':\n chunk = this.#handleResponseCompleted(event);\n break;\n case 'response.failed':\n this.#handleResponseFailed(event);\n break;\n default:\n break;\n }\n\n if (chunk) {\n this.queue.put(chunk);\n }\n }\n } finally {\n reader.releaseLock();\n }\n\n return false;\n }\n\n /**\n * Returns `true` when the caller should retry with full context\n * (`previous_response_not_found`), throws for all other errors.\n */\n #handleError(event: WsServerEvent & { type: 'error' }, conn: ResponsesWebSocket): boolean {\n const code = event.error?.code;\n\n if (code === 'previous_response_not_found') {\n // The server-side in-memory cache was evicted (e.g. after a failed turn\n // or reconnect). 
Signal the caller to retry with the full context.\n return true;\n }\n\n if (code === 'websocket_connection_limit_reached' || code === 'websocket_closed') {\n // Transient connection issue (timeout, network drop, or 60-min limit).\n // Evict this connection so the pool opens a fresh one on retry.\n conn.close();\n this.#pool.invalidate();\n throw new APIConnectionError({\n message: event.error?.message ?? `WebSocket closed (${code})`,\n options: { retryable: true },\n });\n }\n\n throw new APIStatusError({\n message: event.error?.message ?? event.message ?? 'Unknown error from OpenAI Responses WS',\n options: {\n statusCode: event.status ?? -1,\n retryable: false,\n },\n });\n }\n\n #handleResponseCreated(event: WsResponseCreatedEvent): void {\n this.#responseId = event.response.id;\n this.#llm._onResponseCreated(event.response.id, this.#fullChatCtx);\n }\n\n #handleOutputItemDone(event: WsOutputItemDoneEvent): llm.ChatChunk | undefined {\n if (event.item.type === 'function_call') {\n this.#pendingToolCalls.add(event.item.call_id);\n return {\n id: this.#responseId,\n delta: {\n role: 'assistant',\n content: undefined,\n toolCalls: [\n llm.FunctionCall.create({\n callId: event.item.call_id,\n name: event.item.name,\n args: event.item.arguments,\n }),\n ],\n },\n };\n }\n return undefined;\n }\n\n #handleOutputTextDelta(event: WsOutputTextDeltaEvent): llm.ChatChunk {\n return {\n id: this.#responseId,\n delta: {\n role: 'assistant',\n content: event.delta,\n },\n };\n }\n\n #handleResponseCompleted(event: WsResponseCompletedEvent): llm.ChatChunk | undefined {\n this.#llm._setPendingToolCalls(this.#pendingToolCalls);\n\n if (event.response.usage) {\n return {\n id: this.#responseId,\n usage: {\n completionTokens: event.response.usage.output_tokens,\n promptTokens: event.response.usage.input_tokens,\n promptCachedTokens: event.response.usage.input_tokens_details.cached_tokens,\n totalTokens: event.response.usage.total_tokens,\n },\n };\n }\n return undefined;\n }\n\n 
#handleResponseFailed(event: WsResponseFailedEvent): void {\n throw new APIStatusError({\n message: event.response?.error?.message ?? 'Response failed',\n options: { statusCode: -1, retryable: false },\n });\n }\n}\n\n// ============================================================================\n// Internal helpers\n// ============================================================================\n\nasync function connectWs(url: string, apiKey: string, timeoutMs: number): Promise<WebSocket> {\n return new Promise<WebSocket>((resolve, reject) => {\n const ws = new WebSocket(url, {\n headers: { Authorization: `Bearer ${apiKey}` },\n });\n\n let settled = false;\n\n const timer = setTimeout(() => {\n settled = true;\n ws.close();\n reject(\n new APIConnectionError({ message: 'Timeout connecting to OpenAI Responses WebSocket' }),\n );\n }, timeoutMs);\n\n ws.once('open', () => {\n if (settled) return;\n settled = true;\n clearTimeout(timer);\n resolve(ws);\n });\n\n ws.once('error', (err) => {\n if (settled) return;\n settled = true;\n clearTimeout(timer);\n reject(\n new APIConnectionError({\n message: `Error connecting to OpenAI Responses WebSocket: ${err.message}`,\n }),\n );\n });\n\n ws.once('close', (code) => {\n if (settled) return;\n settled = true;\n clearTimeout(timer);\n reject(\n new APIConnectionError({\n message: `OpenAI Responses WebSocket closed unexpectedly during connect (code ${code})`,\n }),\n );\n });\n });\n}\n\nfunction getWebSocketStateLabel(readyState: number): string {\n switch (readyState) {\n case WebSocket.CONNECTING:\n return 'CONNECTING';\n case WebSocket.OPEN:\n return 'OPEN';\n case WebSocket.CLOSING:\n return 'CLOSING';\n case WebSocket.CLOSED:\n return 'CLOSED';\n default:\n return `UNKNOWN:${readyState}`;\n 
}\n}\n"],"mappings":"AAIA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AAEP,SAAS,iBAAiB;AAW1B,SAAS,2BAA2B;AAEpC,MAAM,0BAA0B;AAGhC,MAAM,0BAA0B;AAazB,MAAM,mBAAmB;AAAA,EAC9B;AAAA;AAAA,EAEA,eAAsD,CAAC;AAAA,EAEvD,YAAY,IAAe;AACzB,SAAK,MAAM;AAEX,OAAG,GAAG,WAAW,CAAC,SAAiB;AACjC,YAAM,UAAU,KAAK,aAAa,CAAC;AACnC,UAAI,CAAC,QAAS;AAEd,UAAI;AACJ,UAAI;AACF,cAAM,KAAK,MAAM,KAAK,SAAS,CAAC;AAAA,MAClC,QAAQ;AACN;AAAA,MACF;AAIA,YAAM,SAAS,oBAAoB,UAAU,GAAG;AAChD,UAAI,CAAC,OAAO,QAAS;AAErB,YAAM,QAAQ,OAAO;AACrB,WAAK,QAAQ,MAAM,KAAK;AAGxB,UACE,MAAM,SAAS,wBACf,MAAM,SAAS,qBACf,MAAM,SAAS,SACf;AACA,aAAK,QAAQ,MAAM;AACnB,aAAK,aAAa,MAAM;AAAA,MAC1B;AAAA,IACF,CAAC;AAED,OAAG,GAAG,SAAS,MAAM;AAGnB,iBAAW,WAAW,KAAK,cAAc;AACvC,YAAI,CAAC,QAAQ,QAAQ;AACnB,gBAAM,aAA4B;AAAA,YAChC,MAAM;AAAA,YACN,OAAO;AAAA,cACL,MAAM;AAAA,cACN,SAAS;AAAA,YACX;AAAA,UACF;AACA,eAAK,QAAQ,MAAM,UAAU,EAAE,QAAQ,MAAM,QAAQ,MAAM,CAAC;AAAA,QAC9D;AAAA,MACF;AACA,WAAK,eAAe,CAAC;AAAA,IACvB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,YAAY,SAAqE;AAC/E,QAAI,KAAK,IAAI,eAAe,UAAU,MAAM;AAC1C,YAAM,IAAI,mBAAmB;AAAA,QAC3B,SAAS,iDAAiD,uBAAuB,KAAK,IAAI,UAAU,CAAC;AAAA,QACrG,SAAS,EAAE,WAAW,KAAK;AAAA,MAC7B,CAAC;AAAA,IACH;AAEA,UAAM,UAAU,OAAO,oBAAmC;AAC1D,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,IAAI,KAAK,KAAK,UAAU,OAAO,CAAC;AACrC,WAAO;AAAA,EACT;AAAA,EAEA,QAAc;AAEZ,eAAW,MAAM,KAAK,cAAc;AAClC,WAAK,GAAG,MAAM;AAAA,IAChB;AACA,SAAK,eAAe,CAAC;AACrB,SAAK,IAAI,MAAM;AAAA,EACjB;AACF;AAkBA,MAAM,oBAAkC;AAAA,EACtC,OAAO;AAAA,EACP,QAAQ,QAAQ,IAAI;AAAA,EACpB,kBAAkB;AACpB;AAMO,MAAM,cAAc,IAAI,IAAI;AAAA,EACjC;AAAA,EACA;AAAA,EACA,kBAAkB;AAAA,EAClB,eAAuC;AAAA,EACvC,oBAAoB,oBAAI,IAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAapC,YAAY,OAA8B,mBAAmB;AAC3D,UAAM;AAEN,SAAK,QAAQ,EAAE,GAAG,mBAAmB,GAAG,KAAK;AAC7C,QAAI,CAAC,KAAK,MAAM,QAAQ;AACtB,YAAM,IAAI,MAAM,0EAA0E;AAAA,IAC5F;AAEA,SAAK,QAAQ,IAAI,eAAmC;AAAA,MAClD,oBAAoB;AAAA,MACpB,WAAW,OAAO,cAAsB;AACtC,cAAM,QAAQ,KAAK,MAAM,UACrB,GAAG,KAAK,MAAM,QAAQ,QAAQ,WAAW,KAAK,EAAE,QAAQ,QAAQ,EAAE,CAAC,eACnE;AACJ
,cAAM,KAAK,MAAM,UAAU,OAAO,KAAK,MAAM,QAAS,SAAS;AAC/D,eAAO,IAAI,mBAAmB,EAAE;AAAA,MAClC;AAAA,MACA,SAAS,OAAO,SAA6B;AAC3C,aAAK,MAAM;AAAA,MACb;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,QAAgB;AACd,WAAO;AAAA,EACT;AAAA,EAEA,IAAI,QAAgB;AAClB,WAAO,KAAK,MAAM;AAAA,EACpB;AAAA,EAEA,UAAgB;AACd,SAAK,MAAM,QAAQ;AAAA,EACrB;AAAA,EAEA,MAAM,QAAuB;AAC3B,UAAM,KAAK,MAAM,MAAM;AAAA,EACzB;AAAA,EAEA,MAAe,SAAwB;AACrC,UAAM,KAAK,MAAM;AAAA,EACnB;AAAA;AAAA;AAAA,EAIA,mBAAmB,YAAoB,SAAgC;AACrE,SAAK,kBAAkB;AACvB,SAAK,eAAe;AAAA,EACtB;AAAA,EAEA,qBAAqB,SAA4B;AAC/C,SAAK,oBAAoB;AAAA,EAC3B;AAAA,EAEA,KAAK;AAAA,IACH;AAAA,IACA;AAAA,IACA,cAAc;AAAA,IACd;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAOgB;AAjPlB;AAkPI,UAAM,eAAwC,EAAE,GAAI,eAAe,CAAC,EAAG;AAEvE,wBACE,sBAAsB,SAAY,oBAAoB,KAAK,MAAM;AACnE,QAAI,WAAW,OAAO,KAAK,OAAO,EAAE,SAAS,KAAK,sBAAsB,QAAW;AACjF,mBAAa,sBAAsB;AAAA,IACrC;AAEA,iBACE,eAAe,SAAY,aAAc,KAAK,MAAM;AACtD,QAAI,YAAY;AACd,mBAAa,cAAc;AAAA,IAC7B;AAEA,QAAI,KAAK,MAAM,gBAAgB,QAAW;AACxC,mBAAa,cAAc,KAAK,MAAM;AAAA,IACxC;AAEA,QAAI,KAAK,MAAM,UAAU,QAAW;AAClC,mBAAa,QAAQ,KAAK,MAAM;AAAA,IAClC;AAEA,QAAI,KAAK,MAAM,UAAU;AACvB,mBAAa,WAAW,KAAK,MAAM;AAAA,IACrC;AAEA,QAAI,eAAe;AACnB,QAAI;AACJ,UAAM,uBAAuB,aAAa,UAAU;AAEpD,QAAI,wBAAwB,KAAK,gBAAgB,KAAK,iBAAiB;AACrE,YAAM,OAAO,IAAI,mBAAmB,KAAK,cAAc,OAAO;AAC9D,YAAM,mBAAiB,UAAK,aAAa,MAAM,GAAG,EAAE,MAA7B,mBAAgC,OAAM;AAE7D,UACE,KAAK,SAAS,WAAW,KACzB,KAAK,SAAS,SAAS,KACvB,KAAK,SAAS,CAAC,EAAG,CAAC,MAAM,gBACzB;AAKA,cAAM,aAAa,IAAI,IAAI,KAAK,SAAS,IAAI,CAAC,CAAC,EAAE,EAAE,MAAM,EAAE,CAAC;AAC5D,cAAM,WAAW,QAAQ,MAAM,OAAO,CAAC,SAAuB,WAAW,IAAI,KAAK,EAAE,CAAC;AACrF,cAAM,4BAA4B,KAAK,2BAA2B,QAAQ;AAC1E,YAAI,2BAA2B;AAC7B,yBAAe,IAAI,IAAI,YAAY,QAAQ;AAC3C,2BAAiB,KAAK;AAAA,QACxB;AAAA,MACF;AAAA,IAGF;AAEA,WAAO,IAAI,YAAY,MAAM;AAAA,MAC3B,MAAM,KAAK;AAAA,MACX,OAAO,KAAK,MAAM;AAAA,MAClB,SAAS;AAAA,MACT,aAAa;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,kBAAkB,KAAK,MAAM,oBAAoB;AAAA,IACnD,CAAC;AAAA,EACH;AAAA,EAEA,2BAA2B,OAAgC;AACzD,QAAI,KAAK,kBAAkB,SAAS,EAAG,QAAO;AAC9C,UAAM,mBAAmB,IAAI;AAAA,MAC3B,MACG,OAAO,CAAC,SAAyC,KAAK,SAAS,sBAAsB,E
ACrF,IAAI,CAAC,SAAS,KAAK,MAAM;AAAA,IAC9B;AACA,WAAO,CAAC,GAAG,KAAK,iBAAiB,EAAE,MAAM,CAAC,WAAW,iBAAiB,IAAI,MAAM,CAAC;AAAA,EACnF;AACF;AAMO,MAAM,oBAAoB,IAAI,UAAU;AAAA,EAC7C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA,EAEA;AAAA,EACA,cAAc;AAAA,EACd,oBAAoB,oBAAI,IAAY;AAAA,EAEpC,YACEA,MACA;AAAA,IACE;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAWA;AACA,UAAMA,MAAK,EAAE,SAAS,SAAS,YAAY,CAAC;AAC5C,SAAK,OAAOA;AACZ,SAAK,QAAQ;AACb,SAAK,SAAS;AACd,SAAK,gBAAgB;AACrB,SAAK,oBAAoB;AACzB,SAAK,kBAAkB;AACvB,SAAK,eAAe;AAAA,EACtB;AAAA,EAEA,MAAgB,MAAqB;AACnC,QAAI,YAAY;AAEhB,QAAI;AACF,YAAM,KAAK,MAAM,eAAe,OAAO,SAA6B;AAClE,cAAM,aAAa,MAAM,KAAK,aAAa,MAAM,KAAK,SAAS,KAAK,eAAe;AAEnF,YAAI,YAAY;AAGd,sBAAY;AACZ,gBAAM,KAAK,aAAa,MAAM,KAAK,cAAc,MAAS;AAAA,QAC5D;AAAA,MACF,CAAC;AAAA,IACH,SAAS,OAAO;AACd,UACE,iBAAiB,kBACjB,iBAAiB,mBACjB,iBAAiB,oBACjB;AACA,cAAM;AAAA,MACR;AACA,YAAM,IAAI,mBAAmB;AAAA,QAC3B,SAAS,QAAQ,KAAK,EAAE;AAAA,QACxB,SAAS,EAAE,UAAU;AAAA,MACvB,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,aACJ,MACA,SACA,gBACkB;AAClB,UAAM,WAAY,MAAM,QAAQ;AAAA,MAC9B;AAAA,IACF;AAEA,UAAM,QAAQ,KAAK,UACf,OAAO,QAAQ,KAAK,OAAO,EAAE,IAAI,CAAC,CAAC,MAAM,IAAI,MAAM;AACjD,YAAM,YAAY;AAAA,QAChB,MAAM;AAAA,QACN;AAAA,QACA,aAAa,KAAK;AAAA,QAClB,YAAY,IAAI;AAAA,UACd,KAAK;AAAA,UACL;AAAA,UACA,KAAK;AAAA,QACP;AAAA,MACF;AAEA,UAAI,KAAK,mBAAmB;AAC1B,kBAAU,SAAS;AAAA,MACrB;AAEA,aAAO;AAAA,IACT,CAAC,IACD;AAEJ,UAAM,iBAA0C,EAAE,GAAG,KAAK,cAAc;AACxE,QAAI,CAAC,OAAO;AACV,aAAO,eAAe;AAAA,IACxB;AAEA,UAAM,UAAiC;AAAA,MACrC,MAAM;AAAA,MACN,OAAO,KAAK;AAAA,MACZ,OAAO;AAAA,MACP,OAAQ,SAAS,CAAC;AAAA,MAClB,GAAI,iBAAiB,EAAE,sBAAsB,eAAe,IAAI,CAAC;AAAA,MACjE,GAAG;AAAA,IACL;AAEA,QAAI;AACJ,QAAI;AACF,gBAAU,KAAK,YAAY,OAAO;AAAA,IACpC,SAAS,OAAO;AACd,UAAI,iBAAiB,oBAAoB;AACvC,aAAK,MAAM;AACX,aAAK,MAAM,WAAW;AAAA,MACxB;AACA,YAAM;AAAA,IACR;AACA,UAAM,SAAS,QAAQ,OAAO,EAAE,UAAU;AAI1C,QAAI;AACF,aAAO,MAAM;AACX,cAAM,EAAE,MAAM,OAAO,MAAM,IAAI,MAAM,OAAO,KAAK;AACjD,YAAI,KAAM;AAEV,YAAI;AAEJ,gBAAQ,MAAM,MAAM;AAAA,U
AClB,KAAK,SAAS;AACZ,kBAAM,QAAQ,KAAK,aAAa,OAAO,IAAI;AAC3C,gBAAI,MAAO,QAAO;AAClB;AAAA,UACF;AAAA,UACA,KAAK;AACH,iBAAK,uBAAuB,KAAK;AACjC;AAAA,UACF,KAAK;AACH,oBAAQ,KAAK,sBAAsB,KAAK;AACxC;AAAA,UACF,KAAK;AACH,oBAAQ,KAAK,uBAAuB,KAAK;AACzC;AAAA,UACF,KAAK;AACH,oBAAQ,KAAK,yBAAyB,KAAK;AAC3C;AAAA,UACF,KAAK;AACH,iBAAK,sBAAsB,KAAK;AAChC;AAAA,UACF;AACE;AAAA,QACJ;AAEA,YAAI,OAAO;AACT,eAAK,MAAM,IAAI,KAAK;AAAA,QACtB;AAAA,MACF;AAAA,IACF,UAAE;AACA,aAAO,YAAY;AAAA,IACrB;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,aAAa,OAA0C,MAAmC;AA9f5F;AA+fI,UAAM,QAAO,WAAM,UAAN,mBAAa;AAE1B,QAAI,SAAS,+BAA+B;AAG1C,aAAO;AAAA,IACT;AAEA,QAAI,SAAS,wCAAwC,SAAS,oBAAoB;AAGhF,WAAK,MAAM;AACX,WAAK,MAAM,WAAW;AACtB,YAAM,IAAI,mBAAmB;AAAA,QAC3B,WAAS,WAAM,UAAN,mBAAa,YAAW,qBAAqB,IAAI;AAAA,QAC1D,SAAS,EAAE,WAAW,KAAK;AAAA,MAC7B,CAAC;AAAA,IACH;AAEA,UAAM,IAAI,eAAe;AAAA,MACvB,WAAS,WAAM,UAAN,mBAAa,YAAW,MAAM,WAAW;AAAA,MAClD,SAAS;AAAA,QACP,YAAY,MAAM,UAAU;AAAA,QAC5B,WAAW;AAAA,MACb;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,uBAAuB,OAAqC;AAC1D,SAAK,cAAc,MAAM,SAAS;AAClC,SAAK,KAAK,mBAAmB,MAAM,SAAS,IAAI,KAAK,YAAY;AAAA,EACnE;AAAA,EAEA,sBAAsB,OAAyD;AAC7E,QAAI,MAAM,KAAK,SAAS,iBAAiB;AACvC,WAAK,kBAAkB,IAAI,MAAM,KAAK,OAAO;AAC7C,aAAO;AAAA,QACL,IAAI,KAAK;AAAA,QACT,OAAO;AAAA,UACL,MAAM;AAAA,UACN,SAAS;AAAA,UACT,WAAW;AAAA,YACT,IAAI,aAAa,OAAO;AAAA,cACtB,QAAQ,MAAM,KAAK;AAAA,cACnB,MAAM,MAAM,KAAK;AAAA,cACjB,MAAM,MAAM,KAAK;AAAA,YACnB,CAAC;AAAA,UACH;AAAA,QACF;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEA,uBAAuB,OAA8C;AACnE,WAAO;AAAA,MACL,IAAI,KAAK;AAAA,MACT,OAAO;AAAA,QACL,MAAM;AAAA,QACN,SAAS,MAAM;AAAA,MACjB;AAAA,IACF;AAAA,EACF;AAAA,EAEA,yBAAyB,OAA4D;AACnF,SAAK,KAAK,qBAAqB,KAAK,iBAAiB;AAErD,QAAI,MAAM,SAAS,OAAO;AACxB,aAAO;AAAA,QACL,IAAI,KAAK;AAAA,QACT,OAAO;AAAA,UACL,kBAAkB,MAAM,SAAS,MAAM;AAAA,UACvC,cAAc,MAAM,SAAS,MAAM;AAAA,UACnC,oBAAoB,MAAM,SAAS,MAAM,qBAAqB;AAAA,UAC9D,aAAa,MAAM,SAAS,MAAM;AAAA,QACpC;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEA,sBAAsB,OAAoC;AAhlB5D;AAilBI,UAAM,IAAI,eAAe;AAAA,MACvB,WAAS,iBAAM,aAAN,mBAAgB,UAAhB,mBAAuB,YAAW;AAAA,MAC3C,SAA
S,EAAE,YAAY,IAAI,WAAW,MAAM;AAAA,IAC9C,CAAC;AAAA,EACH;AACF;AAMA,eAAe,UAAU,KAAa,QAAgB,WAAuC;AAC3F,SAAO,IAAI,QAAmB,CAAC,SAAS,WAAW;AACjD,UAAM,KAAK,IAAI,UAAU,KAAK;AAAA,MAC5B,SAAS,EAAE,eAAe,UAAU,MAAM,GAAG;AAAA,IAC/C,CAAC;AAED,QAAI,UAAU;AAEd,UAAM,QAAQ,WAAW,MAAM;AAC7B,gBAAU;AACV,SAAG,MAAM;AACT;AAAA,QACE,IAAI,mBAAmB,EAAE,SAAS,mDAAmD,CAAC;AAAA,MACxF;AAAA,IACF,GAAG,SAAS;AAEZ,OAAG,KAAK,QAAQ,MAAM;AACpB,UAAI,QAAS;AACb,gBAAU;AACV,mBAAa,KAAK;AAClB,cAAQ,EAAE;AAAA,IACZ,CAAC;AAED,OAAG,KAAK,SAAS,CAAC,QAAQ;AACxB,UAAI,QAAS;AACb,gBAAU;AACV,mBAAa,KAAK;AAClB;AAAA,QACE,IAAI,mBAAmB;AAAA,UACrB,SAAS,mDAAmD,IAAI,OAAO;AAAA,QACzE,CAAC;AAAA,MACH;AAAA,IACF,CAAC;AAED,OAAG,KAAK,SAAS,CAAC,SAAS;AACzB,UAAI,QAAS;AACb,gBAAU;AACV,mBAAa,KAAK;AAClB;AAAA,QACE,IAAI,mBAAmB;AAAA,UACrB,SAAS,uEAAuE,IAAI;AAAA,QACtF,CAAC;AAAA,MACH;AAAA,IACF,CAAC;AAAA,EACH,CAAC;AACH;AAEA,SAAS,uBAAuB,YAA4B;AAC1D,UAAQ,YAAY;AAAA,IAClB,KAAK,UAAU;AACb,aAAO;AAAA,IACT,KAAK,UAAU;AACb,aAAO;AAAA,IACT,KAAK,UAAU;AACb,aAAO;AAAA,IACT,KAAK,UAAU;AACb,aAAO;AAAA,IACT;AACE,aAAO,WAAW,UAAU;AAAA,EAChC;AACF;","names":["llm"]}
@@ -0,0 +1,26 @@
1
+ "use strict";
2
+ var import_agents = require("@livekit/agents");
3
+ var import_agents_plugins_test = require("@livekit/agents-plugins-test");
4
+ var import_vitest = require("vitest");
5
+ var import_llm = require("../responses/llm.cjs");
6
+ (0, import_agents.initializeLogger)({ level: "silent", pretty: false });
7
+ (0, import_vitest.describe)("OpenAI Responses WS wrapper", async () => {
8
+ await (0, import_agents_plugins_test.llm)(
9
+ new import_llm.LLM({
10
+ temperature: 0,
11
+ strictToolSchema: false,
12
+ useWebSocket: true
13
+ }),
14
+ true
15
+ );
16
+ });
17
+ (0, import_vitest.describe)("OpenAI Responses WS wrapper strict tool schema", async () => {
18
+ await (0, import_agents_plugins_test.llmStrict)(
19
+ new import_llm.LLM({
20
+ temperature: 0,
21
+ strictToolSchema: true,
22
+ useWebSocket: true
23
+ })
24
+ );
25
+ });
26
+ //# sourceMappingURL=llm.test.cjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../src/ws/llm.test.ts"],"sourcesContent":["// SPDX-FileCopyrightText: 2025 LiveKit, Inc.\n//\n// SPDX-License-Identifier: Apache-2.0\nimport { initializeLogger } from '@livekit/agents';\nimport { llm, llmStrict } from '@livekit/agents-plugins-test';\nimport { describe } from 'vitest';\nimport { LLM } from '../responses/llm.js';\n\ninitializeLogger({ level: 'silent', pretty: false });\n\ndescribe('OpenAI Responses WS wrapper', async () => {\n await llm(\n new LLM({\n temperature: 0,\n strictToolSchema: false,\n useWebSocket: true,\n }),\n true,\n );\n});\n\ndescribe('OpenAI Responses WS wrapper strict tool schema', async () => {\n await llmStrict(\n new LLM({\n temperature: 0,\n strictToolSchema: true,\n useWebSocket: true,\n }),\n );\n});\n"],"mappings":";AAGA,oBAAiC;AACjC,iCAA+B;AAC/B,oBAAyB;AACzB,iBAAoB;AAAA,IAEpB,gCAAiB,EAAE,OAAO,UAAU,QAAQ,MAAM,CAAC;AAAA,IAEnD,wBAAS,+BAA+B,YAAY;AAClD,YAAM;AAAA,IACJ,IAAI,eAAI;AAAA,MACN,aAAa;AAAA,MACb,kBAAkB;AAAA,MAClB,cAAc;AAAA,IAChB,CAAC;AAAA,IACD;AAAA,EACF;AACF,CAAC;AAAA,IAED,wBAAS,kDAAkD,YAAY;AACrE,YAAM;AAAA,IACJ,IAAI,eAAI;AAAA,MACN,aAAa;AAAA,MACb,kBAAkB;AAAA,MAClB,cAAc;AAAA,IAChB,CAAC;AAAA,EACH;AACF,CAAC;","names":[]}
@@ -0,0 +1,2 @@
1
+ export {};
2
+ //# sourceMappingURL=llm.test.d.ts.map
@@ -0,0 +1,2 @@
1
+ export {};
2
+ //# sourceMappingURL=llm.test.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"llm.test.d.ts","sourceRoot":"","sources":["../../src/ws/llm.test.ts"],"names":[],"mappings":""}