@lenylvt/pi-ai 0.64.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167) hide show
  1. package/README.md +203 -0
  2. package/dist/api-registry.d.ts +20 -0
  3. package/dist/api-registry.d.ts.map +1 -0
  4. package/dist/api-registry.js +44 -0
  5. package/dist/api-registry.js.map +1 -0
  6. package/dist/cli.d.ts +3 -0
  7. package/dist/cli.d.ts.map +1 -0
  8. package/dist/cli.js +119 -0
  9. package/dist/cli.js.map +1 -0
  10. package/dist/env-api-keys.d.ts +7 -0
  11. package/dist/env-api-keys.d.ts.map +1 -0
  12. package/dist/env-api-keys.js +13 -0
  13. package/dist/env-api-keys.js.map +1 -0
  14. package/dist/index.d.ts +20 -0
  15. package/dist/index.d.ts.map +1 -0
  16. package/dist/index.js +14 -0
  17. package/dist/index.js.map +1 -0
  18. package/dist/models.d.ts +24 -0
  19. package/dist/models.d.ts.map +1 -0
  20. package/dist/models.generated.d.ts +2332 -0
  21. package/dist/models.generated.d.ts.map +1 -0
  22. package/dist/models.generated.js +2186 -0
  23. package/dist/models.generated.js.map +1 -0
  24. package/dist/models.js +60 -0
  25. package/dist/models.js.map +1 -0
  26. package/dist/oauth.d.ts +2 -0
  27. package/dist/oauth.d.ts.map +1 -0
  28. package/dist/oauth.js +2 -0
  29. package/dist/oauth.js.map +1 -0
  30. package/dist/providers/anthropic.d.ts +40 -0
  31. package/dist/providers/anthropic.d.ts.map +1 -0
  32. package/dist/providers/anthropic.js +749 -0
  33. package/dist/providers/anthropic.js.map +1 -0
  34. package/dist/providers/faux.d.ts +56 -0
  35. package/dist/providers/faux.d.ts.map +1 -0
  36. package/dist/providers/faux.js +367 -0
  37. package/dist/providers/faux.js.map +1 -0
  38. package/dist/providers/github-copilot-headers.d.ts +8 -0
  39. package/dist/providers/github-copilot-headers.d.ts.map +1 -0
  40. package/dist/providers/github-copilot-headers.js +29 -0
  41. package/dist/providers/github-copilot-headers.js.map +1 -0
  42. package/dist/providers/openai-codex-responses.d.ts +9 -0
  43. package/dist/providers/openai-codex-responses.d.ts.map +1 -0
  44. package/dist/providers/openai-codex-responses.js +741 -0
  45. package/dist/providers/openai-codex-responses.js.map +1 -0
  46. package/dist/providers/openai-completions.d.ts +15 -0
  47. package/dist/providers/openai-completions.d.ts.map +1 -0
  48. package/dist/providers/openai-completions.js +687 -0
  49. package/dist/providers/openai-completions.js.map +1 -0
  50. package/dist/providers/openai-responses-shared.d.ts +17 -0
  51. package/dist/providers/openai-responses-shared.d.ts.map +1 -0
  52. package/dist/providers/openai-responses-shared.js +458 -0
  53. package/dist/providers/openai-responses-shared.js.map +1 -0
  54. package/dist/providers/openai-responses.d.ts +13 -0
  55. package/dist/providers/openai-responses.d.ts.map +1 -0
  56. package/dist/providers/openai-responses.js +190 -0
  57. package/dist/providers/openai-responses.js.map +1 -0
  58. package/dist/providers/register-builtins.d.ts +16 -0
  59. package/dist/providers/register-builtins.d.ts.map +1 -0
  60. package/dist/providers/register-builtins.js +140 -0
  61. package/dist/providers/register-builtins.js.map +1 -0
  62. package/dist/providers/simple-options.d.ts +8 -0
  63. package/dist/providers/simple-options.d.ts.map +1 -0
  64. package/dist/providers/simple-options.js +35 -0
  65. package/dist/providers/simple-options.js.map +1 -0
  66. package/dist/providers/transform-messages.d.ts +8 -0
  67. package/dist/providers/transform-messages.d.ts.map +1 -0
  68. package/dist/providers/transform-messages.js +155 -0
  69. package/dist/providers/transform-messages.js.map +1 -0
  70. package/dist/stream.d.ts +8 -0
  71. package/dist/stream.d.ts.map +1 -0
  72. package/dist/stream.js +27 -0
  73. package/dist/stream.js.map +1 -0
  74. package/dist/types.d.ts +283 -0
  75. package/dist/types.d.ts.map +1 -0
  76. package/dist/types.js +2 -0
  77. package/dist/types.js.map +1 -0
  78. package/dist/utils/event-stream.d.ts +21 -0
  79. package/dist/utils/event-stream.d.ts.map +1 -0
  80. package/dist/utils/event-stream.js +81 -0
  81. package/dist/utils/event-stream.js.map +1 -0
  82. package/dist/utils/hash.d.ts +3 -0
  83. package/dist/utils/hash.d.ts.map +1 -0
  84. package/dist/utils/hash.js +14 -0
  85. package/dist/utils/hash.js.map +1 -0
  86. package/dist/utils/json-parse.d.ts +9 -0
  87. package/dist/utils/json-parse.d.ts.map +1 -0
  88. package/dist/utils/json-parse.js +29 -0
  89. package/dist/utils/json-parse.js.map +1 -0
  90. package/dist/utils/oauth/anthropic.d.ts +25 -0
  91. package/dist/utils/oauth/anthropic.d.ts.map +1 -0
  92. package/dist/utils/oauth/anthropic.js +335 -0
  93. package/dist/utils/oauth/anthropic.js.map +1 -0
  94. package/dist/utils/oauth/github-copilot.d.ts +30 -0
  95. package/dist/utils/oauth/github-copilot.d.ts.map +1 -0
  96. package/dist/utils/oauth/github-copilot.js +292 -0
  97. package/dist/utils/oauth/github-copilot.js.map +1 -0
  98. package/dist/utils/oauth/index.d.ts +36 -0
  99. package/dist/utils/oauth/index.d.ts.map +1 -0
  100. package/dist/utils/oauth/index.js +92 -0
  101. package/dist/utils/oauth/index.js.map +1 -0
  102. package/dist/utils/oauth/oauth-page.d.ts +3 -0
  103. package/dist/utils/oauth/oauth-page.d.ts.map +1 -0
  104. package/dist/utils/oauth/oauth-page.js +105 -0
  105. package/dist/utils/oauth/oauth-page.js.map +1 -0
  106. package/dist/utils/oauth/openai-codex.d.ts +34 -0
  107. package/dist/utils/oauth/openai-codex.d.ts.map +1 -0
  108. package/dist/utils/oauth/openai-codex.js +373 -0
  109. package/dist/utils/oauth/openai-codex.js.map +1 -0
  110. package/dist/utils/oauth/pkce.d.ts +13 -0
  111. package/dist/utils/oauth/pkce.d.ts.map +1 -0
  112. package/dist/utils/oauth/pkce.js +31 -0
  113. package/dist/utils/oauth/pkce.js.map +1 -0
  114. package/dist/utils/oauth/types.d.ts +47 -0
  115. package/dist/utils/oauth/types.d.ts.map +1 -0
  116. package/dist/utils/oauth/types.js +2 -0
  117. package/dist/utils/oauth/types.js.map +1 -0
  118. package/dist/utils/overflow.d.ts +53 -0
  119. package/dist/utils/overflow.d.ts.map +1 -0
  120. package/dist/utils/overflow.js +119 -0
  121. package/dist/utils/overflow.js.map +1 -0
  122. package/dist/utils/sanitize-unicode.d.ts +22 -0
  123. package/dist/utils/sanitize-unicode.d.ts.map +1 -0
  124. package/dist/utils/sanitize-unicode.js +26 -0
  125. package/dist/utils/sanitize-unicode.js.map +1 -0
  126. package/dist/utils/typebox-helpers.d.ts +17 -0
  127. package/dist/utils/typebox-helpers.d.ts.map +1 -0
  128. package/dist/utils/typebox-helpers.js +21 -0
  129. package/dist/utils/typebox-helpers.js.map +1 -0
  130. package/dist/utils/validation.d.ts +18 -0
  131. package/dist/utils/validation.d.ts.map +1 -0
  132. package/dist/utils/validation.js +80 -0
  133. package/dist/utils/validation.js.map +1 -0
  134. package/package.json +89 -0
  135. package/src/api-registry.ts +98 -0
  136. package/src/cli.ts +136 -0
  137. package/src/env-api-keys.ts +22 -0
  138. package/src/index.ts +29 -0
  139. package/src/models.generated.ts +2188 -0
  140. package/src/models.ts +82 -0
  141. package/src/oauth.ts +1 -0
  142. package/src/providers/anthropic.ts +905 -0
  143. package/src/providers/faux.ts +498 -0
  144. package/src/providers/github-copilot-headers.ts +37 -0
  145. package/src/providers/openai-codex-responses.ts +929 -0
  146. package/src/providers/openai-completions.ts +811 -0
  147. package/src/providers/openai-responses-shared.ts +513 -0
  148. package/src/providers/openai-responses.ts +251 -0
  149. package/src/providers/register-builtins.ts +232 -0
  150. package/src/providers/simple-options.ts +46 -0
  151. package/src/providers/transform-messages.ts +172 -0
  152. package/src/stream.ts +59 -0
  153. package/src/types.ts +294 -0
  154. package/src/utils/event-stream.ts +87 -0
  155. package/src/utils/hash.ts +13 -0
  156. package/src/utils/json-parse.ts +28 -0
  157. package/src/utils/oauth/anthropic.ts +402 -0
  158. package/src/utils/oauth/github-copilot.ts +396 -0
  159. package/src/utils/oauth/index.ts +123 -0
  160. package/src/utils/oauth/oauth-page.ts +109 -0
  161. package/src/utils/oauth/openai-codex.ts +450 -0
  162. package/src/utils/oauth/pkce.ts +34 -0
  163. package/src/utils/oauth/types.ts +59 -0
  164. package/src/utils/overflow.ts +125 -0
  165. package/src/utils/sanitize-unicode.ts +25 -0
  166. package/src/utils/typebox-helpers.ts +24 -0
  167. package/src/utils/validation.ts +93 -0
@@ -0,0 +1,251 @@
1
+ import OpenAI from "openai";
2
+ import type { ResponseCreateParamsStreaming } from "openai/resources/responses/responses.js";
3
+ import { getEnvApiKey } from "../env-api-keys.js";
4
+ import { supportsXhigh } from "../models.js";
5
+ import type {
6
+ Api,
7
+ AssistantMessage,
8
+ CacheRetention,
9
+ Context,
10
+ Model,
11
+ SimpleStreamOptions,
12
+ StreamFunction,
13
+ StreamOptions,
14
+ Usage,
15
+ } from "../types.js";
16
+ import { AssistantMessageEventStream } from "../utils/event-stream.js";
17
+ import { buildCopilotDynamicHeaders, hasCopilotVisionInput } from "./github-copilot-headers.js";
18
+ import { convertResponsesMessages, convertResponsesTools, processResponsesStream } from "./openai-responses-shared.js";
19
+ import { buildBaseOptions, clampReasoning } from "./simple-options.js";
20
+
21
+ const OPENAI_TOOL_CALL_PROVIDERS = new Set(["github-copilot", "openai-codex"]);
22
+
23
+ /**
24
+ * Resolve cache retention preference.
25
+ * Defaults to "short" and uses PI_CACHE_RETENTION for backward compatibility.
26
+ */
27
+ function resolveCacheRetention(cacheRetention?: CacheRetention): CacheRetention {
28
+ if (cacheRetention) {
29
+ return cacheRetention;
30
+ }
31
+ if (typeof process !== "undefined" && process.env.PI_CACHE_RETENTION === "long") {
32
+ return "long";
33
+ }
34
+ return "short";
35
+ }
36
+
37
+ /**
38
+ * Get prompt cache retention based on cacheRetention and base URL.
39
+ * Only applies to direct OpenAI API calls (api.openai.com).
40
+ */
41
+ function getPromptCacheRetention(baseUrl: string, cacheRetention: CacheRetention): "24h" | undefined {
42
+ if (cacheRetention !== "long") {
43
+ return undefined;
44
+ }
45
+ if (baseUrl.includes("api.openai.com")) {
46
+ return "24h";
47
+ }
48
+ return undefined;
49
+ }
50
+
51
// OpenAI Responses-specific options
export interface OpenAIResponsesOptions extends StreamOptions {
  // Reasoning effort level; "xhigh" is only meaningful on models that support it
  // (see supportsXhigh) — callers without support should clamp it to "high".
  reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh";
  // Reasoning summary verbosity; null explicitly disables summaries.
  reasoningSummary?: "auto" | "detailed" | "concise" | null;
  // OpenAI service tier passed straight through to the request; "flex"/"priority"
  // also scale the computed cost (see applyServiceTierPricing).
  serviceTier?: ResponseCreateParamsStreaming["service_tier"];
}
57
+
58
/**
 * Generate function for OpenAI Responses API.
 *
 * Returns an AssistantMessageEventStream synchronously and performs the request
 * in a detached async task, pushing "start", content events (via
 * processResponsesStream), and finally "done" or "error" onto the stream.
 */
export const streamOpenAIResponses: StreamFunction<"openai-responses", OpenAIResponsesOptions> = (
  model: Model<"openai-responses">,
  context: Context,
  options?: OpenAIResponsesOptions,
): AssistantMessageEventStream => {
  const stream = new AssistantMessageEventStream();

  // Start async processing
  (async () => {
    // Accumulator for the final assistant message; mutated in place by
    // processResponsesStream as streaming events arrive.
    const output: AssistantMessage = {
      role: "assistant",
      content: [],
      api: model.api as Api,
      provider: model.provider,
      model: model.id,
      usage: {
        input: 0,
        output: 0,
        cacheRead: 0,
        cacheWrite: 0,
        totalTokens: 0,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
      },
      stopReason: "stop",
      timestamp: Date.now(),
    };

    try {
      // Create OpenAI client
      const apiKey = options?.apiKey || getEnvApiKey(model.provider) || "";
      const client = createClient(model, context, apiKey, options?.headers);
      let params = buildParams(model, context, options);
      // Give the caller a chance to inspect or replace the outgoing payload.
      const nextParams = await options?.onPayload?.(params, model);
      if (nextParams !== undefined) {
        params = nextParams as ResponseCreateParamsStreaming;
      }
      const openaiStream = await client.responses.create(
        params,
        options?.signal ? { signal: options.signal } : undefined,
      );
      stream.push({ type: "start", partial: output });

      await processResponsesStream(openaiStream, output, stream, model, {
        serviceTier: options?.serviceTier,
        applyServiceTierPricing,
      });

      // Abort may have been requested mid-stream; route it through the catch
      // path so the partial message is emitted as an "error"/"aborted" event.
      if (options?.signal?.aborted) {
        throw new Error("Request was aborted");
      }

      if (output.stopReason === "aborted" || output.stopReason === "error") {
        throw new Error("An unknown error occurred");
      }

      stream.push({ type: "done", reason: output.stopReason, message: output });
      stream.end();
    } catch (error) {
      // Strip internal streaming bookkeeping before surfacing the partial message.
      for (const block of output.content) delete (block as { index?: number }).index;
      output.stopReason = options?.signal?.aborted ? "aborted" : "error";
      output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
      stream.push({ type: "error", reason: output.stopReason, error: output });
      stream.end();
    }
  })();

  return stream;
};
129
+
130
+ export const streamSimpleOpenAIResponses: StreamFunction<"openai-responses", SimpleStreamOptions> = (
131
+ model: Model<"openai-responses">,
132
+ context: Context,
133
+ options?: SimpleStreamOptions,
134
+ ): AssistantMessageEventStream => {
135
+ const apiKey = options?.apiKey || getEnvApiKey(model.provider);
136
+ if (!apiKey) {
137
+ throw new Error(`No API key for provider: ${model.provider}`);
138
+ }
139
+
140
+ const base = buildBaseOptions(model, options, apiKey);
141
+ const reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning);
142
+
143
+ return streamOpenAIResponses(model, context, {
144
+ ...base,
145
+ reasoningEffort,
146
+ } satisfies OpenAIResponsesOptions);
147
+ };
148
+
149
+ function createClient(
150
+ model: Model<"openai-responses">,
151
+ context: Context,
152
+ apiKey?: string,
153
+ optionsHeaders?: Record<string, string>,
154
+ ) {
155
+ if (!apiKey) {
156
+ if (!process.env.OPENAI_API_KEY) {
157
+ throw new Error(
158
+ "OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.",
159
+ );
160
+ }
161
+ apiKey = process.env.OPENAI_API_KEY;
162
+ }
163
+
164
+ const headers = { ...model.headers };
165
+ if (model.provider === "github-copilot") {
166
+ const hasImages = hasCopilotVisionInput(context.messages);
167
+ const copilotHeaders = buildCopilotDynamicHeaders({
168
+ messages: context.messages,
169
+ hasImages,
170
+ });
171
+ Object.assign(headers, copilotHeaders);
172
+ }
173
+
174
+ // Merge options headers last so they can override defaults
175
+ if (optionsHeaders) {
176
+ Object.assign(headers, optionsHeaders);
177
+ }
178
+
179
+ return new OpenAI({
180
+ apiKey,
181
+ baseURL: model.baseUrl,
182
+ dangerouslyAllowBrowser: true,
183
+ defaultHeaders: headers,
184
+ });
185
+ }
186
+
187
/**
 * Translate model/context/options into Responses API request parameters.
 * Always streams; `store: false` opts out of OpenAI-side response storage.
 */
function buildParams(model: Model<"openai-responses">, context: Context, options?: OpenAIResponsesOptions) {
  const messages = convertResponsesMessages(model, context, OPENAI_TOOL_CALL_PROVIDERS);

  const cacheRetention = resolveCacheRetention(options?.cacheRetention);
  const params: ResponseCreateParamsStreaming = {
    model: model.id,
    input: messages,
    stream: true,
    // "none" disables the cache key entirely; otherwise the cache is keyed by session.
    prompt_cache_key: cacheRetention === "none" ? undefined : options?.sessionId,
    prompt_cache_retention: getPromptCacheRetention(model.baseUrl, cacheRetention),
    store: false,
  };

  // Truthiness check: a maxTokens of 0 falls through to the API default.
  if (options?.maxTokens) {
    params.max_output_tokens = options?.maxTokens;
  }

  if (options?.temperature !== undefined) {
    params.temperature = options?.temperature;
  }

  if (options?.serviceTier !== undefined) {
    params.service_tier = options.serviceTier;
  }

  if (context.tools) {
    params.tools = convertResponsesTools(context.tools);
  }

  if (model.reasoning) {
    if (options?.reasoningEffort || options?.reasoningSummary) {
      params.reasoning = {
        effort: options?.reasoningEffort || "medium",
        summary: options?.reasoningSummary || "auto",
      };
      // Required so encrypted reasoning content can be replayed on later turns.
      params.include = ["reasoning.encrypted_content"];
    } else if (model.provider !== "github-copilot") {
      // NOTE(review): presumably the targeted models accept effort "none";
      // github-copilot is excluded, suggesting it rejects it — confirm.
      params.reasoning = { effort: "none" };
    }
  }

  return params;
}
230
+
231
+ function getServiceTierCostMultiplier(serviceTier: ResponseCreateParamsStreaming["service_tier"] | undefined): number {
232
+ switch (serviceTier) {
233
+ case "flex":
234
+ return 0.5;
235
+ case "priority":
236
+ return 2;
237
+ default:
238
+ return 1;
239
+ }
240
+ }
241
+
242
+ function applyServiceTierPricing(usage: Usage, serviceTier: ResponseCreateParamsStreaming["service_tier"] | undefined) {
243
+ const multiplier = getServiceTierCostMultiplier(serviceTier);
244
+ if (multiplier === 1) return;
245
+
246
+ usage.cost.input *= multiplier;
247
+ usage.cost.output *= multiplier;
248
+ usage.cost.cacheRead *= multiplier;
249
+ usage.cost.cacheWrite *= multiplier;
250
+ usage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite;
251
+ }
@@ -0,0 +1,232 @@
1
+ import { clearApiProviders, registerApiProvider } from "../api-registry.js";
2
+ import type {
3
+ Api,
4
+ AssistantMessage,
5
+ AssistantMessageEvent,
6
+ Context,
7
+ Model,
8
+ SimpleStreamOptions,
9
+ StreamFunction,
10
+ StreamOptions,
11
+ } from "../types.js";
12
+ import { AssistantMessageEventStream } from "../utils/event-stream.js";
13
+ import type { AnthropicOptions } from "./anthropic.js";
14
+ import type { OpenAICodexResponsesOptions } from "./openai-codex-responses.js";
15
+ import type { OpenAICompletionsOptions } from "./openai-completions.js";
16
+ import type { OpenAIResponsesOptions } from "./openai-responses.js";
17
+
18
/**
 * Uniform shape every lazily-imported provider module is adapted to:
 * a full-options `stream` plus a simplified `streamSimple`.
 */
interface LazyProviderModule<
  TApi extends Api,
  TOptions extends StreamOptions,
  TSimpleOptions extends SimpleStreamOptions,
> {
  stream: (model: Model<TApi>, context: Context, options?: TOptions) => AsyncIterable<AssistantMessageEvent>;
  streamSimple: (
    model: Model<TApi>,
    context: Context,
    options?: TSimpleOptions,
  ) => AsyncIterable<AssistantMessageEvent>;
}
30
+
31
// Export shapes of the dynamically imported provider modules. Each module
// exposes provider-specific names, so every provider gets its own interface
// that the loaders adapt onto the common LazyProviderModule shape.
interface AnthropicProviderModule {
  streamAnthropic: StreamFunction<"anthropic-messages", AnthropicOptions>;
  streamSimpleAnthropic: StreamFunction<"anthropic-messages", SimpleStreamOptions>;
}

interface OpenAICodexResponsesProviderModule {
  streamOpenAICodexResponses: StreamFunction<"openai-codex-responses", OpenAICodexResponsesOptions>;
  streamSimpleOpenAICodexResponses: StreamFunction<"openai-codex-responses", SimpleStreamOptions>;
}

interface OpenAICompletionsProviderModule {
  streamOpenAICompletions: StreamFunction<"openai-completions", OpenAICompletionsOptions>;
  streamSimpleOpenAICompletions: StreamFunction<"openai-completions", SimpleStreamOptions>;
}

interface OpenAIResponsesProviderModule {
  streamOpenAIResponses: StreamFunction<"openai-responses", OpenAIResponsesOptions>;
  streamSimpleOpenAIResponses: StreamFunction<"openai-responses", SimpleStreamOptions>;
}
50
+
51
// Memoized dynamic-import promises, one per provider module, so each provider's
// implementation is loaded at most once and only on first use.
let anthropicProviderModulePromise:
  | Promise<LazyProviderModule<"anthropic-messages", AnthropicOptions, SimpleStreamOptions>>
  | undefined;
let openAICodexResponsesProviderModulePromise:
  | Promise<LazyProviderModule<"openai-codex-responses", OpenAICodexResponsesOptions, SimpleStreamOptions>>
  | undefined;
let openAICompletionsProviderModulePromise:
  | Promise<LazyProviderModule<"openai-completions", OpenAICompletionsOptions, SimpleStreamOptions>>
  | undefined;
let openAIResponsesProviderModulePromise:
  | Promise<LazyProviderModule<"openai-responses", OpenAIResponsesOptions, SimpleStreamOptions>>
  | undefined;
63
+
64
+ function forwardStream(target: AssistantMessageEventStream, source: AsyncIterable<AssistantMessageEvent>): void {
65
+ (async () => {
66
+ for await (const event of source) {
67
+ target.push(event);
68
+ }
69
+ target.end();
70
+ })();
71
+ }
72
+
73
+ function createLazyLoadErrorMessage<TApi extends Api>(model: Model<TApi>, error: unknown): AssistantMessage {
74
+ return {
75
+ role: "assistant",
76
+ content: [],
77
+ api: model.api,
78
+ provider: model.provider,
79
+ model: model.id,
80
+ usage: {
81
+ input: 0,
82
+ output: 0,
83
+ cacheRead: 0,
84
+ cacheWrite: 0,
85
+ totalTokens: 0,
86
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
87
+ },
88
+ stopReason: "error",
89
+ errorMessage: error instanceof Error ? error.message : String(error),
90
+ timestamp: Date.now(),
91
+ };
92
+ }
93
+
94
/**
 * Wrap a lazily-imported provider's `stream` in an eagerly-returned stream.
 *
 * The outer stream is handed back synchronously; once the module import
 * resolves, the inner provider stream's events are forwarded into it. Import
 * failures surface as a terminal "error" event instead of a rejected promise.
 */
function createLazyStream<TApi extends Api, TOptions extends StreamOptions, TSimpleOptions extends SimpleStreamOptions>(
  loadModule: () => Promise<LazyProviderModule<TApi, TOptions, TSimpleOptions>>,
): StreamFunction<TApi, TOptions> {
  return (model, context, options) => {
    const outer = new AssistantMessageEventStream();

    loadModule()
      .then((module) => {
        forwardStream(outer, module.stream(model, context, options));
      })
      .catch((error) => {
        const message = createLazyLoadErrorMessage(model, error);
        outer.push({ type: "error", reason: "error", error: message });
        outer.end(message);
      });

    return outer;
  };
}
113
+
114
/**
 * Same lazy-loading wrapper as createLazyStream, but delegating to the
 * module's simplified `streamSimple` entry point.
 */
function createLazySimpleStream<
  TApi extends Api,
  TOptions extends StreamOptions,
  TSimpleOptions extends SimpleStreamOptions,
>(loadModule: () => Promise<LazyProviderModule<TApi, TOptions, TSimpleOptions>>): StreamFunction<TApi, TSimpleOptions> {
  return (model, context, options) => {
    const outer = new AssistantMessageEventStream();

    loadModule()
      .then((module) => {
        forwardStream(outer, module.streamSimple(model, context, options));
      })
      .catch((error) => {
        const message = createLazyLoadErrorMessage(model, error);
        outer.push({ type: "error", reason: "error", error: message });
        outer.end(message);
      });

    return outer;
  };
}
135
+
136
/** Lazily import ./anthropic.js once and adapt it to the LazyProviderModule shape. */
function loadAnthropicProviderModule(): Promise<
  LazyProviderModule<"anthropic-messages", AnthropicOptions, SimpleStreamOptions>
> {
  // ||= memoizes: the dynamic import runs only on the first call.
  anthropicProviderModulePromise ||= import("./anthropic.js").then((module) => {
    const provider = module as AnthropicProviderModule;
    return {
      stream: provider.streamAnthropic,
      streamSimple: provider.streamSimpleAnthropic,
    };
  });

  return anthropicProviderModulePromise;
}
149
+
150
/** Lazily import ./openai-codex-responses.js once and adapt it to the LazyProviderModule shape. */
function loadOpenAICodexResponsesProviderModule(): Promise<
  LazyProviderModule<"openai-codex-responses", OpenAICodexResponsesOptions, SimpleStreamOptions>
> {
  // ||= memoizes: the dynamic import runs only on the first call.
  openAICodexResponsesProviderModulePromise ||= import("./openai-codex-responses.js").then((module) => {
    const provider = module as OpenAICodexResponsesProviderModule;
    return {
      stream: provider.streamOpenAICodexResponses,
      streamSimple: provider.streamSimpleOpenAICodexResponses,
    };
  });

  return openAICodexResponsesProviderModulePromise;
}
163
+
164
/** Lazily import ./openai-completions.js once and adapt it to the LazyProviderModule shape. */
function loadOpenAICompletionsProviderModule(): Promise<
  LazyProviderModule<"openai-completions", OpenAICompletionsOptions, SimpleStreamOptions>
> {
  // ||= memoizes: the dynamic import runs only on the first call.
  openAICompletionsProviderModulePromise ||= import("./openai-completions.js").then((module) => {
    const provider = module as OpenAICompletionsProviderModule;
    return {
      stream: provider.streamOpenAICompletions,
      streamSimple: provider.streamSimpleOpenAICompletions,
    };
  });

  return openAICompletionsProviderModulePromise;
}
177
+
178
/** Lazily import ./openai-responses.js once and adapt it to the LazyProviderModule shape. */
function loadOpenAIResponsesProviderModule(): Promise<
  LazyProviderModule<"openai-responses", OpenAIResponsesOptions, SimpleStreamOptions>
> {
  // ||= memoizes: the dynamic import runs only on the first call.
  openAIResponsesProviderModulePromise ||= import("./openai-responses.js").then((module) => {
    const provider = module as OpenAIResponsesProviderModule;
    return {
      stream: provider.streamOpenAIResponses,
      streamSimple: provider.streamSimpleOpenAIResponses,
    };
  });

  return openAIResponsesProviderModulePromise;
}
191
+
192
// Public lazy entry points: each defers importing the provider implementation
// until the first call, then forwards the provider's events.
export const streamAnthropic = createLazyStream(loadAnthropicProviderModule);
export const streamSimpleAnthropic = createLazySimpleStream(loadAnthropicProviderModule);
export const streamOpenAICodexResponses = createLazyStream(loadOpenAICodexResponsesProviderModule);
export const streamSimpleOpenAICodexResponses = createLazySimpleStream(loadOpenAICodexResponsesProviderModule);
export const streamOpenAICompletions = createLazyStream(loadOpenAICompletionsProviderModule);
export const streamSimpleOpenAICompletions = createLazySimpleStream(loadOpenAICompletionsProviderModule);
export const streamOpenAIResponses = createLazyStream(loadOpenAIResponsesProviderModule);
export const streamSimpleOpenAIResponses = createLazySimpleStream(loadOpenAIResponsesProviderModule);
200
+
201
+ export function registerBuiltInApiProviders(): void {
202
+ registerApiProvider({
203
+ api: "anthropic-messages",
204
+ stream: streamAnthropic,
205
+ streamSimple: streamSimpleAnthropic,
206
+ });
207
+
208
+ registerApiProvider({
209
+ api: "openai-completions",
210
+ stream: streamOpenAICompletions,
211
+ streamSimple: streamSimpleOpenAICompletions,
212
+ });
213
+
214
+ registerApiProvider({
215
+ api: "openai-responses",
216
+ stream: streamOpenAIResponses,
217
+ streamSimple: streamSimpleOpenAIResponses,
218
+ });
219
+
220
+ registerApiProvider({
221
+ api: "openai-codex-responses",
222
+ stream: streamOpenAICodexResponses,
223
+ streamSimple: streamSimpleOpenAICodexResponses,
224
+ });
225
+ }
226
+
227
/**
 * Drop every registered API provider and restore just the built-ins —
 * e.g. after custom providers have been registered.
 */
export function resetApiProviders(): void {
  clearApiProviders();
  registerBuiltInApiProviders();
}

// Module side effect: built-in providers are registered as soon as this
// module is imported.
registerBuiltInApiProviders();
@@ -0,0 +1,46 @@
1
+ import type { Api, Model, SimpleStreamOptions, StreamOptions, ThinkingBudgets, ThinkingLevel } from "../types.js";
2
+
3
+ export function buildBaseOptions(model: Model<Api>, options?: SimpleStreamOptions, apiKey?: string): StreamOptions {
4
+ return {
5
+ temperature: options?.temperature,
6
+ maxTokens: options?.maxTokens || Math.min(model.maxTokens, 32000),
7
+ signal: options?.signal,
8
+ apiKey: apiKey || options?.apiKey,
9
+ cacheRetention: options?.cacheRetention,
10
+ sessionId: options?.sessionId,
11
+ headers: options?.headers,
12
+ onPayload: options?.onPayload,
13
+ maxRetryDelayMs: options?.maxRetryDelayMs,
14
+ metadata: options?.metadata,
15
+ };
16
+ }
17
+
18
+ export function clampReasoning(effort: ThinkingLevel | undefined): Exclude<ThinkingLevel, "xhigh"> | undefined {
19
+ return effort === "xhigh" ? "high" : effort;
20
+ }
21
+
22
+ export function adjustMaxTokensForThinking(
23
+ baseMaxTokens: number,
24
+ modelMaxTokens: number,
25
+ reasoningLevel: ThinkingLevel,
26
+ customBudgets?: ThinkingBudgets,
27
+ ): { maxTokens: number; thinkingBudget: number } {
28
+ const defaultBudgets: ThinkingBudgets = {
29
+ minimal: 1024,
30
+ low: 2048,
31
+ medium: 8192,
32
+ high: 16384,
33
+ };
34
+ const budgets = { ...defaultBudgets, ...customBudgets };
35
+
36
+ const minOutputTokens = 1024;
37
+ const level = clampReasoning(reasoningLevel)!;
38
+ let thinkingBudget = budgets[level]!;
39
+ const maxTokens = Math.min(baseMaxTokens + thinkingBudget, modelMaxTokens);
40
+
41
+ if (maxTokens <= thinkingBudget) {
42
+ thinkingBudget = Math.max(0, maxTokens - minOutputTokens);
43
+ }
44
+
45
+ return { maxTokens, thinkingBudget };
46
+ }
@@ -0,0 +1,172 @@
1
+ import type { Api, AssistantMessage, Message, Model, ToolCall, ToolResultMessage } from "../types.js";
2
+
3
/**
 * Prepare a conversation history for replay against `model`.
 *
 * Two passes:
 *  1. Per-message transforms — thinking blocks are kept only when the message
 *     came from the same provider/api/model (otherwise converted to plain text
 *     or dropped), and tool-call IDs are optionally rewritten through
 *     `normalizeToolCallId`. ID normalization matters cross-provider:
 *     the OpenAI Responses API generates IDs that are 450+ chars with special
 *     characters like `|`, while Anthropic APIs require IDs matching
 *     ^[a-zA-Z0-9_-]+$ (max 64 chars).
 *  2. Flow repair — errored/aborted assistant turns are dropped, and synthetic
 *     error tool-results are inserted for tool calls that never received one.
 */
export function transformMessages<TApi extends Api>(
  messages: Message[],
  model: Model<TApi>,
  normalizeToolCallId?: (id: string, model: Model<TApi>, source: AssistantMessage) => string,
): Message[] {
  // Build a map of original tool call IDs to normalized IDs
  const toolCallIdMap = new Map<string, string>();

  // First pass: transform messages (thinking blocks, tool call ID normalization)
  const transformed = messages.map((msg) => {
    // User messages pass through unchanged
    if (msg.role === "user") {
      return msg;
    }

    // Handle toolResult messages - normalize toolCallId if we have a mapping.
    // Relies on map() visiting the assistant message that produced the call
    // before its toolResult, which holds because history is in order.
    if (msg.role === "toolResult") {
      const normalizedId = toolCallIdMap.get(msg.toolCallId);
      if (normalizedId && normalizedId !== msg.toolCallId) {
        return { ...msg, toolCallId: normalizedId };
      }
      return msg;
    }

    // Assistant messages need transformation check
    if (msg.role === "assistant") {
      const assistantMsg = msg as AssistantMessage;
      // "Same model" means same provider AND api AND model id; anything else
      // is treated as cross-model replay.
      const isSameModel =
        assistantMsg.provider === model.provider &&
        assistantMsg.api === model.api &&
        assistantMsg.model === model.id;

      const transformedContent = assistantMsg.content.flatMap((block) => {
        if (block.type === "thinking") {
          // Redacted thinking is opaque encrypted content, only valid for the same model.
          // Drop it for cross-model to avoid API errors.
          if (block.redacted) {
            return isSameModel ? block : [];
          }
          // For same model: keep thinking blocks with signatures (needed for replay)
          // even if the thinking text is empty (OpenAI encrypted reasoning)
          if (isSameModel && block.thinkingSignature) return block;
          // Skip empty thinking blocks, convert others to plain text
          if (!block.thinking || block.thinking.trim() === "") return [];
          if (isSameModel) return block;
          return {
            type: "text" as const,
            text: block.thinking,
          };
        }

        if (block.type === "text") {
          // Cross-model: rebuild as a bare text block to shed any
          // provider-specific extra fields.
          if (isSameModel) return block;
          return {
            type: "text" as const,
            text: block.text,
          };
        }

        if (block.type === "toolCall") {
          const toolCall = block as ToolCall;
          let normalizedToolCall: ToolCall = toolCall;

          // Thought signatures are provider-specific; strip them cross-model.
          if (!isSameModel && toolCall.thoughtSignature) {
            normalizedToolCall = { ...toolCall };
            delete (normalizedToolCall as { thoughtSignature?: string }).thoughtSignature;
          }

          if (!isSameModel && normalizeToolCallId) {
            const normalizedId = normalizeToolCallId(toolCall.id, model, assistantMsg);
            if (normalizedId !== toolCall.id) {
              // Record the mapping so later toolResult messages are rewritten too.
              toolCallIdMap.set(toolCall.id, normalizedId);
              normalizedToolCall = { ...normalizedToolCall, id: normalizedId };
            }
          }

          return normalizedToolCall;
        }

        return block;
      });

      return {
        ...assistantMsg,
        content: transformedContent,
      };
    }
    return msg;
  });

  // Second pass: insert synthetic empty tool results for orphaned tool calls
  // This preserves thinking signatures and satisfies API requirements
  const result: Message[] = [];
  let pendingToolCalls: ToolCall[] = [];
  let existingToolResultIds = new Set<string>();

  for (let i = 0; i < transformed.length; i++) {
    const msg = transformed[i];

    if (msg.role === "assistant") {
      // If we have pending orphaned tool calls from a previous assistant, insert synthetic results now
      if (pendingToolCalls.length > 0) {
        for (const tc of pendingToolCalls) {
          if (!existingToolResultIds.has(tc.id)) {
            result.push({
              role: "toolResult",
              toolCallId: tc.id,
              toolName: tc.name,
              content: [{ type: "text", text: "No result provided" }],
              isError: true,
              timestamp: Date.now(),
            } as ToolResultMessage);
          }
        }
        pendingToolCalls = [];
        existingToolResultIds = new Set();
      }

      // Skip errored/aborted assistant messages entirely.
      // These are incomplete turns that shouldn't be replayed:
      // - May have partial content (reasoning without message, incomplete tool calls)
      // - Replaying them can cause API errors (e.g. OpenAI "reasoning without following item")
      // - The model should retry from the last valid state
      // NOTE(review): toolResult messages that FOLLOW a dropped errored
      // assistant turn are still pushed below and may then reference tool
      // calls no longer present — confirm downstream converters tolerate
      // such orphaned results.
      const assistantMsg = msg as AssistantMessage;
      if (assistantMsg.stopReason === "error" || assistantMsg.stopReason === "aborted") {
        continue;
      }

      // Track tool calls from this assistant message
      const toolCalls = assistantMsg.content.filter((b) => b.type === "toolCall") as ToolCall[];
      if (toolCalls.length > 0) {
        pendingToolCalls = toolCalls;
        existingToolResultIds = new Set();
      }

      result.push(msg);
    } else if (msg.role === "toolResult") {
      existingToolResultIds.add(msg.toolCallId);
      result.push(msg);
    } else if (msg.role === "user") {
      // User message interrupts tool flow - insert synthetic results for orphaned calls
      if (pendingToolCalls.length > 0) {
        for (const tc of pendingToolCalls) {
          if (!existingToolResultIds.has(tc.id)) {
            result.push({
              role: "toolResult",
              toolCallId: tc.id,
              toolName: tc.name,
              content: [{ type: "text", text: "No result provided" }],
              isError: true,
              timestamp: Date.now(),
            } as ToolResultMessage);
          }
        }
        pendingToolCalls = [];
        existingToolResultIds = new Set();
      }
      result.push(msg);
    } else {
      result.push(msg);
    }
  }

  return result;
}