@clinebot/llms 0.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (219)
  1. package/README.md +198 -0
  2. package/dist/config-browser.d.ts +3 -0
  3. package/dist/config.d.ts +3 -0
  4. package/dist/index.browser.d.ts +4 -0
  5. package/dist/index.browser.js +1 -0
  6. package/dist/index.d.ts +5 -0
  7. package/dist/index.js +7 -0
  8. package/dist/models/generated-access.d.ts +4 -0
  9. package/dist/models/generated-provider-loaders.d.ts +13 -0
  10. package/dist/models/generated.d.ts +14 -0
  11. package/dist/models/index.d.ts +43 -0
  12. package/dist/models/models-dev-catalog.d.ts +32 -0
  13. package/dist/models/providers/aihubmix.d.ts +5 -0
  14. package/dist/models/providers/anthropic.d.ts +53 -0
  15. package/dist/models/providers/asksage.d.ts +5 -0
  16. package/dist/models/providers/baseten.d.ts +5 -0
  17. package/dist/models/providers/bedrock.d.ts +7 -0
  18. package/dist/models/providers/cerebras.d.ts +7 -0
  19. package/dist/models/providers/claude-code.d.ts +4 -0
  20. package/dist/models/providers/cline.d.ts +34 -0
  21. package/dist/models/providers/deepseek.d.ts +8 -0
  22. package/dist/models/providers/dify.d.ts +5 -0
  23. package/dist/models/providers/doubao.d.ts +7 -0
  24. package/dist/models/providers/fireworks.d.ts +8 -0
  25. package/dist/models/providers/gemini.d.ts +9 -0
  26. package/dist/models/providers/groq.d.ts +8 -0
  27. package/dist/models/providers/hicap.d.ts +5 -0
  28. package/dist/models/providers/huawei-cloud-maas.d.ts +5 -0
  29. package/dist/models/providers/huggingface.d.ts +6 -0
  30. package/dist/models/providers/index.d.ts +45 -0
  31. package/dist/models/providers/litellm.d.ts +5 -0
  32. package/dist/models/providers/lmstudio.d.ts +5 -0
  33. package/dist/models/providers/minimax.d.ts +7 -0
  34. package/dist/models/providers/mistral.d.ts +5 -0
  35. package/dist/models/providers/moonshot.d.ts +7 -0
  36. package/dist/models/providers/nebius.d.ts +7 -0
  37. package/dist/models/providers/nous-research.d.ts +7 -0
  38. package/dist/models/providers/oca.d.ts +9 -0
  39. package/dist/models/providers/ollama.d.ts +5 -0
  40. package/dist/models/providers/openai-codex.d.ts +10 -0
  41. package/dist/models/providers/openai.d.ts +9 -0
  42. package/dist/models/providers/opencode.d.ts +10 -0
  43. package/dist/models/providers/openrouter.d.ts +7 -0
  44. package/dist/models/providers/qwen-code.d.ts +7 -0
  45. package/dist/models/providers/qwen.d.ts +7 -0
  46. package/dist/models/providers/requesty.d.ts +6 -0
  47. package/dist/models/providers/sambanova.d.ts +7 -0
  48. package/dist/models/providers/sapaicore.d.ts +7 -0
  49. package/dist/models/providers/together.d.ts +8 -0
  50. package/dist/models/providers/vercel-ai-gateway.d.ts +5 -0
  51. package/dist/models/providers/vertex.d.ts +7 -0
  52. package/dist/models/providers/xai.d.ts +8 -0
  53. package/dist/models/providers/zai.d.ts +7 -0
  54. package/dist/models/query.d.ts +181 -0
  55. package/dist/models/registry.d.ts +123 -0
  56. package/dist/models/schemas/index.d.ts +7 -0
  57. package/dist/models/schemas/model.d.ts +340 -0
  58. package/dist/models/schemas/query.d.ts +191 -0
  59. package/dist/providers/handlers/ai-sdk-community.d.ts +46 -0
  60. package/dist/providers/handlers/ai-sdk-provider-base.d.ts +32 -0
  61. package/dist/providers/handlers/anthropic-base.d.ts +26 -0
  62. package/dist/providers/handlers/asksage.d.ts +12 -0
  63. package/dist/providers/handlers/auth.d.ts +5 -0
  64. package/dist/providers/handlers/base.d.ts +55 -0
  65. package/dist/providers/handlers/bedrock-base.d.ts +23 -0
  66. package/dist/providers/handlers/bedrock-client.d.ts +4 -0
  67. package/dist/providers/handlers/community-sdk.d.ts +97 -0
  68. package/dist/providers/handlers/fetch-base.d.ts +18 -0
  69. package/dist/providers/handlers/gemini-base.d.ts +25 -0
  70. package/dist/providers/handlers/index.d.ts +19 -0
  71. package/dist/providers/handlers/openai-base.d.ts +54 -0
  72. package/dist/providers/handlers/openai-responses.d.ts +64 -0
  73. package/dist/providers/handlers/providers.d.ts +43 -0
  74. package/dist/providers/handlers/r1-base.d.ts +62 -0
  75. package/dist/providers/handlers/registry.d.ts +106 -0
  76. package/dist/providers/handlers/vertex.d.ts +32 -0
  77. package/dist/providers/index.d.ts +100 -0
  78. package/dist/providers/public.browser.d.ts +2 -0
  79. package/dist/providers/public.d.ts +3 -0
  80. package/dist/providers/shared/openai-compatible.d.ts +10 -0
  81. package/dist/providers/transform/ai-sdk-community-format.d.ts +9 -0
  82. package/dist/providers/transform/anthropic-format.d.ts +24 -0
  83. package/dist/providers/transform/content-format.d.ts +3 -0
  84. package/dist/providers/transform/gemini-format.d.ts +19 -0
  85. package/dist/providers/transform/index.d.ts +10 -0
  86. package/dist/providers/transform/openai-format.d.ts +36 -0
  87. package/dist/providers/transform/r1-format.d.ts +26 -0
  88. package/dist/providers/types/config.d.ts +261 -0
  89. package/dist/providers/types/handler.d.ts +71 -0
  90. package/dist/providers/types/index.d.ts +11 -0
  91. package/dist/providers/types/messages.d.ts +139 -0
  92. package/dist/providers/types/model-info.d.ts +32 -0
  93. package/dist/providers/types/provider-ids.d.ts +63 -0
  94. package/dist/providers/types/settings.d.ts +308 -0
  95. package/dist/providers/types/stream.d.ts +106 -0
  96. package/dist/providers/utils/index.d.ts +7 -0
  97. package/dist/providers/utils/retry.d.ts +38 -0
  98. package/dist/providers/utils/stream-processor.d.ts +110 -0
  99. package/dist/providers/utils/tool-processor.d.ts +34 -0
  100. package/dist/sdk.d.ts +18 -0
  101. package/dist/types.d.ts +60 -0
  102. package/package.json +66 -0
  103. package/src/catalog.ts +20 -0
  104. package/src/config-browser.ts +11 -0
  105. package/src/config.ts +49 -0
  106. package/src/index.browser.ts +9 -0
  107. package/src/index.ts +10 -0
  108. package/src/live-providers.test.ts +137 -0
  109. package/src/models/generated-access.ts +41 -0
  110. package/src/models/generated-provider-loaders.ts +166 -0
  111. package/src/models/generated.ts +11997 -0
  112. package/src/models/index.ts +271 -0
  113. package/src/models/models-dev-catalog.test.ts +161 -0
  114. package/src/models/models-dev-catalog.ts +161 -0
  115. package/src/models/providers/aihubmix.ts +19 -0
  116. package/src/models/providers/anthropic.ts +60 -0
  117. package/src/models/providers/asksage.ts +19 -0
  118. package/src/models/providers/baseten.ts +21 -0
  119. package/src/models/providers/bedrock.ts +30 -0
  120. package/src/models/providers/cerebras.ts +24 -0
  121. package/src/models/providers/claude-code.ts +51 -0
  122. package/src/models/providers/cline.ts +25 -0
  123. package/src/models/providers/deepseek.ts +33 -0
  124. package/src/models/providers/dify.ts +17 -0
  125. package/src/models/providers/doubao.ts +33 -0
  126. package/src/models/providers/fireworks.ts +34 -0
  127. package/src/models/providers/gemini.ts +43 -0
  128. package/src/models/providers/groq.ts +33 -0
  129. package/src/models/providers/hicap.ts +18 -0
  130. package/src/models/providers/huawei-cloud-maas.ts +18 -0
  131. package/src/models/providers/huggingface.ts +22 -0
  132. package/src/models/providers/index.ts +162 -0
  133. package/src/models/providers/litellm.ts +19 -0
  134. package/src/models/providers/lmstudio.ts +22 -0
  135. package/src/models/providers/minimax.ts +34 -0
  136. package/src/models/providers/mistral.ts +19 -0
  137. package/src/models/providers/moonshot.ts +34 -0
  138. package/src/models/providers/nebius.ts +24 -0
  139. package/src/models/providers/nous-research.ts +21 -0
  140. package/src/models/providers/oca.ts +30 -0
  141. package/src/models/providers/ollama.ts +18 -0
  142. package/src/models/providers/openai-codex.ts +30 -0
  143. package/src/models/providers/openai.ts +43 -0
  144. package/src/models/providers/opencode.ts +28 -0
  145. package/src/models/providers/openrouter.ts +24 -0
  146. package/src/models/providers/qwen-code.ts +33 -0
  147. package/src/models/providers/qwen.ts +34 -0
  148. package/src/models/providers/requesty.ts +23 -0
  149. package/src/models/providers/sambanova.ts +23 -0
  150. package/src/models/providers/sapaicore.ts +34 -0
  151. package/src/models/providers/together.ts +35 -0
  152. package/src/models/providers/vercel-ai-gateway.ts +23 -0
  153. package/src/models/providers/vertex.ts +36 -0
  154. package/src/models/providers/xai.ts +34 -0
  155. package/src/models/providers/zai.ts +25 -0
  156. package/src/models/query.ts +407 -0
  157. package/src/models/registry.ts +511 -0
  158. package/src/models/schemas/index.ts +62 -0
  159. package/src/models/schemas/model.ts +308 -0
  160. package/src/models/schemas/query.ts +336 -0
  161. package/src/providers/browser.ts +4 -0
  162. package/src/providers/handlers/ai-sdk-community.ts +226 -0
  163. package/src/providers/handlers/ai-sdk-provider-base.ts +193 -0
  164. package/src/providers/handlers/anthropic-base.ts +372 -0
  165. package/src/providers/handlers/asksage.test.ts +103 -0
  166. package/src/providers/handlers/asksage.ts +138 -0
  167. package/src/providers/handlers/auth.test.ts +19 -0
  168. package/src/providers/handlers/auth.ts +121 -0
  169. package/src/providers/handlers/base.test.ts +46 -0
  170. package/src/providers/handlers/base.ts +160 -0
  171. package/src/providers/handlers/bedrock-base.ts +390 -0
  172. package/src/providers/handlers/bedrock-client.ts +100 -0
  173. package/src/providers/handlers/codex.test.ts +123 -0
  174. package/src/providers/handlers/community-sdk.test.ts +288 -0
  175. package/src/providers/handlers/community-sdk.ts +392 -0
  176. package/src/providers/handlers/fetch-base.ts +68 -0
  177. package/src/providers/handlers/gemini-base.ts +302 -0
  178. package/src/providers/handlers/index.ts +67 -0
  179. package/src/providers/handlers/openai-base.ts +277 -0
  180. package/src/providers/handlers/openai-responses.ts +598 -0
  181. package/src/providers/handlers/providers.test.ts +120 -0
  182. package/src/providers/handlers/providers.ts +563 -0
  183. package/src/providers/handlers/r1-base.ts +280 -0
  184. package/src/providers/handlers/registry.ts +185 -0
  185. package/src/providers/handlers/vertex.test.ts +124 -0
  186. package/src/providers/handlers/vertex.ts +292 -0
  187. package/src/providers/index.ts +534 -0
  188. package/src/providers/public.browser.ts +20 -0
  189. package/src/providers/public.ts +51 -0
  190. package/src/providers/shared/openai-compatible.ts +63 -0
  191. package/src/providers/transform/ai-sdk-community-format.test.ts +73 -0
  192. package/src/providers/transform/ai-sdk-community-format.ts +115 -0
  193. package/src/providers/transform/anthropic-format.ts +218 -0
  194. package/src/providers/transform/content-format.ts +34 -0
  195. package/src/providers/transform/format-conversion.test.ts +310 -0
  196. package/src/providers/transform/gemini-format.ts +167 -0
  197. package/src/providers/transform/index.ts +22 -0
  198. package/src/providers/transform/openai-format.ts +247 -0
  199. package/src/providers/transform/r1-format.ts +287 -0
  200. package/src/providers/types/config.ts +388 -0
  201. package/src/providers/types/handler.ts +87 -0
  202. package/src/providers/types/index.ts +120 -0
  203. package/src/providers/types/messages.ts +158 -0
  204. package/src/providers/types/model-info.test.ts +57 -0
  205. package/src/providers/types/model-info.ts +65 -0
  206. package/src/providers/types/provider-ids.test.ts +12 -0
  207. package/src/providers/types/provider-ids.ts +89 -0
  208. package/src/providers/types/settings.test.ts +49 -0
  209. package/src/providers/types/settings.ts +533 -0
  210. package/src/providers/types/stream.ts +117 -0
  211. package/src/providers/utils/index.ts +27 -0
  212. package/src/providers/utils/retry.test.ts +140 -0
  213. package/src/providers/utils/retry.ts +188 -0
  214. package/src/providers/utils/stream-processor.test.ts +232 -0
  215. package/src/providers/utils/stream-processor.ts +472 -0
  216. package/src/providers/utils/tool-processor.test.ts +34 -0
  217. package/src/providers/utils/tool-processor.ts +111 -0
  218. package/src/sdk.ts +264 -0
  219. package/src/types.ts +79 -0
@@ -0,0 +1,280 @@
1
+ /**
2
+ * R1 Base Handler
3
+ *
4
+ * Handler for R1-based reasoning models (DeepSeek Reasoner, etc.)
5
+ * These models have special requirements:
6
+ * 1. Consecutive messages with the same role must be merged
7
+ * 2. reasoning_content field for tool calling continuations
8
+ * 3. No temperature parameter
9
+ * 4. Response includes reasoning_content in the delta
10
+ */
11
+
12
+ import OpenAI from "openai";
13
+ import type { ChatCompletionChunk } from "openai/resources/chat/completions";
14
+ import { getOpenAIToolParams } from "../transform/openai-format";
15
+ import { convertToR1Messages } from "../transform/r1-format";
16
+ import type {
17
+ ApiStream,
18
+ ApiStreamChunk,
19
+ HandlerModelInfo,
20
+ ModelInfo,
21
+ ProviderConfig,
22
+ } from "../types";
23
+ import type { Message, ToolDefinition } from "../types/messages";
24
+ import { retryStream } from "../utils/retry";
25
+ import { ToolCallProcessor } from "../utils/tool-processor";
26
+ import { getMissingApiKeyError, resolveApiKeyForProvider } from "./auth";
27
+ import { BaseHandler } from "./base";
28
+
29
+ /**
30
+ * Extended usage type for DeepSeek with cache tokens
31
+ */
32
+ interface R1Usage extends OpenAI.CompletionUsage {
33
+ prompt_cache_hit_tokens?: number;
34
+ prompt_cache_miss_tokens?: number;
35
+ }
36
+
37
+ /**
38
+ * Base handler for R1-based reasoning models
39
+ *
40
+ * Uses ProviderConfig fields:
41
+ * - baseUrl: Base URL for the API
42
+ * - modelId: Model ID
43
+ * - knownModels: Known models with their info
44
+ * - headers: Custom headers
45
+ */
46
+ export class R1BaseHandler extends BaseHandler {
47
+ protected client: OpenAI | undefined;
48
+
49
+ /**
50
+ * Ensure the OpenAI client is initialized
51
+ */
52
+ protected ensureClient(): OpenAI {
53
+ if (!this.client) {
54
+ const baseURL = this.config.baseUrl;
55
+
56
+ if (!baseURL) {
57
+ throw new Error("Base URL is required. Set baseUrl in config.");
58
+ }
59
+ const apiKey = resolveApiKeyForProvider(
60
+ this.config.providerId,
61
+ this.config.apiKey,
62
+ );
63
+ if (!apiKey) {
64
+ throw new Error(getMissingApiKeyError(this.config.providerId));
65
+ }
66
+ const requestHeaders = this.getRequestHeaders();
67
+ const hasAuthorizationHeader = Object.keys(requestHeaders).some(
68
+ (key) => key.toLowerCase() === "authorization",
69
+ );
70
+
71
+ this.client = new OpenAI({
72
+ apiKey,
73
+ baseURL,
74
+ defaultHeaders: hasAuthorizationHeader
75
+ ? requestHeaders
76
+ : { ...requestHeaders, Authorization: `Bearer ${apiKey}` },
77
+ });
78
+ }
79
+ return this.client;
80
+ }
81
+
82
+ /**
83
+ * Get model info, falling back to provider defaults
84
+ */
85
+ getModel(): HandlerModelInfo {
86
+ const modelId = this.config.modelId;
87
+ if (!modelId) {
88
+ throw new Error("Model ID is required. Set modelId in config.");
89
+ }
90
+
91
+ const modelInfo =
92
+ this.config.modelInfo ??
93
+ this.config.knownModels?.[modelId] ??
94
+ this.getDefaultModelInfo();
95
+
96
+ return { id: modelId, info: { ...modelInfo, id: modelId } };
97
+ }
98
+
99
+ protected getDefaultModelInfo(): ModelInfo {
100
+ return {
101
+ id: this.config.modelId,
102
+ capabilities: ["prompt-cache", "reasoning"],
103
+ };
104
+ }
105
+
106
+ /**
107
+ * Check if this model is a reasoner model (no temperature allowed)
108
+ */
109
+ protected isReasonerModel(modelId: string): boolean {
110
+ return modelId.includes("reasoner") || modelId.includes("r1");
111
+ }
112
+
113
+ getMessages(
114
+ systemPrompt: string,
115
+ messages: Message[],
116
+ ): OpenAI.Chat.ChatCompletionMessageParam[] {
117
+ return [
118
+ { role: "system", content: systemPrompt },
119
+ ...convertToR1Messages(messages),
120
+ ];
121
+ }
122
+
123
+ /**
124
+ * Create a streaming message
125
+ */
126
+ async *createMessage(
127
+ systemPrompt: string,
128
+ messages: Message[],
129
+ tools?: ToolDefinition[],
130
+ ): ApiStream {
131
+ yield* retryStream(() =>
132
+ this.createMessageInternal(systemPrompt, messages, tools),
133
+ );
134
+ }
135
+
136
+ private async *createMessageInternal(
137
+ systemPrompt: string,
138
+ messages: Message[],
139
+ tools?: ToolDefinition[],
140
+ ): ApiStream {
141
+ const client = this.ensureClient();
142
+ const { id: modelId, info: modelInfo } = this.getModel();
143
+ const responseId = this.createResponseId();
144
+
145
+ // Convert messages to R1 format (handles merging and reasoning_content)
146
+ const openAiMessages = this.getMessages(systemPrompt, messages);
147
+
148
+ // Build request options
149
+ const requestOptions: OpenAI.ChatCompletionCreateParamsStreaming = {
150
+ model: modelId,
151
+ messages: openAiMessages,
152
+ stream: true,
153
+ stream_options: { include_usage: true },
154
+ ...getOpenAIToolParams(tools),
155
+ };
156
+
157
+ // Add max tokens if configured
158
+ if (modelInfo.maxTokens) {
159
+ requestOptions.max_completion_tokens = modelInfo.maxTokens;
160
+ }
161
+
162
+ // Only set temperature for non-reasoner models
163
+ if (!this.isReasonerModel(modelId)) {
164
+ requestOptions.temperature = modelInfo.temperature ?? 0;
165
+ }
166
+
167
+ const requestHeaders = this.getRequestHeaders();
168
+ const hasAuthorizationHeader = Object.keys(requestHeaders).some(
169
+ (key) => key.toLowerCase() === "authorization",
170
+ );
171
+ const apiKey = resolveApiKeyForProvider(
172
+ this.config.providerId,
173
+ this.config.apiKey,
174
+ );
175
+ if (!hasAuthorizationHeader && apiKey) {
176
+ requestHeaders.Authorization = `Bearer ${apiKey}`;
177
+ }
178
+ const abortSignal = this.getAbortSignal();
179
+ const stream = await client.chat.completions.create(requestOptions, {
180
+ signal: abortSignal,
181
+ headers: requestHeaders,
182
+ });
183
+ const toolCallProcessor = new ToolCallProcessor();
184
+
185
+ for await (const chunk of stream) {
186
+ yield* this.withResponseIdForAll(
187
+ this.processChunk(chunk, toolCallProcessor, modelInfo, responseId),
188
+ responseId,
189
+ );
190
+ }
191
+
192
+ // Yield done chunk to indicate streaming completed successfully
193
+ yield { type: "done", success: true, id: responseId };
194
+ }
195
+
196
+ /**
197
+ * Process a single chunk from the stream
198
+ */
199
+ protected *processChunk(
200
+ chunk: ChatCompletionChunk,
201
+ toolCallProcessor: ToolCallProcessor,
202
+ modelInfo: ModelInfo,
203
+ responseId: string,
204
+ ): Generator<ApiStreamChunk> {
205
+ const delta = chunk.choices?.[0]?.delta;
206
+
207
+ // Handle text content
208
+ if (delta?.content) {
209
+ yield { type: "text", text: delta.content, id: responseId };
210
+ }
211
+
212
+ // Handle reasoning content (R1 specific)
213
+ if ((delta as any)?.reasoning_content) {
214
+ yield {
215
+ type: "reasoning",
216
+ reasoning: (delta as any).reasoning_content,
217
+ id: responseId,
218
+ };
219
+ }
220
+
221
+ // Handle tool calls
222
+ if (delta?.tool_calls) {
223
+ yield* toolCallProcessor.processToolCallDeltas(
224
+ delta.tool_calls.map((tc) => ({
225
+ index: tc.index,
226
+ id: tc.id,
227
+ function: tc.function,
228
+ })),
229
+ responseId,
230
+ );
231
+ }
232
+
233
+ // Handle usage information with R1-specific cache tokens
234
+ if (chunk.usage) {
235
+ yield* this.processUsage(chunk.usage, modelInfo, responseId);
236
+ }
237
+ }
238
+
239
+ /**
240
+ * Process usage information with R1-specific cache handling
241
+ *
242
+ * DeepSeek reports total input AND cache reads/writes,
243
+ * where the input tokens is the sum of the cache hits/misses.
244
+ */
245
+ protected *processUsage(
246
+ usage: OpenAI.CompletionUsage,
247
+ _modelInfo: ModelInfo,
248
+ responseId: string,
249
+ ): Generator<ApiStreamChunk> {
250
+ const r1Usage = usage as R1Usage;
251
+
252
+ const inputTokens = r1Usage.prompt_tokens ?? 0; // sum of cache hits and misses
253
+ const outputTokens = r1Usage.completion_tokens ?? 0;
254
+ const cacheReadTokens = r1Usage.prompt_cache_hit_tokens ?? 0;
255
+ const cacheWriteTokens = r1Usage.prompt_cache_miss_tokens ?? 0;
256
+
257
+ // Calculate non-cached input tokens (will always be 0 for DeepSeek since input = read + write)
258
+ const nonCachedInputTokens = Math.max(
259
+ 0,
260
+ inputTokens - cacheReadTokens - cacheWriteTokens,
261
+ );
262
+
263
+ yield {
264
+ type: "usage",
265
+ inputTokens: nonCachedInputTokens,
266
+ outputTokens,
267
+ cacheReadTokens,
268
+ cacheWriteTokens,
269
+ totalCost: this.calculateCost(inputTokens, outputTokens, cacheReadTokens),
270
+ id: responseId,
271
+ };
272
+ }
273
+ }
274
+
275
+ /**
276
+ * Create an R1-compatible handler
277
+ */
278
+ export function createR1Handler(config: ProviderConfig): R1BaseHandler {
279
+ return new R1BaseHandler(config);
280
+ }
@@ -0,0 +1,185 @@
1
+ /**
2
+ * Custom Handler Registry
3
+ *
4
+ * Allows users to register their own custom handlers that extend BaseHandler.
5
+ * This is useful for providers that require dependencies not included in this package
6
+ * (e.g., VSCode LM handler that requires the vscode package).
7
+ *
8
+ * @example
9
+ * ```typescript
10
+ * import { registerHandler, BaseHandler, type ProviderConfig, type ApiStream, type Message } from "@clinebot/providers"
11
+ * import * as vscode from "vscode"
12
+ *
13
+ * class VSCodeLmHandler extends BaseHandler {
14
+ * async *createMessage(systemPrompt: string, messages: Message[]): ApiStream {
15
+ * // Implementation using vscode.lm API
16
+ * }
17
+ * }
18
+ *
19
+ * // Register the handler
20
+ * registerHandler("vscode-lm", (config) => new VSCodeLmHandler(config))
21
+ *
22
+ * // Now createHandler will use your custom handler for "vscode-lm"
23
+ * const handler = createHandler({ providerId: "vscode-lm", modelId: "copilot" })
24
+ * ```
25
+ */
26
+
27
+ import type {
28
+ ApiHandler,
29
+ HandlerFactory,
30
+ LazyHandlerFactory,
31
+ ProviderConfig,
32
+ } from "../types";
33
+
34
+ /**
35
+ * Registry entry that can be either sync or async factory
36
+ */
37
+ type RegistryEntry = {
38
+ factory: HandlerFactory<ProviderConfig> | LazyHandlerFactory<ProviderConfig>;
39
+ isAsync: boolean;
40
+ };
41
+
42
+ /**
43
+ * Internal registry of custom handlers
44
+ */
45
+ const customHandlerRegistry = new Map<string, RegistryEntry>();
46
+
47
+ /**
48
+ * Register a custom handler factory for a provider ID
49
+ *
50
+ * Use this to add handlers for providers that require external dependencies
51
+ * not bundled with this package, or to override built-in handlers.
52
+ *
53
+ * @param providerId - The provider ID to register (can be existing or new)
54
+ * @param factory - Factory function that creates the handler
55
+ *
56
+ * @example
57
+ * ```typescript
58
+ * // Simple registration
59
+ * registerHandler("my-provider", (config) => new MyHandler(config))
60
+ *
61
+ * // Override built-in handler
62
+ * registerHandler("anthropic", (config) => new MyCustomAnthropicHandler(config))
63
+ * ```
64
+ */
65
+ export function registerHandler(
66
+ providerId: string,
67
+ factory: HandlerFactory<ProviderConfig>,
68
+ ): void {
69
+ customHandlerRegistry.set(providerId, { factory, isAsync: false });
70
+ }
71
+
72
+ /**
73
+ * Register an async handler factory for lazy loading
74
+ *
75
+ * Use this when your handler has heavy dependencies that should be
76
+ * loaded only when needed.
77
+ *
78
+ * @param providerId - The provider ID to register
79
+ * @param factory - Async factory function that creates the handler
80
+ *
81
+ * @example
82
+ * ```typescript
83
+ * registerAsyncHandler("heavy-provider", async (config) => {
84
+ * const { HeavyHandler } = await import("./heavy-handler")
85
+ * return new HeavyHandler(config)
86
+ * })
87
+ * ```
88
+ */
89
+ export function registerAsyncHandler(
90
+ providerId: string,
91
+ factory: LazyHandlerFactory<ProviderConfig>,
92
+ ): void {
93
+ customHandlerRegistry.set(providerId, { factory, isAsync: true });
94
+ }
95
+
96
+ /**
97
+ * Unregister a custom handler
98
+ *
99
+ * @param providerId - The provider ID to unregister
100
+ * @returns true if a handler was removed, false if none existed
101
+ */
102
+ export function unregisterHandler(providerId: string): boolean {
103
+ return customHandlerRegistry.delete(providerId);
104
+ }
105
+
106
+ /**
107
+ * Check if a custom handler is registered for a provider ID
108
+ *
109
+ * @param providerId - The provider ID to check
110
+ */
111
+ export function hasRegisteredHandler(providerId: string): boolean {
112
+ return customHandlerRegistry.has(providerId);
113
+ }
114
+
115
+ /**
116
+ * Get a registered handler (internal use)
117
+ *
118
+ * @param providerId - The provider ID to get
119
+ * @param config - The config to pass to the factory
120
+ * @returns The handler instance, or undefined if not registered
121
+ */
122
+ export function getRegisteredHandler(
123
+ providerId: string,
124
+ config: ProviderConfig,
125
+ ): ApiHandler | undefined {
126
+ const entry = customHandlerRegistry.get(providerId);
127
+ if (!entry) {
128
+ return undefined;
129
+ }
130
+
131
+ if (entry.isAsync) {
132
+ throw new Error(
133
+ `Handler for "${providerId}" is registered as async. Use getRegisteredHandlerAsync() or createHandlerAsync() instead.`,
134
+ );
135
+ }
136
+
137
+ return (entry.factory as HandlerFactory<ProviderConfig>)(config);
138
+ }
139
+
140
+ /**
141
+ * Get a registered handler asynchronously (internal use)
142
+ *
143
+ * @param providerId - The provider ID to get
144
+ * @param config - The config to pass to the factory
145
+ * @returns The handler instance, or undefined if not registered
146
+ */
147
+ export async function getRegisteredHandlerAsync(
148
+ providerId: string,
149
+ config: ProviderConfig,
150
+ ): Promise<ApiHandler | undefined> {
151
+ const entry = customHandlerRegistry.get(providerId);
152
+ if (!entry) {
153
+ return undefined;
154
+ }
155
+
156
+ if (entry.isAsync) {
157
+ return (entry.factory as LazyHandlerFactory<ProviderConfig>)(config);
158
+ }
159
+
160
+ return (entry.factory as HandlerFactory<ProviderConfig>)(config);
161
+ }
162
+
163
+ /**
164
+ * Check if a registered handler is async
165
+ *
166
+ * @param providerId - The provider ID to check
167
+ */
168
+ export function isRegisteredHandlerAsync(providerId: string): boolean {
169
+ const entry = customHandlerRegistry.get(providerId);
170
+ return entry?.isAsync ?? false;
171
+ }
172
+
173
+ /**
174
+ * Get all registered provider IDs
175
+ */
176
+ export function getRegisteredProviderIds(): string[] {
177
+ return Array.from(customHandlerRegistry.keys());
178
+ }
179
+
180
+ /**
181
+ * Clear all registered handlers (mainly for testing)
182
+ */
183
+ export function clearRegistry(): void {
184
+ customHandlerRegistry.clear();
185
+ }
@@ -0,0 +1,124 @@
1
+ import { beforeEach, describe, expect, it, vi } from "vitest";
2
+ import type { Message } from "../types/messages";
3
+
4
// Spies that record how VertexHandler delegates to the Gemini handler.
const geminiConstructorSpy = vi.fn();
const geminiGetMessagesSpy = vi.fn();
const geminiCreateMessageSpy = vi.fn();

// Replace the real GeminiHandler with a stub that forwards every call to the
// spies above, so tests can assert on the config and arguments VertexHandler
// passes through without exercising the real Gemini implementation.
vi.mock("./gemini-base", () => {
  return {
    GeminiHandler: class {
      constructor(config: unknown) {
        // Captures the (merged) config VertexHandler builds for the delegate.
        geminiConstructorSpy(config);
      }

      getMessages(systemPrompt: string, messages: Message[]) {
        return geminiGetMessagesSpy(systemPrompt, messages);
      }

      createMessage(
        systemPrompt: string,
        messages: Message[],
        tools?: unknown[],
      ) {
        return geminiCreateMessageSpy(systemPrompt, messages, tools);
      }

      // Fixed model metadata; the values are irrelevant to the routing tests.
      getModel() {
        return {
          id: "gemini-2.5-pro",
          info: {
            id: "gemini-2.5-pro",
            name: "Gemini 2.5 Pro",
            contextWindow: 1,
            maxTokens: 1,
          },
        };
      }
    },
  };
});
41
+
42
+ import { VertexHandler } from "./vertex";
43
+
44
// VertexHandler routing tests: Gemini model ids must delegate to the (mocked)
// GeminiHandler with Vertex defaults applied; Claude ids take the
// Anthropic-style path; missing GCP config must fail when streaming starts.
describe("VertexHandler", () => {
  beforeEach(() => {
    // Reset spy call counts and mocked return values between tests.
    vi.clearAllMocks();
  });

  it("routes Gemini models through GeminiHandler with Vertex config defaults", () => {
    geminiGetMessagesSpy.mockReturnValue([
      { role: "user", parts: [{ text: "ok" }] },
    ]);

    const handler = new VertexHandler({
      providerId: "vertex",
      modelId: "gemini-2.5-pro",
      gcp: { projectId: "my-project" },
    });

    const messages: Message[] = [{ role: "user", content: "Hello" }];
    const converted = handler.getMessages("You are helpful.", messages);

    // The Gemini delegate is constructed exactly once, and the default
    // region "us-central1" is filled in both at top level and inside gcp.
    expect(geminiConstructorSpy).toHaveBeenCalledTimes(1);
    expect(geminiConstructorSpy).toHaveBeenCalledWith(
      expect.objectContaining({
        region: "us-central1",
        gcp: expect.objectContaining({
          projectId: "my-project",
          region: "us-central1",
        }),
      }),
    );
    expect(geminiGetMessagesSpy).toHaveBeenCalledWith(
      "You are helpful.",
      messages,
    );
    // The delegate's output is returned untouched.
    expect(converted).toEqual([{ role: "user", parts: [{ text: "ok" }] }]);
  });

  it("uses Anthropic-style message conversion for Claude models", () => {
    const handler = new VertexHandler({
      providerId: "vertex",
      modelId: "claude-sonnet-4-5",
      gcp: { projectId: "my-project", region: "us-east5" },
    });

    const converted = handler.getMessages("System", [
      { role: "user", content: "Hello Claude" },
    ]);

    // Claude ids must NOT go through the Gemini delegate; string content is
    // expanded into Anthropic-style text content blocks.
    expect(geminiGetMessagesSpy).not.toHaveBeenCalled();
    expect(converted).toEqual([
      {
        role: "user",
        content: [{ type: "text", text: "Hello Claude" }],
      },
    ]);
  });

  it("requires gcp.projectId for Vertex provider", async () => {
    const handler = new VertexHandler({
      providerId: "vertex",
      modelId: "gemini-2.5-pro",
    });

    // The error surfaces on the first pull of the async generator, not at
    // construction time.
    const stream = handler.createMessage("System", [
      { role: "user", content: "Hello" },
    ]);
    await expect(stream.next()).rejects.toThrow("gcp.projectId");
  });

  it("requires region for Claude models on Vertex", async () => {
    const handler = new VertexHandler({
      providerId: "vertex",
      modelId: "claude-sonnet-4-5",
      gcp: { projectId: "my-project" },
    });

    const stream = handler.createMessage("System", [
      { role: "user", content: "Hello" },
    ]);
    await expect(stream.next()).rejects.toThrow("gcp.region");
  });
});