@auxiora/providers 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88) hide show
  1. package/LICENSE +191 -0
  2. package/dist/anthropic.d.ts +82 -0
  3. package/dist/anthropic.d.ts.map +1 -0
  4. package/dist/anthropic.js +618 -0
  5. package/dist/anthropic.js.map +1 -0
  6. package/dist/claude-code-tools.d.ts +29 -0
  7. package/dist/claude-code-tools.d.ts.map +1 -0
  8. package/dist/claude-code-tools.js +221 -0
  9. package/dist/claude-code-tools.js.map +1 -0
  10. package/dist/claude-oauth.d.ts +86 -0
  11. package/dist/claude-oauth.d.ts.map +1 -0
  12. package/dist/claude-oauth.js +318 -0
  13. package/dist/claude-oauth.js.map +1 -0
  14. package/dist/cohere.d.ts +18 -0
  15. package/dist/cohere.d.ts.map +1 -0
  16. package/dist/cohere.js +163 -0
  17. package/dist/cohere.js.map +1 -0
  18. package/dist/deepseek.d.ts +18 -0
  19. package/dist/deepseek.d.ts.map +1 -0
  20. package/dist/deepseek.js +164 -0
  21. package/dist/deepseek.js.map +1 -0
  22. package/dist/factory.d.ts +19 -0
  23. package/dist/factory.d.ts.map +1 -0
  24. package/dist/factory.js +108 -0
  25. package/dist/factory.js.map +1 -0
  26. package/dist/google.d.ts +18 -0
  27. package/dist/google.d.ts.map +1 -0
  28. package/dist/google.js +141 -0
  29. package/dist/google.js.map +1 -0
  30. package/dist/groq.d.ts +18 -0
  31. package/dist/groq.d.ts.map +1 -0
  32. package/dist/groq.js +186 -0
  33. package/dist/groq.js.map +1 -0
  34. package/dist/index.d.ts +15 -0
  35. package/dist/index.d.ts.map +1 -0
  36. package/dist/index.js +14 -0
  37. package/dist/index.js.map +1 -0
  38. package/dist/ollama.d.ts +18 -0
  39. package/dist/ollama.d.ts.map +1 -0
  40. package/dist/ollama.js +141 -0
  41. package/dist/ollama.js.map +1 -0
  42. package/dist/openai-compatible.d.ts +20 -0
  43. package/dist/openai-compatible.d.ts.map +1 -0
  44. package/dist/openai-compatible.js +112 -0
  45. package/dist/openai-compatible.js.map +1 -0
  46. package/dist/openai.d.ts +20 -0
  47. package/dist/openai.d.ts.map +1 -0
  48. package/dist/openai.js +259 -0
  49. package/dist/openai.js.map +1 -0
  50. package/dist/replicate.d.ts +20 -0
  51. package/dist/replicate.d.ts.map +1 -0
  52. package/dist/replicate.js +186 -0
  53. package/dist/replicate.js.map +1 -0
  54. package/dist/thinking-levels.d.ts +16 -0
  55. package/dist/thinking-levels.d.ts.map +1 -0
  56. package/dist/thinking-levels.js +34 -0
  57. package/dist/thinking-levels.js.map +1 -0
  58. package/dist/types.d.ts +157 -0
  59. package/dist/types.d.ts.map +1 -0
  60. package/dist/types.js +2 -0
  61. package/dist/types.js.map +1 -0
  62. package/dist/xai.d.ts +18 -0
  63. package/dist/xai.d.ts.map +1 -0
  64. package/dist/xai.js +164 -0
  65. package/dist/xai.js.map +1 -0
  66. package/package.json +30 -0
  67. package/src/anthropic.ts +691 -0
  68. package/src/claude-code-tools.ts +233 -0
  69. package/src/claude-oauth.ts +410 -0
  70. package/src/cohere.ts +242 -0
  71. package/src/deepseek.ts +241 -0
  72. package/src/factory.ts +142 -0
  73. package/src/google.ts +176 -0
  74. package/src/groq.ts +263 -0
  75. package/src/index.ts +44 -0
  76. package/src/ollama.ts +194 -0
  77. package/src/openai-compatible.ts +154 -0
  78. package/src/openai.ts +307 -0
  79. package/src/replicate.ts +247 -0
  80. package/src/thinking-levels.ts +37 -0
  81. package/src/types.ts +171 -0
  82. package/src/xai.ts +241 -0
  83. package/tests/adapters.test.ts +185 -0
  84. package/tests/claude-oauth.test.ts +45 -0
  85. package/tests/new-providers.test.ts +732 -0
  86. package/tests/thinking-levels.test.ts +82 -0
  87. package/tsconfig.json +8 -0
  88. package/tsconfig.tsbuildinfo +1 -0
@@ -0,0 +1,37 @@
1
+ import type { ThinkingLevel } from './types.js';
2
+
3
+ /**
4
+ * Map ThinkingLevel to Anthropic budget_tokens for the thinking block.
5
+ * Returns undefined for 'off' (no thinking).
6
+ */
7
+ export function getAnthropicThinkingBudget(level: ThinkingLevel): number | undefined {
8
+ switch (level) {
9
+ case 'off': return undefined;
10
+ case 'low': return 1024;
11
+ case 'medium': return 4096;
12
+ case 'high': return 10000;
13
+ case 'xhigh': return 32000;
14
+ }
15
+ }
16
+
17
+ /**
18
+ * Map ThinkingLevel to OpenAI reasoning_effort for o-series models.
19
+ * Returns undefined for 'off'.
20
+ */
21
+ export function getOpenAIReasoningEffort(level: ThinkingLevel): 'low' | 'medium' | 'high' | undefined {
22
+ switch (level) {
23
+ case 'off': return undefined;
24
+ case 'low': return 'low';
25
+ case 'medium': return 'medium';
26
+ case 'high': return 'high';
27
+ case 'xhigh': return 'high'; // OpenAI maxes at 'high'
28
+ }
29
+ }
30
+
31
+ /**
32
+ * Check if an OpenAI model supports reasoning_effort (o-series).
33
+ */
34
+ export function isOpenAIReasoningModel(model: string): boolean {
35
+ return model.startsWith('o1') || model.startsWith('o3') || model.startsWith('o4')
36
+ || model.startsWith('gpt-5');
37
+ }
package/src/types.ts ADDED
@@ -0,0 +1,171 @@
1
/**
 * Thinking/reasoning budget levels.
 * Maps to provider-specific parameters:
 * - Anthropic: thinking budget_tokens
 * - OpenAI: reasoning_effort (for o-series models)
 */
export type ThinkingLevel = 'off' | 'low' | 'medium' | 'high' | 'xhigh';

/** Conversation roles accepted by every provider adapter. */
export type MessageRole = 'user' | 'assistant' | 'system';

/** One turn of a conversation passed to Provider.complete/stream. */
export interface ChatMessage {
  role: MessageRole;
  content: string;
}

/**
 * Tool definition for AI providers
 */
export interface ToolDefinition {
  name: string;
  description: string;
  /** JSON-Schema-style description of the tool's arguments. */
  input_schema: {
    type: 'object';
    properties: Record<string, any>;
    required?: string[];
  };
}

/**
 * Tool use request from the AI
 */
export interface ToolUse {
  id: string;
  name: string;
  // NOTE(review): `input` is provider-supplied JSON of unknown shape;
  // `unknown` would be safer than `any`, but changing it would break
  // consumers — leaving as-is.
  input: any;
}

/**
 * Tool result to send back to the AI
 */
export interface ToolResultMessage {
  tool_use_id: string;
  content: string;
  is_error?: boolean;
}

/**
 * One event from Provider.stream(). Which optional fields are populated
 * depends on `type`: 'text'/'thinking' carry `content`, 'tool_use' carries
 * `toolUse`, 'error' carries `error`, and 'done' may carry `usage`.
 */
export interface StreamChunk {
  type: 'text' | 'thinking' | 'tool_use' | 'done' | 'error';
  content?: string;
  toolUse?: ToolUse;
  error?: string;
  usage?: {
    inputTokens: number;
    outputTokens: number;
  };
}

/** Per-request options accepted by complete() and stream(). */
export interface CompletionOptions {
  model?: string;
  maxTokens?: number;
  temperature?: number;
  systemPrompt?: string;
  stream?: boolean;
  tools?: ToolDefinition[];
  /** Thinking/reasoning budget level. Default: 'off'. */
  thinkingLevel?: ThinkingLevel;
  /** When true, don't filter Claude Code emulation tool calls — yield them as normal tool_use events. */
  passThroughAllTools?: boolean;
}

/** Result of a non-streaming completion. */
export interface CompletionResult {
  content: string;
  toolUse?: ToolUse[];
  usage: {
    inputTokens: number;
    outputTokens: number;
  };
  /** Model name as reported by the provider's response. */
  model: string;
  finishReason: string;
}

/** Static capability/pricing sheet for a single model. */
export interface ModelCapabilities {
  maxContextTokens: number;
  supportsVision: boolean;
  supportsTools: boolean;
  supportsStreaming: boolean;
  supportsImageGen: boolean;
  // Cost fields are 0 for local models (see isLocal).
  costPer1kInput: number;
  costPer1kOutput: number;
  strengths: string[];
  isLocal: boolean;
}

/** Descriptive metadata plus a runtime availability probe for a provider. */
export interface ProviderMetadata {
  name: string;
  displayName: string;
  models: Record<string, ModelCapabilities>;
  /** Probes the provider (network call) — resolves false rather than throwing. */
  isAvailable(): Promise<boolean>;
}

/** Common contract every provider adapter implements. */
export interface Provider {
  name: string;
  metadata: ProviderMetadata;
  complete(messages: ChatMessage[], options?: CompletionOptions): Promise<CompletionResult>;
  stream(
    messages: ChatMessage[],
    options?: CompletionOptions
  ): AsyncGenerator<StreamChunk, void, unknown>;
}

/**
 * Per-provider configuration. Every section is optional; presumably the
 * factory constructs a provider only when its section is present — verify
 * against factory.ts.
 */
export interface ProviderConfig {
  anthropic?: {
    apiKey?: string;
    oauthToken?: string;
    model?: string;
    maxTokens?: number;
    /** Read credentials from Claude CLI (~/.claude/.credentials.json) */
    useCliCredentials?: boolean;
    /** Callback to refresh the OAuth token when expired. Returns new access token. */
    onTokenRefresh?: () => Promise<string | null>;
    /** When the current OAuth token expires (epoch ms). Used for proactive refresh. */
    tokenExpiresAt?: number;
  };
  openai?: {
    apiKey: string;
    model?: string;
    maxTokens?: number;
  };
  google?: {
    apiKey: string;
    model?: string;
    maxTokens?: number;
  };
  ollama?: {
    /** Ollama server URL; presumably defaults to localhost — verify in ollama.ts. */
    baseUrl?: string;
    model?: string;
    maxTokens?: number;
  };
  openaiCompatible?: {
    baseUrl: string;
    apiKey?: string;
    model?: string;
    maxTokens?: number;
    /** Registry name for this endpoint (e.g. 'lm-studio', 'vllm'). */
    name?: string;
  };
  groq?: {
    apiKey: string;
    model?: string;
    maxTokens?: number;
  };
  replicate?: {
    apiToken: string;
    model?: string;
    /** Polling cadence while awaiting a prediction — units unverified here; confirm in replicate.ts. */
    pollInterval?: number;
  };
  deepseek?: {
    apiKey: string;
    model?: string;
    maxTokens?: number;
  };
  cohere?: {
    apiKey: string;
    model?: string;
    maxTokens?: number;
  };
  xai?: {
    apiKey: string;
    model?: string;
    maxTokens?: number;
  };
}
package/src/xai.ts ADDED
@@ -0,0 +1,241 @@
1
+ import type {
2
+ Provider,
3
+ ProviderMetadata,
4
+ ChatMessage,
5
+ CompletionOptions,
6
+ CompletionResult,
7
+ StreamChunk,
8
+ } from './types.js';
9
+
10
// Defaults used when the constructor options omit model/maxTokens.
const DEFAULT_MODEL = 'grok-2';
const DEFAULT_MAX_TOKENS = 4096;
// xAI exposes an OpenAI-compatible REST API at this root.
const BASE_URL = 'https://api.x.ai/v1';

/** Constructor options for XAIProvider. */
export interface XAIProviderOptions {
  apiKey: string;
  model?: string;
  maxTokens?: number;
}

// --- Wire-format shapes for the xAI chat-completions API (request/response). ---

/** Message shape sent in the request body. */
interface XAIChatMessage {
  role: string;
  content: string;
}

/** One entry of `choices` in a non-streaming response. */
interface XAIChoice {
  message: { role: string; content: string };
  finish_reason: string;
}

/** Token accounting block returned by the API. */
interface XAIUsage {
  prompt_tokens: number;
  completion_tokens: number;
}

/** Non-streaming response body of POST /chat/completions. */
interface XAIChatResponse {
  choices: XAIChoice[];
  usage: XAIUsage;
  model: string;
}

/** Incremental content delta inside a streaming chunk. */
interface XAIStreamDelta {
  content?: string;
}

/** One entry of `choices` in a streaming (SSE) chunk. */
interface XAIStreamChoice {
  delta: XAIStreamDelta;
  finish_reason: string | null;
}

/** One parsed SSE `data:` payload. `usage` appears only on the final
 *  usage chunk when stream_options.include_usage is requested. */
interface XAIStreamChunk {
  choices: XAIStreamChoice[];
  usage?: XAIUsage;
}
54
+
55
+ export class XAIProvider implements Provider {
56
+ name = 'xai';
57
+ metadata: ProviderMetadata = {
58
+ name: 'xai',
59
+ displayName: 'xAI Grok',
60
+ models: {
61
+ 'grok-2': {
62
+ maxContextTokens: 131072,
63
+ supportsVision: false,
64
+ supportsTools: true,
65
+ supportsStreaming: true,
66
+ supportsImageGen: false,
67
+ costPer1kInput: 0.002,
68
+ costPer1kOutput: 0.01,
69
+ strengths: ['reasoning', 'code', 'creative'],
70
+ isLocal: false,
71
+ },
72
+ 'grok-2-mini': {
73
+ maxContextTokens: 131072,
74
+ supportsVision: false,
75
+ supportsTools: true,
76
+ supportsStreaming: true,
77
+ supportsImageGen: false,
78
+ costPer1kInput: 0.0002,
79
+ costPer1kOutput: 0.001,
80
+ strengths: ['fast', 'low-cost'],
81
+ isLocal: false,
82
+ },
83
+ },
84
+ isAvailable: async () => {
85
+ try {
86
+ const response = await fetch(`${BASE_URL}/models`, {
87
+ headers: { Authorization: `Bearer ${this.apiKey}` },
88
+ });
89
+ return response.ok;
90
+ } catch {
91
+ return false;
92
+ }
93
+ },
94
+ };
95
+
96
+ private apiKey: string;
97
+ private defaultModel: string;
98
+ private defaultMaxTokens: number;
99
+
100
+ constructor(options: XAIProviderOptions) {
101
+ this.apiKey = options.apiKey;
102
+ this.defaultModel = options.model || DEFAULT_MODEL;
103
+ this.defaultMaxTokens = options.maxTokens || DEFAULT_MAX_TOKENS;
104
+ }
105
+
106
+ async complete(
107
+ messages: ChatMessage[],
108
+ options?: CompletionOptions,
109
+ ): Promise<CompletionResult> {
110
+ const model = options?.model || this.defaultModel;
111
+ const xaiMessages = this.prepareMessages(messages, options);
112
+
113
+ const response = await fetch(`${BASE_URL}/chat/completions`, {
114
+ method: 'POST',
115
+ headers: {
116
+ 'Content-Type': 'application/json',
117
+ Authorization: `Bearer ${this.apiKey}`,
118
+ },
119
+ body: JSON.stringify({
120
+ model,
121
+ max_tokens: options?.maxTokens || this.defaultMaxTokens,
122
+ messages: xaiMessages,
123
+ temperature: options?.temperature,
124
+ }),
125
+ });
126
+
127
+ if (!response.ok) {
128
+ throw new Error(`xAI API error: ${response.status} ${response.statusText}`);
129
+ }
130
+
131
+ const data = (await response.json()) as XAIChatResponse;
132
+ const choice = data.choices[0];
133
+
134
+ return {
135
+ content: choice?.message?.content || '',
136
+ usage: {
137
+ inputTokens: data.usage?.prompt_tokens || 0,
138
+ outputTokens: data.usage?.completion_tokens || 0,
139
+ },
140
+ model: data.model,
141
+ finishReason: choice?.finish_reason || 'unknown',
142
+ };
143
+ }
144
+
145
+ async *stream(
146
+ messages: ChatMessage[],
147
+ options?: CompletionOptions,
148
+ ): AsyncGenerator<StreamChunk, void, unknown> {
149
+ const model = options?.model || this.defaultModel;
150
+ const xaiMessages = this.prepareMessages(messages, options);
151
+
152
+ try {
153
+ const response = await fetch(`${BASE_URL}/chat/completions`, {
154
+ method: 'POST',
155
+ headers: {
156
+ 'Content-Type': 'application/json',
157
+ Authorization: `Bearer ${this.apiKey}`,
158
+ },
159
+ body: JSON.stringify({
160
+ model,
161
+ max_tokens: options?.maxTokens || this.defaultMaxTokens,
162
+ messages: xaiMessages,
163
+ temperature: options?.temperature,
164
+ stream: true,
165
+ stream_options: { include_usage: true },
166
+ }),
167
+ });
168
+
169
+ if (!response.ok) {
170
+ throw new Error(`xAI API error: ${response.status} ${response.statusText}`);
171
+ }
172
+
173
+ const reader = response.body?.getReader();
174
+ if (!reader) {
175
+ throw new Error('No response body');
176
+ }
177
+
178
+ const decoder = new TextDecoder();
179
+ let buffer = '';
180
+ let inputTokens = 0;
181
+ let outputTokens = 0;
182
+
183
+ while (true) {
184
+ const { done, value } = await reader.read();
185
+ if (done) break;
186
+
187
+ buffer += decoder.decode(value, { stream: true });
188
+ const lines = buffer.split('\n');
189
+ buffer = lines.pop() || '';
190
+
191
+ for (const line of lines) {
192
+ const trimmed = line.trim();
193
+ if (!trimmed || !trimmed.startsWith('data: ')) continue;
194
+ const data = trimmed.slice(6);
195
+ if (data === '[DONE]') continue;
196
+
197
+ const chunk = JSON.parse(data) as XAIStreamChunk;
198
+
199
+ const delta = chunk.choices[0]?.delta;
200
+ if (delta?.content) {
201
+ yield { type: 'text', content: delta.content };
202
+ }
203
+
204
+ if (chunk.usage) {
205
+ inputTokens = chunk.usage.prompt_tokens || 0;
206
+ outputTokens = chunk.usage.completion_tokens || 0;
207
+ }
208
+
209
+ if (chunk.choices[0]?.finish_reason) {
210
+ yield {
211
+ type: 'done',
212
+ usage: { inputTokens, outputTokens },
213
+ };
214
+ }
215
+ }
216
+ }
217
+ } catch (error) {
218
+ yield {
219
+ type: 'error',
220
+ error: error instanceof Error ? error.message : 'Unknown error',
221
+ };
222
+ }
223
+ }
224
+
225
+ private prepareMessages(
226
+ messages: ChatMessage[],
227
+ options?: CompletionOptions,
228
+ ): XAIChatMessage[] {
229
+ const xaiMessages: XAIChatMessage[] = [];
230
+
231
+ if (options?.systemPrompt) {
232
+ xaiMessages.push({ role: 'system', content: options.systemPrompt });
233
+ }
234
+
235
+ for (const msg of messages) {
236
+ xaiMessages.push({ role: msg.role, content: msg.content });
237
+ }
238
+
239
+ return xaiMessages;
240
+ }
241
+ }
@@ -0,0 +1,185 @@
1
+ import { describe, it, expect, vi, beforeEach } from 'vitest';
2
+ import { OllamaProvider } from '../src/ollama.js';
3
+ import { OpenAICompatibleProvider } from '../src/openai-compatible.js';
4
+ import { GoogleProvider } from '../src/google.js';
5
+ import { ProviderFactory } from '../src/factory.js';
6
+
7
// Unit tests for OllamaProvider. Each test that mocks fetch installs the
// spy with mockResolvedValueOnce/mockRejectedValueOnce and calls
// vi.restoreAllMocks() before finishing, so mock state never leaks.
describe('OllamaProvider', () => {
  let provider: OllamaProvider;

  // Fresh provider per test.
  beforeEach(() => {
    provider = new OllamaProvider({ model: 'llama3' });
  });

  it('should have correct metadata', () => {
    expect(provider.name).toBe('ollama');
    expect(provider.metadata.name).toBe('ollama');
    expect(provider.metadata.displayName).toBe('Ollama (Local)');
    expect(provider.metadata.models['llama3']).toBeDefined();
    expect(provider.metadata.models['llama3'].isLocal).toBe(true);
    // Local models must report zero cost on both sides.
    expect(provider.metadata.models['llama3'].costPer1kInput).toBe(0);
    expect(provider.metadata.models['llama3'].costPer1kOutput).toBe(0);
  });

  it('should use custom base URL', () => {
    const custom = new OllamaProvider({ baseUrl: 'http://192.168.1.5:11434', model: 'mistral' });
    expect(custom.metadata.models['mistral']).toBeDefined();
  });

  it('should make correct API call for complete', async () => {
    // Per the assertions below: prompt_eval_count maps to inputTokens,
    // eval_count maps to outputTokens.
    const mockResponse = {
      message: { role: 'assistant', content: 'Hello! How can I help?' },
      done: true,
      eval_count: 10,
      prompt_eval_count: 5,
    };

    vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce({
      ok: true,
      json: async () => mockResponse,
    } as Response);

    const result = await provider.complete([{ role: 'user', content: 'Hello' }]);
    expect(result.content).toBe('Hello! How can I help?');
    expect(result.usage.inputTokens).toBe(5);
    expect(result.usage.outputTokens).toBe(10);
    expect(result.finishReason).toBe('stop');

    vi.restoreAllMocks();
  });

  it('should handle API errors', async () => {
    // Non-ok response must surface as a thrown error, not a silent result.
    vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce({
      ok: false,
      status: 500,
      statusText: 'Internal Server Error',
    } as Response);

    await expect(provider.complete([{ role: 'user', content: 'Hello' }]))
      .rejects.toThrow('Ollama API error: 500');

    vi.restoreAllMocks();
  });

  it('should check availability via /api/tags', async () => {
    vi.spyOn(globalThis, 'fetch').mockResolvedValueOnce({ ok: true } as Response);
    const available = await provider.metadata.isAvailable();
    expect(available).toBe(true);
    vi.restoreAllMocks();
  });

  it('should return false when Ollama is unreachable', async () => {
    // fetch rejecting (connection refused) must be swallowed → false, not a throw.
    vi.spyOn(globalThis, 'fetch').mockRejectedValueOnce(new Error('Connection refused'));
    const available = await provider.metadata.isAvailable();
    expect(available).toBe(false);
    vi.restoreAllMocks();
  });
});
78
+
79
+ describe('OpenAICompatibleProvider', () => {
80
+ let provider: OpenAICompatibleProvider;
81
+
82
+ beforeEach(() => {
83
+ provider = new OpenAICompatibleProvider({
84
+ baseUrl: 'http://localhost:1234/v1',
85
+ model: 'local-model',
86
+ name: 'lm-studio',
87
+ });
88
+ });
89
+
90
+ it('should have correct metadata', () => {
91
+ expect(provider.name).toBe('lm-studio');
92
+ expect(provider.metadata.name).toBe('lm-studio');
93
+ expect(provider.metadata.displayName).toContain('lm-studio');
94
+ expect(provider.metadata.models['local-model']).toBeDefined();
95
+ expect(provider.metadata.models['local-model'].isLocal).toBe(true);
96
+ expect(provider.metadata.models['local-model'].costPer1kInput).toBe(0);
97
+ });
98
+
99
+ it('should default name to openai-compatible', () => {
100
+ const unnamed = new OpenAICompatibleProvider({
101
+ baseUrl: 'http://localhost:1234/v1',
102
+ model: 'test-model',
103
+ });
104
+ expect(unnamed.name).toBe('openai-compatible');
105
+ });
106
+ });
107
+
108
+ describe('GoogleProvider', () => {
109
+ it('should have correct metadata', () => {
110
+ const provider = new GoogleProvider({ apiKey: 'test-key' });
111
+ expect(provider.name).toBe('google');
112
+ expect(provider.metadata.name).toBe('google');
113
+ expect(provider.metadata.displayName).toBe('Google Gemini');
114
+ expect(provider.metadata.models['gemini-2.5-flash']).toBeDefined();
115
+ expect(provider.metadata.models['gemini-2.5-pro']).toBeDefined();
116
+ expect(provider.metadata.models['gemini-2.5-flash'].supportsVision).toBe(true);
117
+ expect(provider.metadata.models['gemini-2.5-pro'].maxContextTokens).toBe(1048576);
118
+ });
119
+ });
120
+
121
+ describe('ProviderFactory with new providers', () => {
122
+ it('should create ollama provider when config present', () => {
123
+ const factory = new ProviderFactory({
124
+ primary: 'ollama',
125
+ config: {
126
+ ollama: { model: 'llama3' },
127
+ },
128
+ });
129
+ const provider = factory.getProvider('ollama');
130
+ expect(provider.name).toBe('ollama');
131
+ });
132
+
133
+ it('should create google provider when API key present', () => {
134
+ const factory = new ProviderFactory({
135
+ primary: 'google',
136
+ config: {
137
+ google: { apiKey: 'test-google-key' },
138
+ },
139
+ });
140
+ const provider = factory.getProvider('google');
141
+ expect(provider.name).toBe('google');
142
+ });
143
+
144
+ it('should create openai-compatible provider when baseUrl present', () => {
145
+ const factory = new ProviderFactory({
146
+ primary: 'lm-studio',
147
+ config: {
148
+ openaiCompatible: {
149
+ baseUrl: 'http://localhost:1234/v1',
150
+ model: 'local-model',
151
+ name: 'lm-studio',
152
+ },
153
+ },
154
+ });
155
+ const provider = factory.getProvider('lm-studio');
156
+ expect(provider.name).toBe('lm-studio');
157
+ });
158
+
159
+ it('should list all available providers', () => {
160
+ const factory = new ProviderFactory({
161
+ primary: 'ollama',
162
+ config: {
163
+ ollama: { model: 'llama3' },
164
+ google: { apiKey: 'test-key' },
165
+ openaiCompatible: {
166
+ baseUrl: 'http://localhost:1234/v1',
167
+ model: 'test',
168
+ name: 'vllm',
169
+ },
170
+ },
171
+ });
172
+ const available = factory.listAvailable();
173
+ expect(available).toContain('ollama');
174
+ expect(available).toContain('google');
175
+ expect(available).toContain('vllm');
176
+ });
177
+
178
+ it('should throw when getting unconfigured provider', () => {
179
+ const factory = new ProviderFactory({
180
+ primary: 'anthropic',
181
+ config: {},
182
+ });
183
+ expect(() => factory.getProvider('google')).toThrow('Provider not configured: google');
184
+ });
185
+ });
@@ -0,0 +1,45 @@
1
+ import { describe, it, expect } from 'vitest';
2
+ import {
3
+ generatePKCE,
4
+ buildAuthorizationUrl,
5
+ } from '../src/claude-oauth.js';
6
+
7
+ describe('PKCE generation', () => {
8
+ it('generates a verifier of 43-128 chars using unreserved chars', () => {
9
+ const { verifier } = generatePKCE();
10
+ expect(verifier.length).toBeGreaterThanOrEqual(43);
11
+ expect(verifier.length).toBeLessThanOrEqual(128);
12
+ expect(verifier).toMatch(/^[A-Za-z0-9\-._~]+$/);
13
+ });
14
+
15
+ it('generates a base64url-encoded SHA-256 challenge', () => {
16
+ const { challenge } = generatePKCE();
17
+ expect(challenge).toMatch(/^[A-Za-z0-9_-]+$/);
18
+ expect(challenge.length).toBe(43);
19
+ });
20
+
21
+ it('generates unique values each call', () => {
22
+ const a = generatePKCE();
23
+ const b = generatePKCE();
24
+ expect(a.verifier).not.toBe(b.verifier);
25
+ });
26
+ });
27
+
28
+ describe('buildAuthorizationUrl', () => {
29
+ it('includes all required OAuth parameters', () => {
30
+ const url = buildAuthorizationUrl('test-challenge-value', 'test-state-value');
31
+ const parsed = new URL(url);
32
+
33
+ expect(parsed.origin).toBe('https://claude.ai');
34
+ expect(parsed.pathname).toBe('/oauth/authorize');
35
+ expect(parsed.searchParams.get('response_type')).toBe('code');
36
+ expect(parsed.searchParams.get('client_id')).toBeTruthy();
37
+ expect(parsed.searchParams.get('redirect_uri')).toBe(
38
+ 'https://console.anthropic.com/oauth/code/callback'
39
+ );
40
+ expect(parsed.searchParams.get('scope')).toContain('user:inference');
41
+ expect(parsed.searchParams.get('code_challenge')).toBe('test-challenge-value');
42
+ expect(parsed.searchParams.get('code_challenge_method')).toBe('S256');
43
+ expect(parsed.searchParams.get('state')).toBe('test-state-value');
44
+ });
45
+ });