ai.libx.js 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +339 -0
- package/build/@Module.d.ts +6 -0
- package/build/@Module.js +14 -0
- package/build/@Module.js.map +1 -0
- package/build/AIClient.d.ts +19 -0
- package/build/AIClient.js +132 -0
- package/build/AIClient.js.map +1 -0
- package/build/Extensions.d.ts +3 -0
- package/build/Extensions.js +4 -0
- package/build/Extensions.js.map +1 -0
- package/build/adapters/ai21.d.ts +8 -0
- package/build/adapters/ai21.js +83 -0
- package/build/adapters/ai21.js.map +1 -0
- package/build/adapters/anthropic.d.ts +9 -0
- package/build/adapters/anthropic.js +162 -0
- package/build/adapters/anthropic.js.map +1 -0
- package/build/adapters/base/BaseAdapter.d.ts +13 -0
- package/build/adapters/base/BaseAdapter.js +56 -0
- package/build/adapters/base/BaseAdapter.js.map +1 -0
- package/build/adapters/cloudflare.d.ts +8 -0
- package/build/adapters/cloudflare.js +129 -0
- package/build/adapters/cloudflare.js.map +1 -0
- package/build/adapters/cohere.d.ts +9 -0
- package/build/adapters/cohere.js +158 -0
- package/build/adapters/cohere.js.map +1 -0
- package/build/adapters/deepseek.d.ts +8 -0
- package/build/adapters/deepseek.js +142 -0
- package/build/adapters/deepseek.js.map +1 -0
- package/build/adapters/google.d.ts +9 -0
- package/build/adapters/google.js +166 -0
- package/build/adapters/google.js.map +1 -0
- package/build/adapters/groq.d.ts +8 -0
- package/build/adapters/groq.js +142 -0
- package/build/adapters/groq.js.map +1 -0
- package/build/adapters/index.d.ts +12 -0
- package/build/adapters/index.js +28 -0
- package/build/adapters/index.js.map +1 -0
- package/build/adapters/mistral.d.ts +8 -0
- package/build/adapters/mistral.js +139 -0
- package/build/adapters/mistral.js.map +1 -0
- package/build/adapters/openai.d.ts +9 -0
- package/build/adapters/openai.js +145 -0
- package/build/adapters/openai.js.map +1 -0
- package/build/adapters/openrouter.d.ts +8 -0
- package/build/adapters/openrouter.js +145 -0
- package/build/adapters/openrouter.js.map +1 -0
- package/build/adapters/xai.d.ts +8 -0
- package/build/adapters/xai.js +138 -0
- package/build/adapters/xai.js.map +1 -0
- package/build/index.d.ts +12 -0
- package/build/index.js +29 -0
- package/build/index.js.map +1 -0
- package/build/models.d.ts +6 -0
- package/build/models.js +103 -0
- package/build/models.js.map +1 -0
- package/build/types/index.d.ts +66 -0
- package/build/types/index.js +3 -0
- package/build/types/index.js.map +1 -0
- package/build/types/provider.d.ts +8 -0
- package/build/types/provider.js +3 -0
- package/build/types/provider.js.map +1 -0
- package/build/types/streaming.d.ts +8 -0
- package/build/types/streaming.js +33 -0
- package/build/types/streaming.js.map +1 -0
- package/build/utils/errors.d.ts +21 -0
- package/build/utils/errors.js +70 -0
- package/build/utils/errors.js.map +1 -0
- package/build/utils/model-normalization.d.ts +9 -0
- package/build/utils/model-normalization.js +59 -0
- package/build/utils/model-normalization.js.map +1 -0
- package/build/utils/request-logger.d.ts +43 -0
- package/build/utils/request-logger.js +96 -0
- package/build/utils/request-logger.js.map +1 -0
- package/build/utils/stream.d.ts +8 -0
- package/build/utils/stream.js +109 -0
- package/build/utils/stream.js.map +1 -0
- package/build/utils/validation.d.ts +4 -0
- package/build/utils/validation.js +57 -0
- package/build/utils/validation.js.map +1 -0
- package/example.ts +166 -0
- package/jest.config.js +26 -0
- package/package.json +68 -0
- package/src/@Module.ts +9 -0
- package/src/AIClient.ts +210 -0
- package/src/Extensions.ts +7 -0
- package/src/adapters/ai21.ts +99 -0
- package/src/adapters/anthropic.ts +152 -0
- package/src/adapters/base/BaseAdapter.ts +78 -0
- package/src/adapters/cloudflare.ts +115 -0
- package/src/adapters/cohere.ts +158 -0
- package/src/adapters/deepseek.ts +108 -0
- package/src/adapters/google.ts +170 -0
- package/src/adapters/groq.ts +108 -0
- package/src/adapters/index.ts +14 -0
- package/src/adapters/mistral.ts +108 -0
- package/src/adapters/openai.ts +129 -0
- package/src/adapters/openrouter.ts +110 -0
- package/src/adapters/xai.ts +106 -0
- package/src/index.ts +66 -0
- package/src/models.ts +116 -0
- package/src/types/index.ts +81 -0
- package/src/types/provider.ts +19 -0
- package/src/types/streaming.ts +32 -0
- package/src/utils/errors.ts +76 -0
- package/src/utils/model-normalization.ts +100 -0
- package/src/utils/request-logger.ts +179 -0
- package/src/utils/stream.ts +93 -0
- package/src/utils/validation.ts +69 -0
package/src/adapters/cohere.ts
@@ -0,0 +1,158 @@
+import { BaseAdapter } from './base/BaseAdapter';
+import { ChatOptions, ChatResponse, StreamChunk, Message } from '../types';
+import { streamLines } from '../utils/stream';
+import { handleProviderError } from '../utils/errors';
+
+interface CohereChatMessage {
+  role: 'USER' | 'CHATBOT' | 'SYSTEM';
+  message: string;
+}
+
+interface CohereRequest {
+  model?: string;
+  message: string;
+  chat_history?: Array<{ role: 'USER' | 'CHATBOT'; message: string; }>;
+  preamble?: string;
+  temperature?: number;
+  max_tokens?: number;
+  p?: number;
+  stop_sequences?: string[];
+  stream?: boolean;
+}
+
+/**
+ * Cohere API adapter
+ */
+export class CohereAdapter extends BaseAdapter {
+  get name(): string {
+    return 'cohere';
+  }
+
+  async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
+    try {
+      const apiKey = this.getApiKey(options);
+      const baseUrl = this.getBaseUrl('https://api.cohere.ai/v1');
+
+      // Strip provider prefix from model if present
+      const model = options.model.replace(/^cohere\//, '');
+
+      // Transform messages to Cohere format
+      const { message, chatHistory, preamble } = this.transformMessages(options.messages);
+
+      const request: CohereRequest = {
+        message,
+        stream: options.stream || false,
+      };
+
+      // Add model if specified
+      if (model) request.model = model;
+
+      // Add chat history
+      if (chatHistory.length > 0) {
+        request.chat_history = chatHistory;
+      }
+
+      // Add preamble (system message)
+      if (preamble) {
+        request.preamble = preamble;
+      }
+
+      // Add optional parameters
+      if (options.temperature !== undefined) request.temperature = options.temperature;
+      if (options.maxTokens !== undefined) request.max_tokens = options.maxTokens;
+      if (options.topP !== undefined) request.p = options.topP;
+      if (options.stop && Array.isArray(options.stop)) {
+        request.stop_sequences = options.stop;
+      }
+
+      // Merge provider-specific options
+      if (options.providerOptions) {
+        Object.assign(request, options.providerOptions);
+      }
+
+      const response = await this.fetchWithErrorHandling(
+        `${baseUrl}/chat`,
+        {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+            'Authorization': `Bearer ${apiKey}`,
+          },
+          body: JSON.stringify(request),
+        },
+        this.name
+      );
+
+      if (options.stream) {
+        return this.handleStreamResponse(response, model);
+      }
+
+      return this.handleNonStreamResponse(await response.json(), model);
+    } catch (error) {
+      handleProviderError(error, this.name);
+    }
+  }
+
+  private transformMessages(messages: Message[]): {
+    message: string;
+    chatHistory: Array<{ role: 'USER' | 'CHATBOT'; message: string; }>;
+    preamble?: string;
+  } {
+    const systemMessage = messages.find((m) => m.role === 'system');
+    const nonSystemMessages = messages.filter((m) => m.role !== 'system');
+
+    // Last message is the current message
+    const lastMessage = nonSystemMessages[nonSystemMessages.length - 1];
+    const historyMessages = nonSystemMessages.slice(0, -1);
+
+    return {
+      message: lastMessage?.content || '',
+      chatHistory: historyMessages.map((msg) => ({
+        role: msg.role === 'user' ? 'USER' : 'CHATBOT',
+        message: msg.content,
+      })),
+      preamble: systemMessage?.content,
+    };
+  }
+
+  private handleNonStreamResponse(data: any, model: string): ChatResponse {
+    return {
+      content: data.text || '',
+      finishReason: data.finish_reason,
+      usage: data.meta?.tokens ? {
+        promptTokens: data.meta.tokens.input_tokens || 0,
+        completionTokens: data.meta.tokens.output_tokens || 0,
+        totalTokens: (data.meta.tokens.input_tokens || 0) + (data.meta.tokens.output_tokens || 0),
+      } : undefined,
+      model,
+      raw: data,
+    };
+  }
+
+  private async *handleStreamResponse(response: Response, model: string): AsyncIterable<StreamChunk> {
+    if (!response.body) {
+      throw new Error('No response body for streaming');
+    }
+
+    for await (const line of streamLines(response.body)) {
+      try {
+        const chunk = JSON.parse(line);
+
+        if (chunk.event_type === 'text-generation') {
+          yield {
+            content: chunk.text || '',
+          };
+        } else if (chunk.event_type === 'stream-end') {
+          yield {
+            content: '',
+            finishReason: chunk.finish_reason,
+          };
+        }
+      } catch (e) {
+        // Skip invalid JSON
+        continue;
+      }
+    }
+  }
+}
+
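The main wrinkle in the Cohere hunk is that Cohere's v1 chat API takes a single `message` plus separate `chat_history` and `preamble` fields rather than one messages array. A minimal standalone sketch of the mapping `transformMessages` performs (the `Message` shape is assumed to match `src/types`, which is not part of this hunk):

```ts
// Standalone sketch of the role mapping shown above; Message shape assumed.
type Message = { role: 'system' | 'user' | 'assistant'; content: string };

function toCohereShape(messages: Message[]) {
  const system = messages.find((m) => m.role === 'system');
  const turns = messages.filter((m) => m.role !== 'system');
  return {
    message: turns[turns.length - 1]?.content ?? '', // current turn
    chat_history: turns.slice(0, -1).map((m) => ({
      role: m.role === 'user' ? ('USER' as const) : ('CHATBOT' as const),
      message: m.content,
    })),
    preamble: system?.content, // system prompt becomes the preamble
  };
}

// A three-turn conversation collapses into message + chat_history + preamble.
console.log(toCohereShape([
  { role: 'system', content: 'Be terse.' },
  { role: 'user', content: 'Hi' },
  { role: 'assistant', content: 'Hello!' },
  { role: 'user', content: 'What is 2 + 2?' },
]));
// -> { message: 'What is 2 + 2?',
//      chat_history: [USER 'Hi', CHATBOT 'Hello!'],
//      preamble: 'Be terse.' }
```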
package/src/adapters/deepseek.ts
@@ -0,0 +1,108 @@
+import { BaseAdapter } from './base/BaseAdapter';
+import { ChatOptions, ChatResponse, StreamChunk } from '../types';
+import { parseSSEStream } from '../utils/stream';
+import { handleProviderError } from '../utils/errors';
+
+/**
+ * DeepSeek API adapter (uses OpenAI-compatible API)
+ */
+export class DeepSeekAdapter extends BaseAdapter {
+  get name(): string {
+    return 'deepseek';
+  }
+
+  async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
+    try {
+      const apiKey = this.getApiKey(options);
+      const baseUrl = this.getBaseUrl('https://api.deepseek.com/v1');
+
+      // Strip provider prefix from model if present
+      const model = options.model.replace(/^deepseek\//, '');
+
+      const request: any = {
+        model,
+        messages: options.messages.map((msg) => ({
+          role: msg.role,
+          content: msg.content,
+        })),
+        stream: options.stream || false,
+      };
+
+      // Add optional parameters
+      if (options.temperature !== undefined) request.temperature = options.temperature;
+      if (options.maxTokens !== undefined) request.max_tokens = options.maxTokens;
+      if (options.topP !== undefined) request.top_p = options.topP;
+      if (options.frequencyPenalty !== undefined) request.frequency_penalty = options.frequencyPenalty;
+      if (options.presencePenalty !== undefined) request.presence_penalty = options.presencePenalty;
+      if (options.stop !== undefined) request.stop = options.stop;
+
+      // Merge provider-specific options
+      if (options.providerOptions) {
+        Object.assign(request, options.providerOptions);
+      }
+
+      const response = await this.fetchWithErrorHandling(
+        `${baseUrl}/chat/completions`,
+        {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+            'Authorization': `Bearer ${apiKey}`,
+          },
+          body: JSON.stringify(request),
+        },
+        this.name
+      );
+
+      if (options.stream) {
+        return this.handleStreamResponse(response, model);
+      }
+
+      return this.handleNonStreamResponse(await response.json(), model);
+    } catch (error) {
+      handleProviderError(error, this.name);
+    }
+  }
+
+  private handleNonStreamResponse(data: any, model: string): ChatResponse {
+    const choice = data.choices?.[0];
+    if (!choice) {
+      throw new Error('No choices in response');
+    }
+
+    return {
+      content: choice.message?.content || '',
+      finishReason: choice.finish_reason,
+      usage: data.usage ? {
+        promptTokens: data.usage.prompt_tokens,
+        completionTokens: data.usage.completion_tokens,
+        totalTokens: data.usage.total_tokens,
+      } : undefined,
+      model,
+      raw: data,
+    };
+  }
+
+  private async *handleStreamResponse(response: Response, model: string): AsyncIterable<StreamChunk> {
+    if (!response.body) {
+      throw new Error('No response body for streaming');
+    }
+
+    for await (const chunk of parseSSEStream(response.body)) {
+      const choice = chunk.choices?.[0];
+      if (!choice) continue;
+
+      const content = choice.delta?.content || '';
+      const finishReason = choice.finish_reason;
+
+      if (content || finishReason) {
+        yield {
+          content,
+          finishReason,
+          index: choice.index,
+        };
+      }
+    }
+  }
+}
+
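The DeepSeek hunk follows the OpenAI-compatible pattern: snake_case body fields, `choices[0].message.content` for non-streaming responses, and SSE deltas when streaming. A hedged usage sketch follows; the constructor/config shape is an assumption, since `BaseAdapter` is not shown in this section of the diff:

```ts
// Hypothetical usage; the config object passed to the constructor is assumed,
// as BaseAdapter's signature does not appear in this section.
import { DeepSeekAdapter } from './adapters/deepseek';
import { ChatResponse } from './types';

async function main() {
  const adapter = new DeepSeekAdapter({ apiKey: process.env.DEEPSEEK_API_KEY });
  const result = await adapter.chat({
    model: 'deepseek/deepseek-chat', // the "deepseek/" prefix is stripped by the adapter
    messages: [{ role: 'user', content: 'Hello' }],
    temperature: 0.7,
  });
  // stream was not requested, so the union narrows to ChatResponse
  const { content, usage } = result as ChatResponse;
  console.log(content, usage?.totalTokens);
}

main().catch(console.error);
```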
package/src/adapters/google.ts
@@ -0,0 +1,170 @@
+import { BaseAdapter } from './base/BaseAdapter';
+import { ChatOptions, ChatResponse, StreamChunk, Message } from '../types';
+import { streamLines } from '../utils/stream';
+import { handleProviderError } from '../utils/errors';
+
+interface GooglePart {
+  text: string;
+}
+
+interface GoogleContent {
+  role: string;
+  parts: GooglePart[];
+}
+
+interface GoogleRequest {
+  contents: GoogleContent[];
+  generationConfig?: {
+    temperature?: number;
+    maxOutputTokens?: number;
+    topP?: number;
+    topK?: number;
+    stopSequences?: string[];
+  };
+  systemInstruction?: {
+    parts: GooglePart[];
+  };
+}
+
+/**
+ * Google Gemini API adapter
+ */
+export class GoogleAdapter extends BaseAdapter {
+  get name(): string {
+    return 'google';
+  }
+
+  async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
+    try {
+      const apiKey = this.getApiKey(options);
+      const baseUrl = this.getBaseUrl('https://generativelanguage.googleapis.com/v1beta');
+
+      // Strip provider prefix from model if present (e.g., "google/models/gemini-2.5-pro" -> "models/gemini-2.5-pro")
+      let model = options.model.replace(/^google\//, '');
+
+      // Ensure model has "models/" prefix for API
+      if (!model.startsWith('models/')) {
+        model = `models/${model}`;
+      }
+
+      // Extract system message if present
+      const systemMessage = options.messages.find((m) => m.role === 'system');
+      const nonSystemMessages = options.messages.filter((m) => m.role !== 'system');
+
+      const request: GoogleRequest = {
+        contents: this.transformMessages(nonSystemMessages),
+      };
+
+      // Add system instruction
+      if (systemMessage) {
+        request.systemInstruction = {
+          parts: [{ text: systemMessage.content }],
+        };
+      }
+
+      // Add generation config
+      const generationConfig: any = {};
+      if (options.temperature !== undefined) generationConfig.temperature = options.temperature;
+      if (options.maxTokens !== undefined) generationConfig.maxOutputTokens = options.maxTokens;
+      if (options.topP !== undefined) generationConfig.topP = options.topP;
+      if (options.topK !== undefined) generationConfig.topK = options.topK;
+      if (options.stop && Array.isArray(options.stop)) {
+        generationConfig.stopSequences = options.stop;
+      }
+
+      if (Object.keys(generationConfig).length > 0) {
+        request.generationConfig = generationConfig;
+      }
+
+      // Merge provider-specific options
+      if (options.providerOptions) {
+        Object.assign(request, options.providerOptions);
+      }
+
+      const endpoint = options.stream ? 'streamGenerateContent' : 'generateContent';
+      const url = `${baseUrl}/${model}:${endpoint}?key=${apiKey}`;
+
+      const response = await this.fetchWithErrorHandling(
+        url,
+        {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+          },
+          body: JSON.stringify(request),
+        },
+        this.name
+      );
+
+      if (options.stream) {
+        return this.handleStreamResponse(response, model);
+      }
+
+      return this.handleNonStreamResponse(await response.json(), model);
+    } catch (error) {
+      handleProviderError(error, this.name);
+    }
+  }
+
+  private transformMessages(messages: Message[]): GoogleContent[] {
+    return messages.map((msg) => ({
+      role: msg.role === 'assistant' ? 'model' : 'user',
+      parts: [{ text: msg.content }],
+    }));
+  }
+
+  private handleNonStreamResponse(data: any, model: string): ChatResponse {
+    const candidate = data.candidates?.[0];
+    if (!candidate) {
+      throw new Error('No candidates in response');
+    }
+
+    const content = candidate.content?.parts?.[0]?.text || '';
+
+    return {
+      content,
+      finishReason: candidate.finishReason,
+      usage: data.usageMetadata ? {
+        promptTokens: data.usageMetadata.promptTokenCount || 0,
+        completionTokens: data.usageMetadata.candidatesTokenCount || 0,
+        totalTokens: data.usageMetadata.totalTokenCount || 0,
+      } : undefined,
+      model,
+      raw: data,
+    };
+  }
+
+  private async *handleStreamResponse(response: Response, model: string): AsyncIterable<StreamChunk> {
+    if (!response.body) {
+      throw new Error('No response body for streaming');
+    }
+
+    for await (const line of streamLines(response.body)) {
+      if (!line.trim() || line.trim() === '[' || line.trim() === ']') continue;
+
+      // Remove trailing comma if present
+      const cleanLine = line.trim().replace(/,$/, '');
+
+      try {
+        const chunk = JSON.parse(cleanLine);
+        const candidate = chunk.candidates?.[0];
+
+        if (!candidate) continue;
+
+        const content = candidate.content?.parts?.[0]?.text || '';
+        const finishReason = candidate.finishReason;
+
+        if (content || finishReason) {
+          yield {
+            content,
+            finishReason,
+          };
+        }
+      } catch (e) {
+        // Skip invalid JSON
+        continue;
+      }
+    }
+  }
+}
+
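Google's streaming endpoint (`streamGenerateContent`) returns a JSON array delivered incrementally, which is why `handleStreamResponse` skips bare `[`/`]` lines and strips trailing commas before parsing each element, rather than using the SSE parser the OpenAI-compatible adapters use. Consuming that stream might look like the sketch below (constructor shape assumed, as above):

```ts
// Sketch of the streaming branch; the adapter constructor is assumed to
// mirror the other adapters (a config object carrying the API key).
import { GoogleAdapter } from './adapters/google';
import { StreamChunk } from './types';

async function streamDemo() {
  const adapter = new GoogleAdapter({ apiKey: process.env.GOOGLE_API_KEY });
  const stream = await adapter.chat({
    model: 'gemini-2.5-pro', // the adapter prepends "models/" automatically
    messages: [{ role: 'user', content: 'Write a haiku.' }],
    stream: true,
  });

  // With stream: true, chat() resolves to an AsyncIterable<StreamChunk>.
  for await (const chunk of stream as AsyncIterable<StreamChunk>) {
    process.stdout.write(chunk.content);
    if (chunk.finishReason) console.log(`\n[done: ${chunk.finishReason}]`);
  }
}

streamDemo().catch(console.error);
```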
package/src/adapters/groq.ts
@@ -0,0 +1,108 @@
+import { BaseAdapter } from './base/BaseAdapter';
+import { ChatOptions, ChatResponse, StreamChunk } from '../types';
+import { parseSSEStream } from '../utils/stream';
+import { handleProviderError } from '../utils/errors';
+
+/**
+ * Groq API adapter (uses OpenAI-compatible API)
+ */
+export class GroqAdapter extends BaseAdapter {
+  get name(): string {
+    return 'groq';
+  }
+
+  async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
+    try {
+      const apiKey = this.getApiKey(options);
+      const baseUrl = this.getBaseUrl('https://api.groq.com/openai/v1');
+
+      // Strip provider prefix from model if present
+      const model = options.model.replace(/^groq\//, '');
+
+      const request: any = {
+        model,
+        messages: options.messages.map((msg) => ({
+          role: msg.role,
+          content: msg.content,
+        })),
+        stream: options.stream || false,
+      };
+
+      // Add optional parameters
+      if (options.temperature !== undefined) request.temperature = options.temperature;
+      if (options.maxTokens !== undefined) request.max_tokens = options.maxTokens;
+      if (options.topP !== undefined) request.top_p = options.topP;
+      if (options.frequencyPenalty !== undefined) request.frequency_penalty = options.frequencyPenalty;
+      if (options.presencePenalty !== undefined) request.presence_penalty = options.presencePenalty;
+      if (options.stop !== undefined) request.stop = options.stop;
+
+      // Merge provider-specific options
+      if (options.providerOptions) {
+        Object.assign(request, options.providerOptions);
+      }
+
+      const response = await this.fetchWithErrorHandling(
+        `${baseUrl}/chat/completions`,
+        {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+            'Authorization': `Bearer ${apiKey}`,
+          },
+          body: JSON.stringify(request),
+        },
+        this.name
+      );
+
+      if (options.stream) {
+        return this.handleStreamResponse(response, model);
+      }
+
+      return this.handleNonStreamResponse(await response.json(), model);
+    } catch (error) {
+      handleProviderError(error, this.name);
+    }
+  }
+
+  private handleNonStreamResponse(data: any, model: string): ChatResponse {
+    const choice = data.choices?.[0];
+    if (!choice) {
+      throw new Error('No choices in response');
+    }
+
+    return {
+      content: choice.message?.content || '',
+      finishReason: choice.finish_reason,
+      usage: data.usage ? {
+        promptTokens: data.usage.prompt_tokens,
+        completionTokens: data.usage.completion_tokens,
+        totalTokens: data.usage.total_tokens,
+      } : undefined,
+      model,
+      raw: data,
+    };
+  }
+
+  private async *handleStreamResponse(response: Response, model: string): AsyncIterable<StreamChunk> {
+    if (!response.body) {
+      throw new Error('No response body for streaming');
+    }
+
+    for await (const chunk of parseSSEStream(response.body)) {
+      const choice = chunk.choices?.[0];
+      if (!choice) continue;
+
+      const content = choice.delta?.content || '';
+      const finishReason = choice.finish_reason;
+
+      if (content || finishReason) {
+        yield {
+          content,
+          finishReason,
+          index: choice.index,
+        };
+      }
+    }
+  }
+}
+
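One detail shared by every adapter in this diff: `providerOptions` is merged onto the request body with `Object.assign` after the normalized fields, so it can both add provider-specific parameters and override the normalized ones. A sketch under the same constructor assumption; the specific fields passed here are illustrative, not taken from this diff:

```ts
// providerOptions is merged last, so these fields land on the JSON body
// verbatim and would override any normalized field with the same name.
import { GroqAdapter } from './adapters/groq';

const adapter = new GroqAdapter({ apiKey: process.env.GROQ_API_KEY }); // config shape assumed
const pending = adapter.chat({
  model: 'groq/llama-3.1-8b-instant',
  messages: [{ role: 'user', content: 'Reply in JSON.' }],
  providerOptions: { seed: 7, response_format: { type: 'json_object' } }, // illustrative fields
});
```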
package/src/adapters/index.ts
@@ -0,0 +1,14 @@
+// Export all adapters for direct usage
+export { BaseAdapter } from './base/BaseAdapter';
+export { OpenAIAdapter } from './openai';
+export { AnthropicAdapter } from './anthropic';
+export { GoogleAdapter } from './google';
+export { GroqAdapter } from './groq';
+export { MistralAdapter } from './mistral';
+export { CohereAdapter } from './cohere';
+export { XAIAdapter } from './xai';
+export { DeepSeekAdapter } from './deepseek';
+export { AI21Adapter } from './ai21';
+export { OpenRouterAdapter } from './openrouter';
+export { CloudflareAdapter } from './cloudflare';
+
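This barrel file lets consumers import any adapter without knowing the per-file layout, e.g. (the published entry path is an assumption based on the `build/` output shown in the file list):

```ts
// Import path assumed from the build/ layout listed above.
import { CohereAdapter, GoogleAdapter } from 'ai.libx.js/build/adapters';
```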
package/src/adapters/mistral.ts
@@ -0,0 +1,108 @@
+import { BaseAdapter } from './base/BaseAdapter';
+import { ChatOptions, ChatResponse, StreamChunk } from '../types';
+import { parseSSEStream } from '../utils/stream';
+import { handleProviderError } from '../utils/errors';
+
+/**
+ * Mistral AI API adapter
+ */
+export class MistralAdapter extends BaseAdapter {
+  get name(): string {
+    return 'mistral';
+  }
+
+  async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
+    try {
+      const apiKey = this.getApiKey(options);
+      const baseUrl = this.getBaseUrl('https://api.mistral.ai/v1');
+
+      // Strip provider prefix from model if present
+      const model = options.model.replace(/^mistral\//, '');
+
+      const request: any = {
+        model,
+        messages: options.messages.map((msg) => ({
+          role: msg.role,
+          content: msg.content,
+        })),
+        stream: options.stream || false,
+      };
+
+      // Add optional parameters
+      if (options.temperature !== undefined) request.temperature = options.temperature;
+      if (options.maxTokens !== undefined) request.max_tokens = options.maxTokens;
+      if (options.topP !== undefined) request.top_p = options.topP;
+      if (options.stop && Array.isArray(options.stop)) {
+        request.stop = options.stop;
+      }
+
+      // Merge provider-specific options
+      if (options.providerOptions) {
+        Object.assign(request, options.providerOptions);
+      }
+
+      const response = await this.fetchWithErrorHandling(
+        `${baseUrl}/chat/completions`,
+        {
+          method: 'POST',
+          headers: {
+            'Content-Type': 'application/json',
+            'Authorization': `Bearer ${apiKey}`,
+          },
+          body: JSON.stringify(request),
+        },
+        this.name
+      );
+
+      if (options.stream) {
+        return this.handleStreamResponse(response, model);
+      }
+
+      return this.handleNonStreamResponse(await response.json(), model);
+    } catch (error) {
+      handleProviderError(error, this.name);
+    }
+  }
+
+  private handleNonStreamResponse(data: any, model: string): ChatResponse {
+    const choice = data.choices?.[0];
+    if (!choice) {
+      throw new Error('No choices in response');
+    }
+
+    return {
+      content: choice.message?.content || '',
+      finishReason: choice.finish_reason,
+      usage: data.usage ? {
+        promptTokens: data.usage.prompt_tokens,
+        completionTokens: data.usage.completion_tokens,
+        totalTokens: data.usage.total_tokens,
+      } : undefined,
+      model,
+      raw: data,
+    };
+  }
+
+  private async *handleStreamResponse(response: Response, model: string): AsyncIterable<StreamChunk> {
+    if (!response.body) {
+      throw new Error('No response body for streaming');
+    }
+
+    for await (const chunk of parseSSEStream(response.body)) {
+      const choice = chunk.choices?.[0];
+      if (!choice) continue;
+
+      const content = choice.delta?.content || '';
+      const finishReason = choice.finish_reason;
+
+      if (content || finishReason) {
+        yield {
+          content,
+          finishReason,
+          index: choice.index,
+        };
+      }
+    }
+  }
+}
+
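The Mistral hunk repeats the OpenAI-compatible pattern, differing only in base URL, prefix stripping, and a smaller set of normalized parameters (no frequency/presence penalties). Since every adapter funnels failures through `handleProviderError(error, this.name)`, a caller-level catch is enough to observe the normalized error; a sketch under the same constructor assumption as above:

```ts
import { MistralAdapter } from './adapters/mistral';

async function safeChat() {
  const adapter = new MistralAdapter({ apiKey: process.env.MISTRAL_API_KEY }); // config shape assumed
  try {
    return await adapter.chat({
      model: 'mistral/mistral-small-latest',
      messages: [{ role: 'user', content: 'Hi' }],
    });
  } catch (err) {
    // Whatever handleProviderError throws surfaces here; its error class is
    // defined in src/utils/errors.ts, outside this section of the diff.
    console.error('mistral call failed:', err);
    throw err;
  }
}
```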