ai.libx.js 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +339 -0
- package/build/@Module.d.ts +6 -0
- package/build/@Module.js +14 -0
- package/build/@Module.js.map +1 -0
- package/build/AIClient.d.ts +19 -0
- package/build/AIClient.js +132 -0
- package/build/AIClient.js.map +1 -0
- package/build/Extensions.d.ts +3 -0
- package/build/Extensions.js +4 -0
- package/build/Extensions.js.map +1 -0
- package/build/adapters/ai21.d.ts +8 -0
- package/build/adapters/ai21.js +83 -0
- package/build/adapters/ai21.js.map +1 -0
- package/build/adapters/anthropic.d.ts +9 -0
- package/build/adapters/anthropic.js +162 -0
- package/build/adapters/anthropic.js.map +1 -0
- package/build/adapters/base/BaseAdapter.d.ts +13 -0
- package/build/adapters/base/BaseAdapter.js +56 -0
- package/build/adapters/base/BaseAdapter.js.map +1 -0
- package/build/adapters/cloudflare.d.ts +8 -0
- package/build/adapters/cloudflare.js +129 -0
- package/build/adapters/cloudflare.js.map +1 -0
- package/build/adapters/cohere.d.ts +9 -0
- package/build/adapters/cohere.js +158 -0
- package/build/adapters/cohere.js.map +1 -0
- package/build/adapters/deepseek.d.ts +8 -0
- package/build/adapters/deepseek.js +142 -0
- package/build/adapters/deepseek.js.map +1 -0
- package/build/adapters/google.d.ts +9 -0
- package/build/adapters/google.js +166 -0
- package/build/adapters/google.js.map +1 -0
- package/build/adapters/groq.d.ts +8 -0
- package/build/adapters/groq.js +142 -0
- package/build/adapters/groq.js.map +1 -0
- package/build/adapters/index.d.ts +12 -0
- package/build/adapters/index.js +28 -0
- package/build/adapters/index.js.map +1 -0
- package/build/adapters/mistral.d.ts +8 -0
- package/build/adapters/mistral.js +139 -0
- package/build/adapters/mistral.js.map +1 -0
- package/build/adapters/openai.d.ts +9 -0
- package/build/adapters/openai.js +145 -0
- package/build/adapters/openai.js.map +1 -0
- package/build/adapters/openrouter.d.ts +8 -0
- package/build/adapters/openrouter.js +145 -0
- package/build/adapters/openrouter.js.map +1 -0
- package/build/adapters/xai.d.ts +8 -0
- package/build/adapters/xai.js +138 -0
- package/build/adapters/xai.js.map +1 -0
- package/build/index.d.ts +12 -0
- package/build/index.js +29 -0
- package/build/index.js.map +1 -0
- package/build/models.d.ts +6 -0
- package/build/models.js +103 -0
- package/build/models.js.map +1 -0
- package/build/types/index.d.ts +66 -0
- package/build/types/index.js +3 -0
- package/build/types/index.js.map +1 -0
- package/build/types/provider.d.ts +8 -0
- package/build/types/provider.js +3 -0
- package/build/types/provider.js.map +1 -0
- package/build/types/streaming.d.ts +8 -0
- package/build/types/streaming.js +33 -0
- package/build/types/streaming.js.map +1 -0
- package/build/utils/errors.d.ts +21 -0
- package/build/utils/errors.js +70 -0
- package/build/utils/errors.js.map +1 -0
- package/build/utils/model-normalization.d.ts +9 -0
- package/build/utils/model-normalization.js +59 -0
- package/build/utils/model-normalization.js.map +1 -0
- package/build/utils/request-logger.d.ts +43 -0
- package/build/utils/request-logger.js +96 -0
- package/build/utils/request-logger.js.map +1 -0
- package/build/utils/stream.d.ts +8 -0
- package/build/utils/stream.js +109 -0
- package/build/utils/stream.js.map +1 -0
- package/build/utils/validation.d.ts +4 -0
- package/build/utils/validation.js +57 -0
- package/build/utils/validation.js.map +1 -0
- package/example.ts +166 -0
- package/jest.config.js +26 -0
- package/package.json +68 -0
- package/src/@Module.ts +9 -0
- package/src/AIClient.ts +210 -0
- package/src/Extensions.ts +7 -0
- package/src/adapters/ai21.ts +99 -0
- package/src/adapters/anthropic.ts +152 -0
- package/src/adapters/base/BaseAdapter.ts +78 -0
- package/src/adapters/cloudflare.ts +115 -0
- package/src/adapters/cohere.ts +158 -0
- package/src/adapters/deepseek.ts +108 -0
- package/src/adapters/google.ts +170 -0
- package/src/adapters/groq.ts +108 -0
- package/src/adapters/index.ts +14 -0
- package/src/adapters/mistral.ts +108 -0
- package/src/adapters/openai.ts +129 -0
- package/src/adapters/openrouter.ts +110 -0
- package/src/adapters/xai.ts +106 -0
- package/src/index.ts +66 -0
- package/src/models.ts +116 -0
- package/src/types/index.ts +81 -0
- package/src/types/provider.ts +19 -0
- package/src/types/streaming.ts +32 -0
- package/src/utils/errors.ts +76 -0
- package/src/utils/model-normalization.ts +100 -0
- package/src/utils/request-logger.ts +179 -0
- package/src/utils/stream.ts +93 -0
- package/src/utils/validation.ts +69 -0
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
import { BaseAdapter } from './base/BaseAdapter';
|
|
2
|
+
import { ChatOptions, ChatResponse, StreamChunk, Message } from '../types';
|
|
3
|
+
import { parseSSEStream } from '../utils/stream';
|
|
4
|
+
import { handleProviderError } from '../utils/errors';
|
|
5
|
+
|
|
6
|
+
/** Message shape expected by the OpenAI chat-completions wire format. */
interface OpenAIMessage {
  role: string;
  content: string;
}

/**
 * Request body for POST {baseUrl}/chat/completions.
 * Field names use the OpenAI snake_case wire convention; optional fields
 * are only set when the caller supplied the corresponding ChatOptions value.
 */
interface OpenAIRequest {
  model: string;
  messages: OpenAIMessage[];
  temperature?: number;
  max_tokens?: number;
  top_p?: number;
  frequency_penalty?: number;
  presence_penalty?: number;
  stop?: string | string[];
  stream?: boolean;
}
|
|
22
|
+
|
|
23
|
+
/**
|
|
24
|
+
* OpenAI API adapter
|
|
25
|
+
*/
|
|
26
|
+
export class OpenAIAdapter extends BaseAdapter {
|
|
27
|
+
get name(): string {
|
|
28
|
+
return 'openai';
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
|
|
32
|
+
try {
|
|
33
|
+
const apiKey = this.getApiKey(options);
|
|
34
|
+
const baseUrl = this.getBaseUrl('https://api.openai.com/v1');
|
|
35
|
+
|
|
36
|
+
// Strip provider prefix from model if present
|
|
37
|
+
const model = options.model.replace(/^openai\//, '');
|
|
38
|
+
|
|
39
|
+
const request: OpenAIRequest = {
|
|
40
|
+
model,
|
|
41
|
+
messages: this.transformMessages(options.messages),
|
|
42
|
+
stream: options.stream || false,
|
|
43
|
+
};
|
|
44
|
+
|
|
45
|
+
// Add optional parameters
|
|
46
|
+
if (options.temperature !== undefined) request.temperature = options.temperature;
|
|
47
|
+
if (options.maxTokens !== undefined) request.max_tokens = options.maxTokens;
|
|
48
|
+
if (options.topP !== undefined) request.top_p = options.topP;
|
|
49
|
+
if (options.frequencyPenalty !== undefined) request.frequency_penalty = options.frequencyPenalty;
|
|
50
|
+
if (options.presencePenalty !== undefined) request.presence_penalty = options.presencePenalty;
|
|
51
|
+
if (options.stop !== undefined) request.stop = options.stop;
|
|
52
|
+
|
|
53
|
+
// Merge provider-specific options
|
|
54
|
+
if (options.providerOptions) {
|
|
55
|
+
Object.assign(request, options.providerOptions);
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
const response = await this.fetchWithErrorHandling(
|
|
59
|
+
`${baseUrl}/chat/completions`,
|
|
60
|
+
{
|
|
61
|
+
method: 'POST',
|
|
62
|
+
headers: {
|
|
63
|
+
'Content-Type': 'application/json',
|
|
64
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
65
|
+
},
|
|
66
|
+
body: JSON.stringify(request),
|
|
67
|
+
},
|
|
68
|
+
this.name
|
|
69
|
+
);
|
|
70
|
+
|
|
71
|
+
if (options.stream) {
|
|
72
|
+
return this.handleStreamResponse(response, model);
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
return this.handleNonStreamResponse(await response.json(), model);
|
|
76
|
+
} catch (error) {
|
|
77
|
+
handleProviderError(error, this.name);
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
private transformMessages(messages: Message[]): OpenAIMessage[] {
|
|
82
|
+
return messages.map((msg) => ({
|
|
83
|
+
role: msg.role,
|
|
84
|
+
content: msg.content,
|
|
85
|
+
}));
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
private handleNonStreamResponse(data: any, model: string): ChatResponse {
|
|
89
|
+
const choice = data.choices?.[0];
|
|
90
|
+
if (!choice) {
|
|
91
|
+
throw new Error('No choices in response');
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
return {
|
|
95
|
+
content: choice.message?.content || '',
|
|
96
|
+
finishReason: choice.finish_reason,
|
|
97
|
+
usage: data.usage ? {
|
|
98
|
+
promptTokens: data.usage.prompt_tokens,
|
|
99
|
+
completionTokens: data.usage.completion_tokens,
|
|
100
|
+
totalTokens: data.usage.total_tokens,
|
|
101
|
+
} : undefined,
|
|
102
|
+
model,
|
|
103
|
+
raw: data,
|
|
104
|
+
};
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
private async *handleStreamResponse(response: Response, model: string): AsyncIterable<StreamChunk> {
|
|
108
|
+
if (!response.body) {
|
|
109
|
+
throw new Error('No response body for streaming');
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
for await (const chunk of parseSSEStream(response.body)) {
|
|
113
|
+
const choice = chunk.choices?.[0];
|
|
114
|
+
if (!choice) continue;
|
|
115
|
+
|
|
116
|
+
const content = choice.delta?.content || '';
|
|
117
|
+
const finishReason = choice.finish_reason;
|
|
118
|
+
|
|
119
|
+
if (content || finishReason) {
|
|
120
|
+
yield {
|
|
121
|
+
content,
|
|
122
|
+
finishReason,
|
|
123
|
+
index: choice.index,
|
|
124
|
+
};
|
|
125
|
+
}
|
|
126
|
+
}
|
|
127
|
+
}
|
|
128
|
+
}
|
|
129
|
+
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
import { BaseAdapter } from './base/BaseAdapter';
|
|
2
|
+
import { ChatOptions, ChatResponse, StreamChunk } from '../types';
|
|
3
|
+
import { parseSSEStream } from '../utils/stream';
|
|
4
|
+
import { handleProviderError } from '../utils/errors';
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* OpenRouter API adapter (uses OpenAI-compatible API)
|
|
8
|
+
*/
|
|
9
|
+
export class OpenRouterAdapter extends BaseAdapter {
|
|
10
|
+
get name(): string {
|
|
11
|
+
return 'openrouter';
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
|
|
15
|
+
try {
|
|
16
|
+
const apiKey = this.getApiKey(options);
|
|
17
|
+
const baseUrl = this.getBaseUrl('https://openrouter.ai/api/v1');
|
|
18
|
+
|
|
19
|
+
// Strip provider prefix from model if present
|
|
20
|
+
const model = options.model.replace(/^openrouter\//, '');
|
|
21
|
+
|
|
22
|
+
const request: any = {
|
|
23
|
+
model,
|
|
24
|
+
messages: options.messages.map((msg) => ({
|
|
25
|
+
role: msg.role,
|
|
26
|
+
content: msg.content,
|
|
27
|
+
})),
|
|
28
|
+
stream: options.stream || false,
|
|
29
|
+
};
|
|
30
|
+
|
|
31
|
+
// Add optional parameters
|
|
32
|
+
if (options.temperature !== undefined) request.temperature = options.temperature;
|
|
33
|
+
if (options.maxTokens !== undefined) request.max_tokens = options.maxTokens;
|
|
34
|
+
if (options.topP !== undefined) request.top_p = options.topP;
|
|
35
|
+
if (options.frequencyPenalty !== undefined) request.frequency_penalty = options.frequencyPenalty;
|
|
36
|
+
if (options.presencePenalty !== undefined) request.presence_penalty = options.presencePenalty;
|
|
37
|
+
if (options.stop !== undefined) request.stop = options.stop;
|
|
38
|
+
|
|
39
|
+
// Merge provider-specific options
|
|
40
|
+
if (options.providerOptions) {
|
|
41
|
+
Object.assign(request, options.providerOptions);
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
const response = await this.fetchWithErrorHandling(
|
|
45
|
+
`${baseUrl}/chat/completions`,
|
|
46
|
+
{
|
|
47
|
+
method: 'POST',
|
|
48
|
+
headers: {
|
|
49
|
+
'Content-Type': 'application/json',
|
|
50
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
51
|
+
'HTTP-Referer': options.providerOptions?.httpReferer || 'https://ai.libx.js',
|
|
52
|
+
'X-Title': options.providerOptions?.xTitle || 'ai.libx.js',
|
|
53
|
+
},
|
|
54
|
+
body: JSON.stringify(request),
|
|
55
|
+
},
|
|
56
|
+
this.name
|
|
57
|
+
);
|
|
58
|
+
|
|
59
|
+
if (options.stream) {
|
|
60
|
+
return this.handleStreamResponse(response, model);
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
return this.handleNonStreamResponse(await response.json(), model);
|
|
64
|
+
} catch (error) {
|
|
65
|
+
handleProviderError(error, this.name);
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
private handleNonStreamResponse(data: any, model: string): ChatResponse {
|
|
70
|
+
const choice = data.choices?.[0];
|
|
71
|
+
if (!choice) {
|
|
72
|
+
throw new Error('No choices in response');
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
return {
|
|
76
|
+
content: choice.message?.content || '',
|
|
77
|
+
finishReason: choice.finish_reason,
|
|
78
|
+
usage: data.usage ? {
|
|
79
|
+
promptTokens: data.usage.prompt_tokens,
|
|
80
|
+
completionTokens: data.usage.completion_tokens,
|
|
81
|
+
totalTokens: data.usage.total_tokens,
|
|
82
|
+
} : undefined,
|
|
83
|
+
model,
|
|
84
|
+
raw: data,
|
|
85
|
+
};
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
private async *handleStreamResponse(response: Response, model: string): AsyncIterable<StreamChunk> {
|
|
89
|
+
if (!response.body) {
|
|
90
|
+
throw new Error('No response body for streaming');
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
for await (const chunk of parseSSEStream(response.body)) {
|
|
94
|
+
const choice = chunk.choices?.[0];
|
|
95
|
+
if (!choice) continue;
|
|
96
|
+
|
|
97
|
+
const content = choice.delta?.content || '';
|
|
98
|
+
const finishReason = choice.finish_reason;
|
|
99
|
+
|
|
100
|
+
if (content || finishReason) {
|
|
101
|
+
yield {
|
|
102
|
+
content,
|
|
103
|
+
finishReason,
|
|
104
|
+
index: choice.index,
|
|
105
|
+
};
|
|
106
|
+
}
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
import { BaseAdapter } from './base/BaseAdapter';
|
|
2
|
+
import { ChatOptions, ChatResponse, StreamChunk } from '../types';
|
|
3
|
+
import { parseSSEStream } from '../utils/stream';
|
|
4
|
+
import { handleProviderError } from '../utils/errors';
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* xAI (Grok) API adapter (uses OpenAI-compatible API)
|
|
8
|
+
*/
|
|
9
|
+
export class XAIAdapter extends BaseAdapter {
|
|
10
|
+
get name(): string {
|
|
11
|
+
return 'xai';
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
async chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>> {
|
|
15
|
+
try {
|
|
16
|
+
const apiKey = this.getApiKey(options);
|
|
17
|
+
const baseUrl = this.getBaseUrl('https://api.x.ai/v1');
|
|
18
|
+
|
|
19
|
+
// Strip provider prefix from model if present
|
|
20
|
+
const model = options.model.replace(/^xai\//, '');
|
|
21
|
+
|
|
22
|
+
const request: any = {
|
|
23
|
+
model,
|
|
24
|
+
messages: options.messages.map((msg) => ({
|
|
25
|
+
role: msg.role,
|
|
26
|
+
content: msg.content,
|
|
27
|
+
})),
|
|
28
|
+
stream: options.stream || false,
|
|
29
|
+
};
|
|
30
|
+
|
|
31
|
+
// Add optional parameters
|
|
32
|
+
if (options.temperature !== undefined) request.temperature = options.temperature;
|
|
33
|
+
if (options.maxTokens !== undefined) request.max_tokens = options.maxTokens;
|
|
34
|
+
if (options.topP !== undefined) request.top_p = options.topP;
|
|
35
|
+
if (options.stop !== undefined) request.stop = options.stop;
|
|
36
|
+
|
|
37
|
+
// Merge provider-specific options
|
|
38
|
+
if (options.providerOptions) {
|
|
39
|
+
Object.assign(request, options.providerOptions);
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
const response = await this.fetchWithErrorHandling(
|
|
43
|
+
`${baseUrl}/chat/completions`,
|
|
44
|
+
{
|
|
45
|
+
method: 'POST',
|
|
46
|
+
headers: {
|
|
47
|
+
'Content-Type': 'application/json',
|
|
48
|
+
'Authorization': `Bearer ${apiKey}`,
|
|
49
|
+
},
|
|
50
|
+
body: JSON.stringify(request),
|
|
51
|
+
},
|
|
52
|
+
this.name
|
|
53
|
+
);
|
|
54
|
+
|
|
55
|
+
if (options.stream) {
|
|
56
|
+
return this.handleStreamResponse(response, model);
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
return this.handleNonStreamResponse(await response.json(), model);
|
|
60
|
+
} catch (error) {
|
|
61
|
+
handleProviderError(error, this.name);
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
private handleNonStreamResponse(data: any, model: string): ChatResponse {
|
|
66
|
+
const choice = data.choices?.[0];
|
|
67
|
+
if (!choice) {
|
|
68
|
+
throw new Error('No choices in response');
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
return {
|
|
72
|
+
content: choice.message?.content || '',
|
|
73
|
+
finishReason: choice.finish_reason,
|
|
74
|
+
usage: data.usage ? {
|
|
75
|
+
promptTokens: data.usage.prompt_tokens,
|
|
76
|
+
completionTokens: data.usage.completion_tokens,
|
|
77
|
+
totalTokens: data.usage.total_tokens,
|
|
78
|
+
} : undefined,
|
|
79
|
+
model,
|
|
80
|
+
raw: data,
|
|
81
|
+
};
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
private async *handleStreamResponse(response: Response, model: string): AsyncIterable<StreamChunk> {
|
|
85
|
+
if (!response.body) {
|
|
86
|
+
throw new Error('No response body for streaming');
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
for await (const chunk of parseSSEStream(response.body)) {
|
|
90
|
+
const choice = chunk.choices?.[0];
|
|
91
|
+
if (!choice) continue;
|
|
92
|
+
|
|
93
|
+
const content = choice.delta?.content || '';
|
|
94
|
+
const finishReason = choice.finish_reason;
|
|
95
|
+
|
|
96
|
+
if (content || finishReason) {
|
|
97
|
+
yield {
|
|
98
|
+
content,
|
|
99
|
+
finishReason,
|
|
100
|
+
index: choice.index,
|
|
101
|
+
};
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
}
|
|
106
|
+
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
// Main export: AIClient
|
|
2
|
+
export { AIClient } from './AIClient';
|
|
3
|
+
export type { AIClientConfig } from './AIClient';
|
|
4
|
+
|
|
5
|
+
// Default export
|
|
6
|
+
import { AIClient } from './AIClient';
|
|
7
|
+
export default AIClient;
|
|
8
|
+
|
|
9
|
+
// Export types
|
|
10
|
+
export {
|
|
11
|
+
Message,
|
|
12
|
+
MessageRole,
|
|
13
|
+
ChatOptions,
|
|
14
|
+
ChatResponse,
|
|
15
|
+
StreamChunk,
|
|
16
|
+
ModelCapabilities,
|
|
17
|
+
ModelInfo,
|
|
18
|
+
ProviderConfig,
|
|
19
|
+
} from './types';
|
|
20
|
+
|
|
21
|
+
export { IProviderAdapter } from './types/provider';
|
|
22
|
+
export { ChatStream } from './types/streaming';
|
|
23
|
+
|
|
24
|
+
// Export model utilities
|
|
25
|
+
export {
|
|
26
|
+
supportedModels,
|
|
27
|
+
getProviderFromModel,
|
|
28
|
+
getModelInfo,
|
|
29
|
+
listModels,
|
|
30
|
+
isModelSupported,
|
|
31
|
+
} from './models';
|
|
32
|
+
|
|
33
|
+
// Export model normalization utilities
|
|
34
|
+
export {
|
|
35
|
+
normalizeModelName,
|
|
36
|
+
isReasoningModel,
|
|
37
|
+
supportsSystemMessages,
|
|
38
|
+
getReasoningModelAdjustments,
|
|
39
|
+
} from './utils/model-normalization';
|
|
40
|
+
|
|
41
|
+
// Export request logger
|
|
42
|
+
export {
|
|
43
|
+
RequestLogger,
|
|
44
|
+
getRequestLogger,
|
|
45
|
+
} from './utils/request-logger';
|
|
46
|
+
|
|
47
|
+
export type {
|
|
48
|
+
RequestMetadata,
|
|
49
|
+
RequestTracker,
|
|
50
|
+
CompletedRequest,
|
|
51
|
+
ProviderStats,
|
|
52
|
+
LoggerStats,
|
|
53
|
+
} from './utils/request-logger';
|
|
54
|
+
|
|
55
|
+
// Export errors
|
|
56
|
+
export {
|
|
57
|
+
AILibError,
|
|
58
|
+
AuthenticationError,
|
|
59
|
+
InvalidRequestError,
|
|
60
|
+
RateLimitError,
|
|
61
|
+
ModelNotFoundError,
|
|
62
|
+
ProviderError,
|
|
63
|
+
} from './utils/errors';
|
|
64
|
+
|
|
65
|
+
// Note: Individual adapters are exported from './adapters' subpath
|
|
66
|
+
// Usage: import { OpenAIAdapter } from 'ai.libx.js/adapters';
|
package/src/models.ts
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
import { ModelInfo } from './types';
|
|
2
|
+
|
|
3
|
+
// Import the supported models data
|
|
4
|
+
export const supportedModels: Record<string, ModelInfo> = {
|
|
5
|
+
'openai/gpt-5': { displayName: 'GPT-5' },
|
|
6
|
+
'openai/gpt-5-mini': { displayName: 'GPT-5 mini' },
|
|
7
|
+
'openai/gpt-5-nano': { displayName: 'GPT-5 nano' },
|
|
8
|
+
'openai/gpt-5-chat-latest': { displayName: 'GPT-5 Chat' },
|
|
9
|
+
'openai/gpt-4.1': { displayName: 'GPT 4.1' },
|
|
10
|
+
'openai/gpt-4.1-mini': { displayName: 'GPT 4.1 mini' },
|
|
11
|
+
'openai/gpt-4.1-nano': { displayName: 'GPT 4.1 nano' },
|
|
12
|
+
'openai/gpt-4.5-preview': { displayName: 'gpt 4.5 preview' },
|
|
13
|
+
'openai/o3-mini': { displayName: 'gpt o3 mini', noSystem: true },
|
|
14
|
+
'openai/gpt-4o-audio-preview': { displayName: 'gpt-4o-audio-preview', enabled: false },
|
|
15
|
+
'openai/gpt-4o-mini-audio-preview': { displayName: 'gpt-4o-mini-audio-preview', enabled: false },
|
|
16
|
+
'openai/gpt-4o-mini-realtime-preview': { displayName: 'gpt-4o-mini-realtime-preview', noChat: true },
|
|
17
|
+
'openai/gpt-4o-realtime-preview': { displayName: 'gpt-4o-realtime-preview', noChat: true },
|
|
18
|
+
'openai/o1-preview': { displayName: 'o1-preview', noSystem: true },
|
|
19
|
+
'openai/o1-mini': { displayName: 'o1-mini', noSystem: true },
|
|
20
|
+
'openai/o1': { displayName: 'o1', noSystem: true, enabled: false },
|
|
21
|
+
'openai/chatgpt-4o-latest': { displayName: ' ChatGPT GPT-4o' },
|
|
22
|
+
'openai/gpt-4o-mini': { displayName: 'GPT-4o mini' },
|
|
23
|
+
'openai/gpt-4o': { displayName: ' GPT-4o' },
|
|
24
|
+
'openai/gpt-4o-2024-05-13': { displayName: ' GPT-4o (2024-05-13)' },
|
|
25
|
+
'openai/gpt-4-turbo': { displayName: 'GPT-4 Turbo with Vision' },
|
|
26
|
+
'openai/gpt-4-turbo-2024-04-09': { displayName: 'GPT-4 Turbo with Vision (2024-04-09)' },
|
|
27
|
+
'openai/gpt-4-turbo-preview': { displayName: 'GPT-4 Turbo (gpt-4-turbo-preview)' },
|
|
28
|
+
'openai/gpt-4-0125-preview': { displayName: 'GPT-4 25-01 (gpt-4-0125-preview)' },
|
|
29
|
+
'openai/gpt-4-1106-preview': { displayName: 'GPT-4 06-11 (gpt-4-1106-preview)' },
|
|
30
|
+
'openai/gpt-4-vision-preview': { displayName: 'GPT-4 Vision', enabled: false },
|
|
31
|
+
'openai/gpt-4-32k': { displayName: 'gpt-4-32k' },
|
|
32
|
+
'openai/gpt-4-0613': { displayName: 'gpt-4-0613' },
|
|
33
|
+
'openai/gpt-4-32k-0613': { displayName: 'gpt-4-32k-0613' },
|
|
34
|
+
'openai/gpt-4': { displayName: 'gpt-4' },
|
|
35
|
+
'openai/gpt-4-0314': { displayName: 'gpt-4-0314' },
|
|
36
|
+
'openai/gpt-3.5-turbo': { displayName: 'gpt-3.5-turbo' },
|
|
37
|
+
'openai/gpt-3.5-turbo-0125': { displayName: 'gpt-3.5-turbo-0125' },
|
|
38
|
+
'openai/gpt-3.5-turbo-1106': { displayName: 'gpt-3.5-turbo-1106' },
|
|
39
|
+
'openai/gpt-3.5-turbo-instruct': { displayName: 'gpt-3.5-turbo-instruct' },
|
|
40
|
+
'openai/gpt-3.5-turbo-16k': { displayName: 'gpt-3.5-turbo-16k' },
|
|
41
|
+
'openai/gpt-3.5-turbo-0613': { displayName: 'gpt-3.5-turbo-0613', enabled: false },
|
|
42
|
+
'openai/gpt-3.5-turbo-16k-0613': { displayName: 'gpt-3.5-turbo-16k-0613', enabled: false },
|
|
43
|
+
'openai/text-davinci-003': { displayName: 'text-davinci-003' },
|
|
44
|
+
'openai/text-davinci-002': { displayName: 'text-davinci-002' },
|
|
45
|
+
'google/models/gemini-2.5-flash': { displayName: 'Gemini 2.5 Flash', reasoning: true },
|
|
46
|
+
'google/models/gemini-2.5-flash-lite-preview-06-17': { displayName: 'Gemini 2.5 Flash-Lite Preview 06-17', reasoning: true },
|
|
47
|
+
'google/models/gemini-2.5-pro': { displayName: 'Gemini 2.5 Pro', reasoning: true },
|
|
48
|
+
'google/models/gemini-2.5-pro-exp-03-25': { displayName: 'Gemini 2.5 Pro Experimental 03-25', reasoning: true },
|
|
49
|
+
'google/models/gemini-2.0-flash': { displayName: 'Gemini 2.0 Flash' },
|
|
50
|
+
'google/models/gemini-2.0-flash-lite': { displayName: 'Gemini 2.0 Flash-Lite' },
|
|
51
|
+
'google/models/gemini-2.0-flash-preview-image-generation': { displayName: 'Gemini 2.0 Flash Preview Image Generation', responseModalities: ['Text', 'Image'] },
|
|
52
|
+
'google/models/gemini-2.0-pro-exp-02-05': { displayName: 'Gemini 2.0 Pro Experimental 02-05' },
|
|
53
|
+
'google/models/gemini-2.0-flash-thinking-exp-01-21': { displayName: 'Gemini 2.0 Flash Thinking Experimental 01-21' },
|
|
54
|
+
'google/models/gemini-2.0-flash-exp': { displayName: 'Gemini 2.0 Flash Experimental', responseModalities: ['Text', 'Image'] },
|
|
55
|
+
'google/models/gemini-2.0-flash-thinking-exp-1219': { displayName: 'Gemini 2.0 Flash Thinking Mode' },
|
|
56
|
+
'google/models/gemma-3n-e4b-it': { displayName: 'Gemma 3n E4B' },
|
|
57
|
+
'google/models/gemma-3n-e2b-it': { displayName: 'Gemma 3n E2B' },
|
|
58
|
+
'google/models/gemma-3-27b-it': { displayName: 'Gemma 3 27B' },
|
|
59
|
+
'google/models/learnlm-1.5-pro-experimental': { displayName: 'LearnLM 1.5 Pro Experimental' },
|
|
60
|
+
'google/models/gemini-exp-1206': { displayName: 'Gemini (exp-1206)' },
|
|
61
|
+
'google/models/gemini-1.5-flash-8b': { displayName: 'Gemini 1.5 Flash-8B' },
|
|
62
|
+
'google/models/gemini-1.5-pro-002': { displayName: 'Gemini 1.5 Pro 2' },
|
|
63
|
+
'google/models/gemini-1.5-flash-002': { displayName: 'Gemini 1.5 Flash 2' },
|
|
64
|
+
'google/models/gemini-1.5-flash-latest': { displayName: 'Gemini 1.5 Flash' },
|
|
65
|
+
'google/models/gemini-1.5-pro-latest': { displayName: 'Gemini 1.5 Pro' },
|
|
66
|
+
'google/models/gemini-1.0-pro-latest': { displayName: 'Gemini 1.0 Pro' },
|
|
67
|
+
'google/models/gemini-pro-vision': { displayName: 'Gemini 1.0 Pro Vision' },
|
|
68
|
+
'google/models/chat-bison-001': { displayName: 'PaLM 2 Chat (Legacy)' },
|
|
69
|
+
'anthropic/claude-opus-4-1': { displayName: 'Claude Opus 4.1', url: 'https://api.anthropic.com/v1/messages' },
|
|
70
|
+
'anthropic/claude-opus-4-0': { displayName: 'Claude Opus 4', url: 'https://api.anthropic.com/v1/messages' },
|
|
71
|
+
'anthropic/claude-sonnet-4-5': { displayName: 'Claude Sonnet 4.5', url: 'https://api.anthropic.com/v1/messages' },
|
|
72
|
+
'anthropic/claude-sonnet-4-0': { displayName: 'Claude Sonnet 4', url: 'https://api.anthropic.com/v1/messages' },
|
|
73
|
+
'anthropic/claude-3-7-sonnet-latest': { displayName: 'Claude 3.7 Sonnet', url: 'https://api.anthropic.com/v1/messages' },
|
|
74
|
+
'anthropic/claude-3-5-sonnet-latest': { displayName: 'Claude 3.5 Sonnet v2', url: 'https://api.anthropic.com/v1/messages' },
|
|
75
|
+
'anthropic/claude-3-5-haiku-latest': { displayName: 'Claude 3.5 Haiku', url: 'https://api.anthropic.com/v1/messages' },
|
|
76
|
+
'anthropic/claude-3-haiku-20240307': { displayName: 'Claude 3 Haiku', url: 'https://api.anthropic.com/v1/messages' },
|
|
77
|
+
'anthropic/claude-3-sonnet-20240229': { displayName: 'Claude 3 Sonnet', url: 'https://api.anthropic.com/v1/messages' },
|
|
78
|
+
'anthropic/claude-3-opus-20240229': { displayName: 'Claude 3 Opus', url: 'https://api.anthropic.com/v1/messages' },
|
|
79
|
+
'anthropic/claude-2.1': { displayName: 'Claude 2.1' },
|
|
80
|
+
'anthropic/claude-instant-1.2': { displayName: 'Claude Instant' },
|
|
81
|
+
// ... Continue with other providers (truncated for brevity)
|
|
82
|
+
} as const;
|
|
83
|
+
|
|
84
|
+
/**
|
|
85
|
+
* Extract provider name from model string (e.g., "openai/gpt-4" -> "openai")
|
|
86
|
+
*/
|
|
87
|
+
export function getProviderFromModel(model: string): string | null {
|
|
88
|
+
const parts = model.split('/');
|
|
89
|
+
return parts.length > 1 ? parts[0] : null;
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
/**
 * Get model information for a fully-qualified model id
 * (e.g. "openai/gpt-4o"). Returns undefined for unknown models.
 */
export function getModelInfo(model: string): ModelInfo | undefined {
  return supportedModels[model];
}
|
|
98
|
+
|
|
99
|
+
/**
|
|
100
|
+
* List all models for a provider
|
|
101
|
+
*/
|
|
102
|
+
export function listModels(provider?: string): string[] {
|
|
103
|
+
const models = Object.keys(supportedModels);
|
|
104
|
+
if (!provider) {
|
|
105
|
+
return models;
|
|
106
|
+
}
|
|
107
|
+
return models.filter((m) => m.startsWith(`${provider}/`));
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
/**
 * Check if a model exists in the supportedModels registry.
 * The id must be fully qualified (e.g. "openai/gpt-4o").
 */
export function isModelSupported(model: string): boolean {
  return model in supportedModels;
}
|
|
116
|
+
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
// Core types for AI interactions

/** Role attached to a chat message. */
export type MessageRole = 'system' | 'user' | 'assistant' | 'tool';

/** A single chat message exchanged with a provider. */
export interface Message {
  role: MessageRole;
  content: string;
  // Optional participant name (OpenAI-style field).
  name?: string;
  // Links a 'tool' message back to the tool call it answers.
  tool_call_id?: string;
  files?: ImageFile[]; // Multi-modal support: images
}

/** Reference to an image attached to a message. */
export interface ImageFile {
  url: string;
  type?: string; // e.g., 'image/jpeg', 'image/png'
}

/** Options accepted by chat() calls on the client and adapters. */
export interface ChatOptions {
  // Model id, typically provider-prefixed, e.g. "openai/gpt-4o".
  model: string;
  messages: Message[];
  // Per-call API key (adapters read this via getApiKey).
  apiKey?: string;

  // Common parameters
  temperature?: number;
  maxTokens?: number;
  topP?: number;
  topK?: number;
  frequencyPenalty?: number;
  presencePenalty?: number;
  stop?: string | string[];
  // When true, chat() returns an AsyncIterable<StreamChunk> instead of ChatResponse.
  stream?: boolean;
  plain?: boolean; // Plain text mode - returns raw text without JSON/SSE wrapping

  // Provider-specific options, merged verbatim into the outgoing request body.
  providerOptions?: Record<string, any>;
}

/** Normalized non-streaming result returned by adapters. */
export interface ChatResponse {
  content: string;
  finishReason?: 'stop' | 'length' | 'tool_calls' | 'content_filter' | string;
  // Token accounting, present only when the provider reported usage.
  usage?: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
  model: string;
  raw?: any; // Original provider response
}

/** One increment of a streaming response. */
export interface StreamChunk {
  content: string;
  finishReason?: string;
  index?: number;
}

/** Capability flags describing what a model supports. */
export interface ModelCapabilities {
  streaming?: boolean;
  vision?: boolean;
  imageInput?: boolean;
  imageGen?: boolean;
  reasoning?: boolean;
  noSystem?: boolean; // Doesn't support system messages (o1/o3)
  noChat?: boolean;
  noTopPWithTemp?: boolean; // Can't use top_p with temperature
  responseModalities?: string[];
  audio?: boolean;
  video?: boolean;
}

/** Registry entry for a supported model (see models.ts). */
export interface ModelInfo extends ModelCapabilities {
  displayName: string;
  // When false, the model is listed but disabled.
  enabled?: boolean;
  // Optional explicit endpoint URL override.
  url?: string;
}

/** Per-provider configuration (key, endpoint, plus arbitrary extras). */
export interface ProviderConfig {
  apiKey?: string;
  baseUrl?: string;
  [key: string]: any;
}
|
|
81
|
+
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import { ChatOptions, ChatResponse, StreamChunk, ProviderConfig } from './index';
|
|
2
|
+
|
|
3
|
+
/**
 * Contract implemented by every provider adapter in this package.
 */
export interface IProviderAdapter {
  /**
   * Execute a chat completion request.
   * @param options - model, messages, sampling parameters, provider options
   * @returns ChatResponse for non-streaming, AsyncIterable<StreamChunk> for streaming
   */
  chat(options: ChatOptions): Promise<ChatResponse | AsyncIterable<StreamChunk>>;

  /**
   * Get the provider name (e.g. 'openai', 'openrouter', 'xai').
   */
  readonly name: string;
}

/**
 * Constructor shape of an adapter class, allowing adapters to be
 * instantiated generically from a ProviderConfig.
 */
export interface ProviderAdapterConstructor {
  new(config?: ProviderConfig): IProviderAdapter;
}
|
|
19
|
+
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import { StreamChunk } from './index';
|
|
2
|
+
|
|
3
|
+
/** Callback invoked with each chunk of a streaming response. */
export type StreamCallback = (chunk: StreamChunk) => void;

/** Options controlling consumption of a streaming response. */
export interface StreamOptions {
  onChunk?: StreamCallback;
  // Lets the caller abort the stream.
  signal?: AbortSignal;
}

/**
 * Helper type for streams that can be async iterated
 */
export type ChatStream = AsyncIterable<StreamChunk>;
|
|
14
|
+
|
|
15
|
+
/**
|
|
16
|
+
* Transform a ReadableStream to AsyncIterable
|
|
17
|
+
*/
|
|
18
|
+
export async function* streamToAsyncIterable(
|
|
19
|
+
stream: ReadableStream<Uint8Array>
|
|
20
|
+
): AsyncIterable<Uint8Array> {
|
|
21
|
+
const reader = stream.getReader();
|
|
22
|
+
try {
|
|
23
|
+
while (true) {
|
|
24
|
+
const { done, value } = await reader.read();
|
|
25
|
+
if (done) break;
|
|
26
|
+
yield value;
|
|
27
|
+
}
|
|
28
|
+
} finally {
|
|
29
|
+
reader.releaseLock();
|
|
30
|
+
}
|
|
31
|
+
}
|
|
32
|
+
|