ai.libx.js 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +339 -0
- package/build/@Module.d.ts +6 -0
- package/build/@Module.js +14 -0
- package/build/@Module.js.map +1 -0
- package/build/AIClient.d.ts +19 -0
- package/build/AIClient.js +132 -0
- package/build/AIClient.js.map +1 -0
- package/build/Extensions.d.ts +3 -0
- package/build/Extensions.js +4 -0
- package/build/Extensions.js.map +1 -0
- package/build/adapters/ai21.d.ts +8 -0
- package/build/adapters/ai21.js +83 -0
- package/build/adapters/ai21.js.map +1 -0
- package/build/adapters/anthropic.d.ts +9 -0
- package/build/adapters/anthropic.js +162 -0
- package/build/adapters/anthropic.js.map +1 -0
- package/build/adapters/base/BaseAdapter.d.ts +13 -0
- package/build/adapters/base/BaseAdapter.js +56 -0
- package/build/adapters/base/BaseAdapter.js.map +1 -0
- package/build/adapters/cloudflare.d.ts +8 -0
- package/build/adapters/cloudflare.js +129 -0
- package/build/adapters/cloudflare.js.map +1 -0
- package/build/adapters/cohere.d.ts +9 -0
- package/build/adapters/cohere.js +158 -0
- package/build/adapters/cohere.js.map +1 -0
- package/build/adapters/deepseek.d.ts +8 -0
- package/build/adapters/deepseek.js +142 -0
- package/build/adapters/deepseek.js.map +1 -0
- package/build/adapters/google.d.ts +9 -0
- package/build/adapters/google.js +166 -0
- package/build/adapters/google.js.map +1 -0
- package/build/adapters/groq.d.ts +8 -0
- package/build/adapters/groq.js +142 -0
- package/build/adapters/groq.js.map +1 -0
- package/build/adapters/index.d.ts +12 -0
- package/build/adapters/index.js +28 -0
- package/build/adapters/index.js.map +1 -0
- package/build/adapters/mistral.d.ts +8 -0
- package/build/adapters/mistral.js +139 -0
- package/build/adapters/mistral.js.map +1 -0
- package/build/adapters/openai.d.ts +9 -0
- package/build/adapters/openai.js +145 -0
- package/build/adapters/openai.js.map +1 -0
- package/build/adapters/openrouter.d.ts +8 -0
- package/build/adapters/openrouter.js +145 -0
- package/build/adapters/openrouter.js.map +1 -0
- package/build/adapters/xai.d.ts +8 -0
- package/build/adapters/xai.js +138 -0
- package/build/adapters/xai.js.map +1 -0
- package/build/index.d.ts +12 -0
- package/build/index.js +29 -0
- package/build/index.js.map +1 -0
- package/build/models.d.ts +6 -0
- package/build/models.js +103 -0
- package/build/models.js.map +1 -0
- package/build/types/index.d.ts +66 -0
- package/build/types/index.js +3 -0
- package/build/types/index.js.map +1 -0
- package/build/types/provider.d.ts +8 -0
- package/build/types/provider.js +3 -0
- package/build/types/provider.js.map +1 -0
- package/build/types/streaming.d.ts +8 -0
- package/build/types/streaming.js +33 -0
- package/build/types/streaming.js.map +1 -0
- package/build/utils/errors.d.ts +21 -0
- package/build/utils/errors.js +70 -0
- package/build/utils/errors.js.map +1 -0
- package/build/utils/model-normalization.d.ts +9 -0
- package/build/utils/model-normalization.js +59 -0
- package/build/utils/model-normalization.js.map +1 -0
- package/build/utils/request-logger.d.ts +43 -0
- package/build/utils/request-logger.js +96 -0
- package/build/utils/request-logger.js.map +1 -0
- package/build/utils/stream.d.ts +8 -0
- package/build/utils/stream.js +109 -0
- package/build/utils/stream.js.map +1 -0
- package/build/utils/validation.d.ts +4 -0
- package/build/utils/validation.js +57 -0
- package/build/utils/validation.js.map +1 -0
- package/example.ts +166 -0
- package/jest.config.js +26 -0
- package/package.json +68 -0
- package/src/@Module.ts +9 -0
- package/src/AIClient.ts +210 -0
- package/src/Extensions.ts +7 -0
- package/src/adapters/ai21.ts +99 -0
- package/src/adapters/anthropic.ts +152 -0
- package/src/adapters/base/BaseAdapter.ts +78 -0
- package/src/adapters/cloudflare.ts +115 -0
- package/src/adapters/cohere.ts +158 -0
- package/src/adapters/deepseek.ts +108 -0
- package/src/adapters/google.ts +170 -0
- package/src/adapters/groq.ts +108 -0
- package/src/adapters/index.ts +14 -0
- package/src/adapters/mistral.ts +108 -0
- package/src/adapters/openai.ts +129 -0
- package/src/adapters/openrouter.ts +110 -0
- package/src/adapters/xai.ts +106 -0
- package/src/index.ts +66 -0
- package/src/models.ts +116 -0
- package/src/types/index.ts +81 -0
- package/src/types/provider.ts +19 -0
- package/src/types/streaming.ts +32 -0
- package/src/utils/errors.ts +76 -0
- package/src/utils/model-normalization.ts +100 -0
- package/src/utils/request-logger.ts +179 -0
- package/src/utils/stream.ts +93 -0
- package/src/utils/validation.ts +69 -0
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
export class AILibError extends Error {
|
|
2
|
+
constructor(message: string, public code?: string, public statusCode?: number) {
|
|
3
|
+
super(message);
|
|
4
|
+
this.name = 'AILibError';
|
|
5
|
+
}
|
|
6
|
+
}
|
|
7
|
+
|
|
8
|
+
export class AuthenticationError extends AILibError {
|
|
9
|
+
constructor(message: string, provider?: string) {
|
|
10
|
+
super(
|
|
11
|
+
provider ? `${provider}: ${message}` : message,
|
|
12
|
+
'AUTHENTICATION_ERROR',
|
|
13
|
+
401
|
|
14
|
+
);
|
|
15
|
+
this.name = 'AuthenticationError';
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
export class InvalidRequestError extends AILibError {
|
|
20
|
+
constructor(message: string) {
|
|
21
|
+
super(message, 'INVALID_REQUEST', 400);
|
|
22
|
+
this.name = 'InvalidRequestError';
|
|
23
|
+
}
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
export class RateLimitError extends AILibError {
|
|
27
|
+
constructor(message: string, provider?: string) {
|
|
28
|
+
super(
|
|
29
|
+
provider ? `${provider}: ${message}` : message,
|
|
30
|
+
'RATE_LIMIT_ERROR',
|
|
31
|
+
429
|
|
32
|
+
);
|
|
33
|
+
this.name = 'RateLimitError';
|
|
34
|
+
}
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
export class ModelNotFoundError extends AILibError {
|
|
38
|
+
constructor(model: string) {
|
|
39
|
+
super(`Model not found: ${model}`, 'MODEL_NOT_FOUND', 404);
|
|
40
|
+
this.name = 'ModelNotFoundError';
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
export class ProviderError extends AILibError {
|
|
45
|
+
constructor(message: string, provider: string, originalError?: any) {
|
|
46
|
+
super(`${provider}: ${message}`, 'PROVIDER_ERROR');
|
|
47
|
+
this.name = 'ProviderError';
|
|
48
|
+
if (originalError) {
|
|
49
|
+
this.stack = originalError.stack;
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
export function handleProviderError(error: any, provider: string): never {
|
|
55
|
+
if (error instanceof AILibError) {
|
|
56
|
+
throw error;
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
const status = error.status || error.statusCode;
|
|
60
|
+
const message = error.message || error.error?.message || 'Unknown error';
|
|
61
|
+
|
|
62
|
+
if (status === 401 || status === 403) {
|
|
63
|
+
throw new AuthenticationError(message, provider);
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
if (status === 429) {
|
|
67
|
+
throw new RateLimitError(message, provider);
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
if (status === 400) {
|
|
71
|
+
throw new InvalidRequestError(message);
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
throw new ProviderError(message, provider, error);
|
|
75
|
+
}
|
|
76
|
+
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Model alias resolution and normalization
|
|
3
|
+
* Handles common aliases and provides forward compatibility
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
/**
 * Static alias table mapping shorthand names to concrete model ids.
 * Keys are matched against the model segment only — normalizeModelName
 * strips any provider prefix before looking up here.
 *
 * NOTE(review): alias targets pin specific released models; confirm they
 * still match each provider's current lineup before a release.
 */
const MODEL_ALIASES: Record<string, string> = {
  // OpenAI aliases
  'gpt-5': 'chatgpt-4o-latest',
  'gpt-4.5-preview': 'gpt-4o',
  'gpt-4.5': 'gpt-4o',
  'gpt-4-latest': 'gpt-4o',

  // Anthropic aliases
  'claude-4': 'claude-sonnet-4-0',
  'claude-3.5': 'claude-3-5-sonnet-latest',
  'claude-3': 'claude-3-opus-20240229',

  // Google aliases (targets themselves contain a 'models/' path segment)
  'gemini': 'models/gemini-2.0-flash',
  'gemini-2': 'models/gemini-2.0-flash',
  'gemini-pro': 'models/gemini-2.5-pro',

  // DeepSeek aliases
  'deepseek': 'deepseek-chat',
  'deepseek-r1': 'deepseek-reasoner',
};
|
|
27
|
+
|
|
28
|
+
/**
|
|
29
|
+
* Normalize model name by resolving aliases
|
|
30
|
+
*/
|
|
31
|
+
export function normalizeModelName(model: string): string {
|
|
32
|
+
// Extract provider and model parts
|
|
33
|
+
const parts = model.split('/');
|
|
34
|
+
|
|
35
|
+
if (parts.length === 1) {
|
|
36
|
+
// No provider prefix, just model name
|
|
37
|
+
return MODEL_ALIASES[model] || model;
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
const [provider, modelName] = parts;
|
|
41
|
+
const normalizedModel = MODEL_ALIASES[modelName] || modelName;
|
|
42
|
+
|
|
43
|
+
return `${provider}/${normalizedModel}`;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
/**
|
|
47
|
+
* Check if a model is a reasoning model (requires special handling)
|
|
48
|
+
*/
|
|
49
|
+
export function isReasoningModel(model: string): boolean {
|
|
50
|
+
const modelLower = model.toLowerCase();
|
|
51
|
+
|
|
52
|
+
return (
|
|
53
|
+
modelLower.includes('/o1') ||
|
|
54
|
+
modelLower.includes('/o3') ||
|
|
55
|
+
modelLower.includes('reasoning') ||
|
|
56
|
+
modelLower.includes('reasoner') ||
|
|
57
|
+
modelLower.includes('-r1') ||
|
|
58
|
+
modelLower.includes('qwq')
|
|
59
|
+
);
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
/**
|
|
63
|
+
* Check if model doesn't support system messages
|
|
64
|
+
*/
|
|
65
|
+
export function supportsSystemMessages(model: string): boolean {
|
|
66
|
+
const modelLower = model.toLowerCase();
|
|
67
|
+
|
|
68
|
+
// o1, o3 models don't support system messages
|
|
69
|
+
if (modelLower.includes('/o1') || modelLower.includes('/o3')) {
|
|
70
|
+
return false;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
return true;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
/**
|
|
77
|
+
* Get adjusted parameters for reasoning models
|
|
78
|
+
*/
|
|
79
|
+
export function getReasoningModelAdjustments(model: string): {
|
|
80
|
+
temperature?: number;
|
|
81
|
+
topP?: number;
|
|
82
|
+
maxTokens?: number;
|
|
83
|
+
useMaxCompletionTokens?: boolean;
|
|
84
|
+
} {
|
|
85
|
+
if (!isReasoningModel(model)) {
|
|
86
|
+
return {};
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
// o1/o3 models require specific parameter handling
|
|
90
|
+
if (model.toLowerCase().includes('/o1') || model.toLowerCase().includes('/o3')) {
|
|
91
|
+
return {
|
|
92
|
+
temperature: 1, // Fixed for o1/o3
|
|
93
|
+
topP: 1, // Fixed for o1/o3
|
|
94
|
+
useMaxCompletionTokens: true, // Use max_completion_tokens instead of max_tokens
|
|
95
|
+
};
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
return {};
|
|
99
|
+
}
|
|
100
|
+
|
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Request logging and metrics tracking
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
/** Identifying information captured when a request is started. */
export interface RequestMetadata {
  requestId: string;
  provider: string;
  model: string;
  // Epoch milliseconds at which the request was started.
  timestamp: number;
  // Arbitrary caller-supplied context attached to the request.
  metadata?: Record<string, any>;
}

/** In-flight request handle returned by RequestLogger.startRequest. */
export interface RequestTracker extends RequestMetadata {
  // Epoch milliseconds used to compute latency on completion.
  startTime: number;
}

/** A finished request as stored in the logger. */
export interface CompletedRequest extends RequestMetadata {
  // Milliseconds between startRequest and logRequest.
  latency: number;
  success: boolean;
  // Total tokens consumed, when the provider reported them.
  tokens?: number;
  // Error description for failed requests.
  error?: string;
}

/** Per-provider aggregate counters within LoggerStats. */
export interface ProviderStats {
  requests: number;
  // Mean latency in milliseconds across this provider's requests.
  avgLatency: number;
  tokens: number;
  errors: number;
}

/** Aggregate view over every request the logger has recorded. */
export interface LoggerStats {
  totalRequests: number;
  successfulRequests: number;
  failedRequests: number;
  // Mean latency in milliseconds across all requests (0 when empty).
  averageLatency: number;
  totalTokensUsed: number;
  providerBreakdown: Record<string, ProviderStats>;
}
|
|
39
|
+
|
|
40
|
+
/**
|
|
41
|
+
* Simple request logger for tracking metrics
|
|
42
|
+
*/
|
|
43
|
+
export class RequestLogger {
|
|
44
|
+
private requests: CompletedRequest[] = [];
|
|
45
|
+
private enabled: boolean;
|
|
46
|
+
|
|
47
|
+
constructor(enabled: boolean = false) {
|
|
48
|
+
this.enabled = enabled;
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
/**
|
|
52
|
+
* Start tracking a request
|
|
53
|
+
*/
|
|
54
|
+
startRequest(
|
|
55
|
+
provider: string,
|
|
56
|
+
model: string,
|
|
57
|
+
metadata?: Record<string, any>
|
|
58
|
+
): RequestTracker {
|
|
59
|
+
return {
|
|
60
|
+
requestId: this.generateId(),
|
|
61
|
+
provider,
|
|
62
|
+
model,
|
|
63
|
+
timestamp: Date.now(),
|
|
64
|
+
startTime: Date.now(),
|
|
65
|
+
metadata,
|
|
66
|
+
};
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
/**
|
|
70
|
+
* Complete and log a request
|
|
71
|
+
*/
|
|
72
|
+
logRequest(tracker: RequestTracker, success: boolean, tokens?: number, error?: string): void {
|
|
73
|
+
if (!this.enabled) return;
|
|
74
|
+
|
|
75
|
+
const latency = Date.now() - tracker.startTime;
|
|
76
|
+
|
|
77
|
+
this.requests.push({
|
|
78
|
+
requestId: tracker.requestId,
|
|
79
|
+
provider: tracker.provider,
|
|
80
|
+
model: tracker.model,
|
|
81
|
+
timestamp: tracker.timestamp,
|
|
82
|
+
metadata: tracker.metadata,
|
|
83
|
+
latency,
|
|
84
|
+
success,
|
|
85
|
+
tokens,
|
|
86
|
+
error,
|
|
87
|
+
});
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
/**
|
|
91
|
+
* Get aggregated statistics
|
|
92
|
+
*/
|
|
93
|
+
getStats(): LoggerStats {
|
|
94
|
+
const successful = this.requests.filter((r) => r.success);
|
|
95
|
+
const failed = this.requests.filter((r) => !r.success);
|
|
96
|
+
|
|
97
|
+
const totalLatency = this.requests.reduce((sum, r) => sum + r.latency, 0);
|
|
98
|
+
const totalTokens = this.requests.reduce((sum, r) => sum + (r.tokens || 0), 0);
|
|
99
|
+
|
|
100
|
+
// Provider breakdown
|
|
101
|
+
const providerBreakdown: Record<string, ProviderStats> = {};
|
|
102
|
+
|
|
103
|
+
for (const request of this.requests) {
|
|
104
|
+
if (!providerBreakdown[request.provider]) {
|
|
105
|
+
providerBreakdown[request.provider] = {
|
|
106
|
+
requests: 0,
|
|
107
|
+
avgLatency: 0,
|
|
108
|
+
tokens: 0,
|
|
109
|
+
errors: 0,
|
|
110
|
+
};
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
const stats = providerBreakdown[request.provider];
|
|
114
|
+
stats.requests++;
|
|
115
|
+
stats.tokens += request.tokens || 0;
|
|
116
|
+
if (!request.success) stats.errors++;
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
// Calculate average latencies per provider
|
|
120
|
+
for (const provider in providerBreakdown) {
|
|
121
|
+
const providerRequests = this.requests.filter((r) => r.provider === provider);
|
|
122
|
+
const providerLatency = providerRequests.reduce((sum, r) => sum + r.latency, 0);
|
|
123
|
+
providerBreakdown[provider].avgLatency =
|
|
124
|
+
providerRequests.length > 0 ? providerLatency / providerRequests.length : 0;
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
return {
|
|
128
|
+
totalRequests: this.requests.length,
|
|
129
|
+
successfulRequests: successful.length,
|
|
130
|
+
failedRequests: failed.length,
|
|
131
|
+
averageLatency: this.requests.length > 0 ? totalLatency / this.requests.length : 0,
|
|
132
|
+
totalTokensUsed: totalTokens,
|
|
133
|
+
providerBreakdown,
|
|
134
|
+
};
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
/**
|
|
138
|
+
* Get all requests (for detailed analysis)
|
|
139
|
+
*/
|
|
140
|
+
getRequests(): CompletedRequest[] {
|
|
141
|
+
return [...this.requests];
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
/**
|
|
145
|
+
* Clear all logged requests
|
|
146
|
+
*/
|
|
147
|
+
clear(): void {
|
|
148
|
+
this.requests = [];
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
/**
|
|
152
|
+
* Enable or disable logging
|
|
153
|
+
*/
|
|
154
|
+
setEnabled(enabled: boolean): void {
|
|
155
|
+
this.enabled = enabled;
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
private generateId(): string {
|
|
159
|
+
return `req_${Date.now()}_${Math.random().toString(36).substring(2, 9)}`;
|
|
160
|
+
}
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
/**
|
|
164
|
+
* Global logger instance
|
|
165
|
+
*/
|
|
166
|
+
let globalLogger: RequestLogger | null = null;
|
|
167
|
+
|
|
168
|
+
/**
|
|
169
|
+
* Get or create global logger
|
|
170
|
+
*/
|
|
171
|
+
export function getRequestLogger(enabled: boolean = false): RequestLogger {
|
|
172
|
+
if (!globalLogger) {
|
|
173
|
+
globalLogger = new RequestLogger(enabled);
|
|
174
|
+
} else {
|
|
175
|
+
globalLogger.setEnabled(enabled);
|
|
176
|
+
}
|
|
177
|
+
return globalLogger;
|
|
178
|
+
}
|
|
179
|
+
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
import { StreamChunk } from '../types';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Parse SSE (Server-Sent Events) data line
|
|
5
|
+
*/
|
|
6
|
+
export function parseSSELine(line: string): { event?: string; data?: string; } | null {
|
|
7
|
+
if (!line.trim() || line.startsWith(':')) {
|
|
8
|
+
return null;
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
if (line.startsWith('event:')) {
|
|
12
|
+
return { event: line.slice(6).trim() };
|
|
13
|
+
}
|
|
14
|
+
|
|
15
|
+
if (line.startsWith('data:')) {
|
|
16
|
+
return { data: line.slice(5).trim() };
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
return null;
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
/**
|
|
23
|
+
* Transform a ReadableStream of Uint8Array to lines
|
|
24
|
+
*/
|
|
25
|
+
export async function* streamLines(stream: ReadableStream<Uint8Array>): AsyncIterable<string> {
|
|
26
|
+
const reader = stream.getReader();
|
|
27
|
+
const decoder = new TextDecoder();
|
|
28
|
+
let buffer = '';
|
|
29
|
+
|
|
30
|
+
try {
|
|
31
|
+
while (true) {
|
|
32
|
+
const { done, value } = await reader.read();
|
|
33
|
+
|
|
34
|
+
if (done) {
|
|
35
|
+
if (buffer.trim()) {
|
|
36
|
+
yield buffer;
|
|
37
|
+
}
|
|
38
|
+
break;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
buffer += decoder.decode(value, { stream: true });
|
|
42
|
+
const lines = buffer.split('\n');
|
|
43
|
+
buffer = lines.pop() || '';
|
|
44
|
+
|
|
45
|
+
for (const line of lines) {
|
|
46
|
+
if (line.trim()) {
|
|
47
|
+
yield line;
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
} finally {
|
|
52
|
+
reader.releaseLock();
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
/**
|
|
57
|
+
* Generic SSE parser for streaming responses
|
|
58
|
+
*/
|
|
59
|
+
export async function* parseSSEStream(
|
|
60
|
+
stream: ReadableStream<Uint8Array>
|
|
61
|
+
): AsyncIterable<any> {
|
|
62
|
+
let currentData = '';
|
|
63
|
+
|
|
64
|
+
for await (const line of streamLines(stream)) {
|
|
65
|
+
const parsed = parseSSELine(line);
|
|
66
|
+
|
|
67
|
+
if (parsed?.data) {
|
|
68
|
+
currentData = parsed.data;
|
|
69
|
+
|
|
70
|
+
if (currentData === '[DONE]') {
|
|
71
|
+
break;
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
try {
|
|
75
|
+
const json = JSON.parse(currentData);
|
|
76
|
+
yield json;
|
|
77
|
+
} catch (e) {
|
|
78
|
+
// Skip invalid JSON
|
|
79
|
+
continue;
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
/**
|
|
86
|
+
* Helper to create an async iterable from chunks
|
|
87
|
+
*/
|
|
88
|
+
export async function* createChunkStream(chunks: StreamChunk[]): AsyncIterable<StreamChunk> {
|
|
89
|
+
for (const chunk of chunks) {
|
|
90
|
+
yield chunk;
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import { ChatOptions, Message } from '../types';
|
|
2
|
+
import { InvalidRequestError } from './errors';
|
|
3
|
+
|
|
4
|
+
export function validateChatOptions(options: ChatOptions): void {
|
|
5
|
+
if (!options.model) {
|
|
6
|
+
throw new InvalidRequestError('Model is required');
|
|
7
|
+
}
|
|
8
|
+
|
|
9
|
+
if (!options.messages || !Array.isArray(options.messages)) {
|
|
10
|
+
throw new InvalidRequestError('Messages must be an array');
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
if (options.messages.length === 0) {
|
|
14
|
+
throw new InvalidRequestError('At least one message is required');
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
options.messages.forEach((msg, idx) => {
|
|
18
|
+
validateMessage(msg, idx);
|
|
19
|
+
});
|
|
20
|
+
|
|
21
|
+
if (options.temperature !== undefined) {
|
|
22
|
+
if (typeof options.temperature !== 'number' || options.temperature < 0 || options.temperature > 2) {
|
|
23
|
+
throw new InvalidRequestError('Temperature must be between 0 and 2');
|
|
24
|
+
}
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
if (options.maxTokens !== undefined) {
|
|
28
|
+
if (typeof options.maxTokens !== 'number' || options.maxTokens < 1) {
|
|
29
|
+
throw new InvalidRequestError('maxTokens must be a positive number');
|
|
30
|
+
}
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
if (options.topP !== undefined) {
|
|
34
|
+
if (typeof options.topP !== 'number' || options.topP < 0 || options.topP > 1) {
|
|
35
|
+
throw new InvalidRequestError('topP must be between 0 and 1');
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
export function validateMessage(message: Message, index: number): void {
|
|
41
|
+
if (!message.role) {
|
|
42
|
+
throw new InvalidRequestError(`Message at index ${index} is missing role`);
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
if (!['system', 'user', 'assistant', 'tool'].includes(message.role)) {
|
|
46
|
+
throw new InvalidRequestError(
|
|
47
|
+
`Invalid role "${message.role}" at index ${index}. Must be: system, user, assistant, or tool`
|
|
48
|
+
);
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
if (message.content === undefined || message.content === null) {
|
|
52
|
+
throw new InvalidRequestError(`Message at index ${index} is missing content`);
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
if (typeof message.content !== 'string') {
|
|
56
|
+
throw new InvalidRequestError(`Message content at index ${index} must be a string`);
|
|
57
|
+
}
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
export function validateApiKey(apiKey: string | undefined, provider: string): string {
|
|
61
|
+
if (!apiKey) {
|
|
62
|
+
throw new InvalidRequestError(
|
|
63
|
+
`API key is required for ${provider}. ` +
|
|
64
|
+
`Pass it in the constructor or per-request options.`
|
|
65
|
+
);
|
|
66
|
+
}
|
|
67
|
+
return apiKey;
|
|
68
|
+
}
|
|
69
|
+
|