commons-proxy 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +757 -0
- package/bin/cli.js +146 -0
- package/package.json +97 -0
- package/public/Complaint Details.pdf +0 -0
- package/public/Cyber Crime Portal.pdf +0 -0
- package/public/app.js +229 -0
- package/public/css/src/input.css +523 -0
- package/public/css/style.css +1 -0
- package/public/favicon.png +0 -0
- package/public/index.html +549 -0
- package/public/js/components/account-manager.js +356 -0
- package/public/js/components/add-account-modal.js +414 -0
- package/public/js/components/claude-config.js +420 -0
- package/public/js/components/dashboard/charts.js +605 -0
- package/public/js/components/dashboard/filters.js +362 -0
- package/public/js/components/dashboard/stats.js +110 -0
- package/public/js/components/dashboard.js +236 -0
- package/public/js/components/logs-viewer.js +100 -0
- package/public/js/components/models.js +36 -0
- package/public/js/components/server-config.js +349 -0
- package/public/js/config/constants.js +102 -0
- package/public/js/data-store.js +375 -0
- package/public/js/settings-store.js +58 -0
- package/public/js/store.js +99 -0
- package/public/js/translations/en.js +367 -0
- package/public/js/translations/id.js +412 -0
- package/public/js/translations/pt.js +308 -0
- package/public/js/translations/tr.js +358 -0
- package/public/js/translations/zh.js +373 -0
- package/public/js/utils/account-actions.js +189 -0
- package/public/js/utils/error-handler.js +96 -0
- package/public/js/utils/model-config.js +42 -0
- package/public/js/utils/ui-logger.js +143 -0
- package/public/js/utils/validators.js +77 -0
- package/public/js/utils.js +69 -0
- package/public/proxy-server-64.png +0 -0
- package/public/views/accounts.html +361 -0
- package/public/views/dashboard.html +484 -0
- package/public/views/logs.html +97 -0
- package/public/views/models.html +331 -0
- package/public/views/settings.html +1327 -0
- package/src/account-manager/credentials.js +378 -0
- package/src/account-manager/index.js +462 -0
- package/src/account-manager/onboarding.js +112 -0
- package/src/account-manager/rate-limits.js +369 -0
- package/src/account-manager/storage.js +160 -0
- package/src/account-manager/strategies/base-strategy.js +109 -0
- package/src/account-manager/strategies/hybrid-strategy.js +339 -0
- package/src/account-manager/strategies/index.js +79 -0
- package/src/account-manager/strategies/round-robin-strategy.js +76 -0
- package/src/account-manager/strategies/sticky-strategy.js +138 -0
- package/src/account-manager/strategies/trackers/health-tracker.js +162 -0
- package/src/account-manager/strategies/trackers/index.js +9 -0
- package/src/account-manager/strategies/trackers/quota-tracker.js +120 -0
- package/src/account-manager/strategies/trackers/token-bucket-tracker.js +155 -0
- package/src/auth/database.js +169 -0
- package/src/auth/oauth.js +548 -0
- package/src/auth/token-extractor.js +117 -0
- package/src/cli/accounts.js +648 -0
- package/src/cloudcode/index.js +29 -0
- package/src/cloudcode/message-handler.js +510 -0
- package/src/cloudcode/model-api.js +248 -0
- package/src/cloudcode/rate-limit-parser.js +235 -0
- package/src/cloudcode/request-builder.js +93 -0
- package/src/cloudcode/session-manager.js +47 -0
- package/src/cloudcode/sse-parser.js +121 -0
- package/src/cloudcode/sse-streamer.js +293 -0
- package/src/cloudcode/streaming-handler.js +615 -0
- package/src/config.js +125 -0
- package/src/constants.js +407 -0
- package/src/errors.js +242 -0
- package/src/fallback-config.js +29 -0
- package/src/format/content-converter.js +193 -0
- package/src/format/index.js +20 -0
- package/src/format/request-converter.js +255 -0
- package/src/format/response-converter.js +120 -0
- package/src/format/schema-sanitizer.js +673 -0
- package/src/format/signature-cache.js +88 -0
- package/src/format/thinking-utils.js +648 -0
- package/src/index.js +148 -0
- package/src/modules/usage-stats.js +205 -0
- package/src/providers/anthropic-provider.js +258 -0
- package/src/providers/base-provider.js +157 -0
- package/src/providers/cloudcode.js +94 -0
- package/src/providers/copilot.js +399 -0
- package/src/providers/github-provider.js +287 -0
- package/src/providers/google-provider.js +192 -0
- package/src/providers/index.js +211 -0
- package/src/providers/openai-compatible.js +265 -0
- package/src/providers/openai-provider.js +271 -0
- package/src/providers/openrouter-provider.js +325 -0
- package/src/providers/setup.js +83 -0
- package/src/server.js +870 -0
- package/src/utils/claude-config.js +245 -0
- package/src/utils/helpers.js +51 -0
- package/src/utils/logger.js +142 -0
- package/src/utils/native-module-helper.js +162 -0
- package/src/webui/index.js +1134 -0
|
@@ -0,0 +1,265 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI-Compatible Provider
|
|
3
|
+
*
|
|
4
|
+
* Generic provider for any OpenAI-compatible API endpoint.
|
|
5
|
+
* Allows CommonsProxy to work with various LLM providers that
|
|
6
|
+
* implement the OpenAI API format.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
import { ProviderType } from './index.js';
|
|
10
|
+
import { logger } from '../utils/logger.js';
|
|
11
|
+
|
|
12
|
+
/**
 * Create an OpenAI-compatible provider instance
 * @param {Object} config - Provider configuration
 * @param {string} config.id - Unique provider ID
 * @param {string} config.name - Display name
 * @param {string} config.baseUrl - API base URL
 * @param {string} [config.apiKey] - API key (optional)
 * @param {Object} [config.headers] - Additional headers
 * @returns {Object} Provider instance
 */
export function createOpenAICompatibleProvider(config) {
  const { id, name, baseUrl, apiKey, headers = {} } = config;

  /**
   * Build request headers for this provider.
   *
   * The Authorization entry is omitted entirely when no key is available:
   * a literal `undefined` value in a fetch headers object is serialized
   * as the string "undefined" by fetch implementations (e.g. undici),
   * which would send a bogus `Authorization: Bearer undefined`-style header.
   *
   * @param {string|undefined} key - Resolved API key, if any
   * @param {boolean} [withContentType=true] - Include Content-Type (POST bodies)
   * @returns {Object} Header map
   */
  function buildHeaders(key, withContentType = true) {
    return {
      ...(withContentType ? { 'Content-Type': 'application/json' } : {}),
      ...(key ? { 'Authorization': `Bearer ${key}` } : {}),
      'User-Agent': 'commons-proxy/1.0.0',
      ...headers
    };
  }

  return {
    id,
    name,
    type: ProviderType.OPENAI,
    enabled: true,

    config: {
      baseUrl,
      apiKey,
      headers
    },

    /**
     * Send a message
     * @param {Object} request - Anthropic-format request
     * @param {Object} credentials - { apiKey }
     * @param {Object} options - Additional options
     * @returns {Promise<Object>} Anthropic-format response
     * @throws {Error} When the upstream API responds with a non-2xx status
     */
    async sendMessage(request, credentials = {}, options = {}) {
      const key = credentials.apiKey || apiKey;
      const openaiRequest = convertAnthropicToOpenAI(request);

      const response = await fetch(`${baseUrl}/chat/completions`, {
        method: 'POST',
        headers: buildHeaders(key),
        body: JSON.stringify(openaiRequest)
      });

      if (!response.ok) {
        const text = await response.text();
        throw new Error(`API error: ${response.status} ${text}`);
      }

      const data = await response.json();
      return convertOpenAIToAnthropic(data, request.model);
    },

    /**
     * Send a streaming message
     * @param {Object} request - Anthropic-format request
     * @param {Object} credentials - { apiKey }
     * @param {Object} options - Additional options
     * @yields {Object} Anthropic-format SSE events
     * @throws {Error} When the upstream API responds with a non-2xx status
     */
    async *sendMessageStream(request, credentials = {}, options = {}) {
      const key = credentials.apiKey || apiKey;
      const openaiRequest = convertAnthropicToOpenAI(request);
      openaiRequest.stream = true;

      const response = await fetch(`${baseUrl}/chat/completions`, {
        method: 'POST',
        headers: buildHeaders(key),
        body: JSON.stringify(openaiRequest)
      });

      if (!response.ok) {
        const text = await response.text();
        throw new Error(`API error: ${response.status} ${text}`);
      }

      // Parse the OpenAI SSE stream and translate it into Anthropic events.
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';
      const messageId = `msg_${Date.now()}`;
      let started = false;
      let finished = false;

      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;

          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split('\n');
          // Keep the trailing partial line in the buffer for the next chunk.
          buffer = lines.pop() || '';

          for (const line of lines) {
            if (!line.startsWith('data: ')) continue;
            const data = line.slice(6).trim();
            if (data === '[DONE]') continue;

            try {
              const chunk = JSON.parse(data);
              const delta = chunk.choices?.[0]?.delta;

              if (!started) {
                started = true;
                yield {
                  type: 'message_start',
                  message: {
                    id: messageId,
                    type: 'message',
                    role: 'assistant',
                    content: [],
                    model: request.model,
                    stop_reason: null,
                    usage: { input_tokens: 0, output_tokens: 0 }
                  }
                };
                yield {
                  type: 'content_block_start',
                  index: 0,
                  content_block: { type: 'text', text: '' }
                };
              }

              if (delta?.content) {
                yield {
                  type: 'content_block_delta',
                  index: 0,
                  delta: { type: 'text_delta', text: delta.content }
                };
              }

              if (chunk.choices?.[0]?.finish_reason) {
                finished = true;
                yield { type: 'content_block_stop', index: 0 };
                yield {
                  type: 'message_delta',
                  delta: { stop_reason: 'end_turn' },
                  usage: { output_tokens: chunk.usage?.completion_tokens || 0 }
                };
                yield { type: 'message_stop' };
              }
            } catch (e) {
              logger.debug(`[OpenAI] Failed to parse chunk: ${e.message}`);
            }
          }
        }

        // If the upstream stream ended without ever sending a finish_reason
        // chunk, close out the message anyway so consumers waiting for
        // message_stop are not left hanging.
        if (started && !finished) {
          yield { type: 'content_block_stop', index: 0 };
          yield {
            type: 'message_delta',
            delta: { stop_reason: 'end_turn' },
            usage: { output_tokens: 0 }
          };
          yield { type: 'message_stop' };
        }
      } finally {
        reader.releaseLock();
      }
    },

    /**
     * List available models
     * @param {Object} credentials - { apiKey }
     * @returns {Promise<Array>} Array of model info ([] on any failure)
     */
    async listModels(credentials = {}) {
      const key = credentials.apiKey || apiKey;

      try {
        const response = await fetch(`${baseUrl}/models`, {
          // GET request: no body, so skip Content-Type.
          headers: buildHeaders(key, false)
        });

        if (!response.ok) {
          return [];
        }

        const data = await response.json();
        return (data.data || []).map(m => ({
          id: m.id,
          name: m.id,
          family: 'openai'
        }));
      } catch (error) {
        // Best-effort: model listing is informational, never fatal.
        logger.warn(`[OpenAI] Failed to list models: ${error.message}`);
        return [];
      }
    },

    getModelFamilies() {
      return ['openai'];
    },

    supportsModel(modelId) {
      return true; // Accept any model
    }
  };
}
|
|
208
|
+
|
|
209
|
+
/**
 * Convert Anthropic request to OpenAI format
 *
 * @param {Object} request - Anthropic-format request
 * @param {string|Array<Object>} [request.system] - System prompt; Anthropic
 *   allows either a plain string or an array of content blocks
 * @param {Array<Object>} request.messages - Anthropic messages
 * @returns {Object} OpenAI chat-completions request body
 */
function convertAnthropicToOpenAI(request) {
  const messages = [];

  // System prompt: normalize both the string form and the content-block
  // array form into a single OpenAI system message. (The array form would
  // otherwise be forwarded as raw Anthropic blocks, which OpenAI rejects.)
  if (request.system) {
    const systemText = typeof request.system === 'string'
      ? request.system
      : request.system
          .filter(block => block.type === 'text')
          .map(block => block.text)
          .join('\n');
    if (systemText) {
      messages.push({ role: 'system', content: systemText });
    }
  }

  // Convert messages; content-block arrays are flattened to their text parts.
  for (const msg of request.messages) {
    if (typeof msg.content === 'string') {
      messages.push({ role: msg.role, content: msg.content });
    } else {
      const textContent = msg.content
        .filter(block => block.type === 'text')
        .map(block => block.text)
        .join('\n');
      messages.push({ role: msg.role, content: textContent });
    }
  }

  return {
    model: request.model,
    messages,
    max_tokens: request.max_tokens,
    temperature: request.temperature,
    stream: false
  };
}
|
|
242
|
+
|
|
243
|
+
/**
 * Convert OpenAI response to Anthropic format
 *
 * @param {Object} response - OpenAI chat-completions response
 * @param {string} model - Model ID to report in the Anthropic response
 * @returns {Object} Anthropic-format message
 */
function convertOpenAIToAnthropic(response, model) {
  const choice = response.choices?.[0];

  // Map OpenAI finish reasons onto their Anthropic equivalents; unknown
  // values pass through unchanged, and a missing value becomes null
  // (Anthropic's "not finished" marker). Previously only 'stop' was
  // translated, so 'length' / 'tool_calls' leaked through untranslated.
  const stopReasonMap = {
    stop: 'end_turn',
    length: 'max_tokens',
    tool_calls: 'tool_use'
  };
  const finishReason = choice?.finish_reason;

  return {
    id: response.id || `msg_${Date.now()}`,
    type: 'message',
    role: 'assistant',
    content: [{
      type: 'text',
      text: choice?.message?.content || ''
    }],
    model: model,
    stop_reason: stopReasonMap[finishReason] ?? finishReason ?? null,
    usage: {
      input_tokens: response.usage?.prompt_tokens || 0,
      output_tokens: response.usage?.completion_tokens || 0
    }
  };
}
|
|
264
|
+
|
|
265
|
+
export default createOpenAICompatibleProvider;
|
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI API Provider
|
|
3
|
+
*
|
|
4
|
+
* Implements authentication via OpenAI API keys.
|
|
5
|
+
* Supports GPT models via direct OpenAI API.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import BaseProvider from './base-provider.js';
|
|
9
|
+
|
|
10
|
+
export class OpenAIProvider extends BaseProvider {
  /**
   * @param {Object} [config] - Provider configuration
   * @param {string} [config.apiEndpoint] - Override for the OpenAI API base URL
   */
  constructor(config = {}) {
    super('openai', 'OpenAI', {
      apiEndpoint: config.apiEndpoint || 'https://api.openai.com',
      ...config
    });
  }

  /**
   * Fetch the raw `/v1/models` payload for an account.
   * Shared by getQuotas() and getAvailableModels().
   *
   * @param {Object} account - Account (may carry customApiEndpoint)
   * @param {string} token - API key
   * @returns {Promise<Object>} Parsed response body
   * @throws {Error} When the request fails with a non-2xx status
   */
  async #fetchModels(account, token) {
    const endpoint = account.customApiEndpoint || this.config.apiEndpoint;
    const response = await fetch(`${endpoint}/v1/models`, {
      method: 'GET',
      headers: {
        'Authorization': `Bearer ${token}`
      }
    });

    if (!response.ok) {
      throw new Error(`Failed to fetch models: ${response.status}`);
    }

    return response.json();
  }

  /**
   * Validate OpenAI API key
   *
   * @param {Object} account - Account with apiKey
   * @returns {Promise<{valid: boolean, error?: string, email?: string}>}
   */
  async validateCredentials(account) {
    if (!account.apiKey) {
      return { valid: false, error: 'Missing API key' };
    }

    try {
      // Test API key by fetching model list
      const endpoint = account.customApiEndpoint || this.config.apiEndpoint;
      const response = await fetch(`${endpoint}/v1/models`, {
        method: 'GET',
        headers: {
          'Authorization': `Bearer ${account.apiKey}`
        }
      });

      if (!response.ok) {
        const error = await response.text();
        return { valid: false, error: `API key validation failed: ${error}` };
      }

      // OpenAI doesn't provide email in API, use a placeholder
      const email = account.email || `openai-${account.apiKey.slice(0, 8)}`;

      return { valid: true, email };
    } catch (error) {
      this.error('Credential validation failed', error);
      return { valid: false, error: error.message };
    }
  }

  /**
   * Get API key (for OpenAI, API key IS the access token)
   *
   * @param {Object} account - Account with apiKey
   * @returns {Promise<string>} API key
   * @throws {Error} When the account has no apiKey
   */
  async getAccessToken(account) {
    if (!account.apiKey) {
      throw new Error('Account missing API key');
    }
    return account.apiKey;
  }

  /**
   * Fetch usage/quota information from OpenAI API
   * Note: OpenAI usage API requires organization key, so quotas are
   * synthesized as "full" for each visible GPT model.
   *
   * @param {Object} account - Account object
   * @param {string} token - API key
   * @returns {Promise<Object>} Quota data keyed by model ID
   */
  async getQuotas(account, token) {
    try {
      const data = await this.#fetchModels(account, token);
      const models = {};

      // Create default quota entries for GPT models
      if (data.data && Array.isArray(data.data)) {
        data.data
          .filter(model => model.id.includes('gpt'))
          .forEach(model => {
            models[model.id] = {
              remainingFraction: 1.0, // Default: full quota (no easy API to check actual)
              resetTime: null // Unknown
            };
          });
      }

      // If no models found, add common GPT models
      if (Object.keys(models).length === 0) {
        const commonModels = [
          'gpt-4-turbo-preview',
          'gpt-4',
          'gpt-4-32k',
          'gpt-3.5-turbo',
          'gpt-4o',
          'gpt-4o-mini'
        ];
        commonModels.forEach(modelId => {
          models[modelId] = {
            remainingFraction: 1.0,
            resetTime: null
          };
        });
      }

      return { models };
    } catch (error) {
      this.error('Failed to fetch quotas', error);
      // Return default quota on error
      return {
        models: {
          'gpt-4': { remainingFraction: 1.0, resetTime: null },
          'gpt-4-turbo-preview': { remainingFraction: 1.0, resetTime: null },
          'gpt-3.5-turbo': { remainingFraction: 1.0, resetTime: null }
        }
      };
    }
  }

  /**
   * Get subscription tier (OpenAI uses usage-based pricing)
   *
   * @param {Object} account - Account object
   * @param {string} token - API key
   * @returns {Promise<{tier: string, projectId: null}>}
   */
  async getSubscriptionTier(account, token) {
    // OpenAI uses usage-based pricing with different tier limits based on usage history
    // We could potentially check the organization endpoint if available
    return { tier: 'usage-based', projectId: null };
  }

  /**
   * Get available GPT models
   *
   * @param {Object} account - Account object
   * @param {string} token - API key
   * @returns {Promise<Array>} List of available models ([] on failure)
   */
  async getAvailableModels(account, token) {
    try {
      const data = await this.#fetchModels(account, token);
      if (data.data && Array.isArray(data.data)) {
        return data.data
          .filter(model => model.id.includes('gpt'))
          .map(model => ({
            id: model.id,
            name: model.id,
            family: 'gpt'
          }));
      }

      return [];
    } catch (error) {
      this.error('Failed to fetch available models', error);
      return [];
    }
  }

  /**
   * Parse OpenAI rate limit headers
   *
   * @param {Response} response - Fetch response
   * @param {Object} errorData - Error data from response body
   * @returns {Object|null} Rate limit info ({ resetTime, retryAfter }), or null
   */
  parseRateLimitInfo(response, errorData = null) {
    // OpenAI uses these headers:
    // - x-ratelimit-limit-requests
    // - x-ratelimit-remaining-requests
    // - x-ratelimit-reset-requests
    // - x-ratelimit-limit-tokens
    // - x-ratelimit-remaining-tokens
    // - x-ratelimit-reset-tokens

    const requestsReset = response.headers.get('x-ratelimit-reset-requests');
    const tokensReset = response.headers.get('x-ratelimit-reset-tokens');

    // Reset headers are duration strings that may combine several units,
    // e.g. "7.66s", "120ms", "1m30s", "6m0s". The previous single-unit
    // regex rejected compound values like "6m0s" and silently dropped
    // the rate-limit info, so parse segment by segment instead.
    const parseResetDuration = (resetStr) => {
      if (!resetStr) return null;
      // 'ms' must precede 's'/'m' in the alternation so "120ms" is not
      // read as 120 minutes followed by a stray 's'.
      const segments = [...resetStr.matchAll(/(\d+(?:\.\d+)?)(ms|s|m|h)/g)];
      if (segments.length === 0) return null;

      const multipliers = {
        'ms': 1,
        's': 1000,
        'm': 60000,
        'h': 3600000
      };

      const ms = segments.reduce(
        (total, [, value, unit]) => total + parseFloat(value) * multipliers[unit],
        0
      );
      return new Date(Date.now() + ms);
    };

    const resets = [requestsReset, tokensReset]
      .map(parseResetDuration)
      .filter(Boolean);

    if (resets.length > 0) {
      // Be conservative: wait for the later of the two reset windows.
      const latestReset = new Date(Math.max(...resets));
      return {
        resetTime: latestReset,
        retryAfter: Math.max(0, Math.floor((latestReset - Date.now()) / 1000))
      };
    }

    // Check error response for rate limit info
    if (errorData?.error?.type === 'rate_limit_exceeded') {
      return {
        resetTime: new Date(Date.now() + 60000), // Default: 1 minute
        retryAfter: 60
      };
    }

    return null;
  }

  /**
   * Check if error indicates invalid API key
   *
   * @param {Error} error - Error object
   * @returns {boolean}
   */
  shouldInvalidateCredentials(error) {
    if (error.message && (
      error.message.includes('invalid_api_key') ||
      error.message.includes('Incorrect API key') ||
      error.message.includes('authentication')
    )) {
      return true;
    }

    return super.shouldInvalidateCredentials(error);
  }
}
|
|
270
|
+
|
|
271
|
+
export default OpenAIProvider;
|