codex-claude-proxy 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +206 -0
- package/docs/ACCOUNTS.md +202 -0
- package/docs/API.md +274 -0
- package/docs/ARCHITECTURE.md +133 -0
- package/docs/CLAUDE_INTEGRATION.md +163 -0
- package/docs/OAUTH.md +201 -0
- package/docs/OPENCLAW.md +338 -0
- package/images/f757093f-507b-4453-994e-f8275f8b07a9.png +0 -0
- package/package.json +44 -0
- package/public/css/style.css +791 -0
- package/public/index.html +783 -0
- package/public/js/app.js +511 -0
- package/src/account-manager.js +483 -0
- package/src/claude-config.js +143 -0
- package/src/cli/accounts.js +413 -0
- package/src/cli/index.js +66 -0
- package/src/direct-api.js +123 -0
- package/src/format-converter.js +331 -0
- package/src/index.js +41 -0
- package/src/kilo-api.js +68 -0
- package/src/kilo-format-converter.js +270 -0
- package/src/kilo-streamer.js +198 -0
- package/src/model-api.js +189 -0
- package/src/oauth.js +554 -0
- package/src/response-streamer.js +329 -0
- package/src/routes/api-routes.js +1035 -0
- package/src/server-settings.js +48 -0
- package/src/server.js +30 -0
- package/src/utils/logger.js +156 -0
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
/**
 * Kilo Streamer
 * Streams an OpenAI Chat Completions SSE response and converts it into
 * Anthropic Messages API SSE events (message_start, content_block_start,
 * content_block_delta, content_block_stop, message_delta, message_stop).
 */

import { generateMessageId } from './format-converter.js';

/**
 * Convert an OpenAI chat-completions streaming response into an async
 * iterator of Anthropic-style SSE events.
 *
 * @param {Response} response - fetch Response whose body is an OpenAI SSE stream
 * @param {string} model - model name echoed back in the message_start payload
 * @yields {{event: string, data: object}} Anthropic SSE events, in protocol order
 */
export async function* streamOpenAIChat(response, model) {
  const messageId = generateMessageId();
  let hasEmittedStart = false;
  let blockIndex = 0;
  let currentBlockType = null;       // 'text' | 'tool_use' | null
  let currentToolCallId = null;
  let currentToolIndex = null;       // OpenAI tool_calls[].index of the open tool block
  let currentToolName = null;
  const pendingToolArgs = new Map(); // tool call id -> accumulated JSON argument string
  let stopReason = 'end_turn';
  let usage = { input_tokens: 0, output_tokens: 0 };

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  const emitMessageStart = () => ({
    event: 'message_start',
    data: {
      type: 'message_start',
      message: {
        id: messageId,
        type: 'message',
        role: 'assistant',
        model,
        content: [],
        stop_reason: null,
        stop_sequence: null,
        usage: { input_tokens: 0, output_tokens: 0 }
      }
    }
  });

  const emitContentBlockStart = (contentBlock) => ({
    event: 'content_block_start',
    data: {
      type: 'content_block_start',
      index: blockIndex,
      content_block: contentBlock
    }
  });

  const emitContentBlockDelta = (delta) => ({
    event: 'content_block_delta',
    data: {
      type: 'content_block_delta',
      index: blockIndex,
      delta
    }
  });

  const emitContentBlockStop = () => ({
    event: 'content_block_stop',
    data: { type: 'content_block_stop', index: blockIndex }
  });

  const startTextBlock = () => {
    currentBlockType = 'text';
    currentToolCallId = null;
    currentToolIndex = null;
    currentToolName = null;
    return emitContentBlockStart({ type: 'text', text: '' });
  };

  const startToolBlock = (toolCall) => {
    currentBlockType = 'tool_use';
    currentToolCallId = toolCall.id;
    currentToolIndex = toolCall.index ?? null;
    currentToolName = toolCall.function?.name || 'tool';
    stopReason = 'tool_use';
    return emitContentBlockStart({
      type: 'tool_use',
      id: currentToolCallId,
      name: currentToolName,
      input: {}
    });
  };

  // OpenAI only sends `id`/`function.name` on the FIRST chunk of a tool call;
  // continuation chunks carry just `index` and `function.arguments`. Matching
  // on `id` alone would therefore open a brand-new tool block for every
  // argument fragment. A delta continues the open block when its id matches,
  // or when it has no id and its index (if any) matches the open block's.
  const continuesCurrentTool = (toolCall) =>
    currentBlockType === 'tool_use' &&
    (toolCall.id == null
      ? (toolCall.index == null || toolCall.index === currentToolIndex)
      : toolCall.id === currentToolCallId);

  const handleDelta = (delta) => {
    const events = [];

    if (delta.content) {
      if (!hasEmittedStart) {
        hasEmittedStart = true;
        events.push(emitMessageStart());
        events.push(startTextBlock());
      } else if (currentBlockType !== 'text') {
        // Close the open (tool) block before switching back to text.
        events.push(emitContentBlockStop());
        blockIndex++;
        events.push(startTextBlock());
      }

      events.push(emitContentBlockDelta({ type: 'text_delta', text: delta.content }));
    }

    if (Array.isArray(delta.tool_calls)) {
      for (const toolCall of delta.tool_calls) {
        if (!hasEmittedStart) {
          hasEmittedStart = true;
          events.push(emitMessageStart());
        }

        if (!continuesCurrentTool(toolCall)) {
          if (currentBlockType) {
            events.push(emitContentBlockStop());
            blockIndex++;
          }
          events.push(startToolBlock(toolCall));
        }

        const argsDelta = toolCall.function?.arguments || '';
        if (argsDelta) {
          // Key the accumulator by the call's resolved id so continuation
          // chunks (which omit `id`) append to the right entry.
          const argsKey = toolCall.id ?? currentToolCallId;
          const prev = pendingToolArgs.get(argsKey) || '';
          pendingToolArgs.set(argsKey, prev + argsDelta);
          events.push(emitContentBlockDelta({
            type: 'input_json_delta',
            partial_json: argsDelta
          }));
        }
      }
    }

    return events;
  };

  // Parse one SSE line and return the Anthropic events it produces.
  const processLine = (line) => {
    if (!line.startsWith('data:')) return [];
    const jsonText = line.slice(5).trim();
    if (!jsonText || jsonText === '[DONE]') return [];

    let chunk;
    try {
      chunk = JSON.parse(jsonText);
    } catch {
      return []; // ignore malformed chunks only; real bugs still surface
    }

    if (chunk.usage) {
      usage = {
        input_tokens: chunk.usage.prompt_tokens || 0,
        output_tokens: chunk.usage.completion_tokens || 0
      };
    }

    const choice = chunk.choices?.[0];
    if (!choice) return [];

    if (choice.finish_reason) {
      stopReason = choice.finish_reason === 'tool_calls' ? 'tool_use' : 'end_turn';
    }

    return handleDelta(choice.delta || {});
  };

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      buffer = lines.pop() || '';

      for (const line of lines) {
        yield* processLine(line);
      }
    }

    // Flush any bytes still held by the streaming decoder, then process a
    // final line that arrived without a trailing newline — previously this
    // remainder was silently dropped.
    buffer += decoder.decode();
    if (buffer) {
      yield* processLine(buffer);
    }
  } finally {
    // Release the reader lock even when the consumer abandons the generator
    // early, so the underlying stream can be cancelled or re-read.
    reader.releaseLock();
  }

  if (!hasEmittedStart) {
    // No content at all: still emit a minimal, well-formed event sequence.
    hasEmittedStart = true;
    yield emitMessageStart();
    yield emitContentBlockStart({ type: 'text', text: '' });
    yield emitContentBlockDelta({ type: 'text_delta', text: '' });
    yield emitContentBlockStop();
  } else if (currentBlockType) {
    yield emitContentBlockStop();
  }

  yield {
    event: 'message_delta',
    data: {
      type: 'message_delta',
      delta: { stop_reason: stopReason, stop_sequence: null },
      usage
    }
  };

  yield {
    event: 'message_stop',
    data: { type: 'message_stop' }
  };
}
|
|
195
|
+
|
|
196
|
+
// Default export mirrors the named export for consumers that prefer
// default-import style.
export default {
  streamOpenAIChat
};
|
package/src/model-api.js
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Model API for ChatGPT Codex
|
|
3
|
+
* Handles model listing and quota retrieval from ChatGPT backend API.
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
const CHATGPT_API_BASE = 'https://chatgpt.com/backend-api';
const CLIENT_VERSION = '0.100.0';

// Module-level cache for the model list. `accountId` records which account
// the cached list belongs to, so switching accounts never serves stale data
// fetched on behalf of a different account.
const MODEL_CACHE = {
  models: null,
  accountId: null,
  lastFetched: 0,
  ttlMs: 5 * 60 * 1000
};

/**
 * Fetch the Codex models available to an account, caching the normalized
 * list for a short TTL per account.
 *
 * @param {string} accessToken - ChatGPT bearer token
 * @param {string} accountId - value sent in the ChatGPT-Account-ID header
 * @returns {Promise<Array<object>>} normalized model descriptors
 * @throws {Error} when the backend responds with a non-2xx status
 */
export async function fetchModels(accessToken, accountId) {
  const now = Date.now();

  // Serve from cache only when fresh AND fetched for the same account;
  // previously a fresh entry cached for one account leaked to all others.
  if (
    MODEL_CACHE.models &&
    MODEL_CACHE.accountId === accountId &&
    (now - MODEL_CACHE.lastFetched) < MODEL_CACHE.ttlMs
  ) {
    return MODEL_CACHE.models;
  }

  const url = `${CHATGPT_API_BASE}/codex/models?client_version=${CLIENT_VERSION}`;

  const response = await fetch(url, {
    method: 'GET',
    headers: {
      'Authorization': `Bearer ${accessToken}`,
      'ChatGPT-Account-ID': accountId,
      'Accept': 'application/json'
    }
  });

  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Failed to fetch models: ${response.status} - ${errorText}`);
  }

  const data = await response.json();

  // Map the backend's snake_case model records to camelCase descriptors.
  const models = (data.models || []).map((m) => ({
    id: m.slug,
    name: m.display_name || m.slug,
    description: m.description || '',
    defaultReasoningLevel: m.default_reasoning_level || 'medium',
    supportedReasoningLevels: m.supported_reasoning_levels || [],
    supportedInApi: m.supported_in_api || false,
    visibility: m.visibility || 'list'
  }));

  MODEL_CACHE.models = models;
  MODEL_CACHE.accountId = accountId;
  MODEL_CACHE.lastFetched = now;

  return models;
}
|
|
55
|
+
|
|
56
|
+
/**
 * Retrieve rate-limit usage for the account's primary window and normalize
 * it into a quota summary where the "limit" is expressed as 100 percent.
 *
 * @param {string} accessToken - ChatGPT bearer token
 * @param {string} accountId - value sent in the ChatGPT-Account-ID header
 * @returns {Promise<object>} normalized usage summary plus the raw payload
 * @throws {Error} when the backend responds with a non-2xx status
 */
export async function fetchUsage(accessToken, accountId) {
  const response = await fetch(`${CHATGPT_API_BASE}/wham/usage`, {
    method: 'GET',
    headers: {
      'Authorization': `Bearer ${accessToken}`,
      'ChatGPT-Account-ID': accountId,
      'Accept': 'application/json'
    }
  });

  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Failed to fetch usage: ${response.status} - ${errorText}`);
  }

  const data = await response.json();
  const windowInfo = data.rate_limit?.primary_window || {};

  // Coerce a field to a finite number, or fall back to the given default.
  const finiteOr = (value, fallback) => {
    const n = Number(value);
    return Number.isFinite(n) ? n : fallback;
  };

  const usedPercent = finiteOr(windowInfo.used_percent, 0);
  const limitWindowSeconds = finiteOr(windowInfo.limit_window_seconds, null);
  const resetAfterSeconds = finiteOr(windowInfo.reset_after_seconds, null);

  // NOTE(review): reset_at is interpreted as a unix timestamp in seconds —
  // confirm against the backend API.
  const resetEpoch = Number(windowInfo.reset_at);
  const resetAt = Number.isFinite(resetEpoch)
    ? new Date(resetEpoch * 1000).toISOString()
    : null;

  return {
    totalTokenUsage: usedPercent,
    limit: 100,
    remaining: 100 - usedPercent,
    percentage: usedPercent,
    resetAt,
    resetAfterSeconds,
    limitWindowSeconds,
    planType: data.plan_type || null,
    limitReached: data.rate_limit?.limit_reached || false,
    allowed: data.rate_limit?.allowed ?? true,
    raw: data
  };
}
|
|
102
|
+
|
|
103
|
+
/**
 * Call the account-check endpoint and return its JSON payload unmodified.
 *
 * @param {string} accessToken - ChatGPT bearer token
 * @param {string} accountId - value sent in the ChatGPT-Account-ID header
 * @returns {Promise<object>} raw account-check response body
 * @throws {Error} when the backend responds with a non-2xx status
 */
export async function fetchAccountCheck(accessToken, accountId) {
  const headers = {
    'Authorization': `Bearer ${accessToken}`,
    'ChatGPT-Account-ID': accountId,
    'Accept': 'application/json'
  };

  const response = await fetch(`${CHATGPT_API_BASE}/wham/accounts/check`, {
    method: 'GET',
    headers
  });

  if (response.ok) {
    return await response.json();
  }

  const errorText = await response.text();
  throw new Error(`Failed to fetch account check: ${response.status} - ${errorText}`);
}
|
|
122
|
+
|
|
123
|
+
/**
 * Gather usage and account-check data for an account, tolerating partial
 * failure: each half that fails is reported as null plus an *Error message
 * instead of rejecting the whole call.
 *
 * @param {string} accessToken - ChatGPT bearer token
 * @param {string} accountId - value sent in the ChatGPT-Account-ID header
 * @returns {Promise<object>} { usage, account, fetchedAt, [usageError], [accountError] }
 */
export async function getAccountQuota(accessToken, accountId) {
  try {
    const [usageResult, checkResult] = await Promise.allSettled([
      fetchUsage(accessToken, accountId),
      fetchAccountCheck(accessToken, accountId)
    ]);

    const valueOf = (result) => (result.status === 'fulfilled' ? result.value : null);
    const reasonOf = (result) => result.reason?.message || 'Unknown error';

    // Error keys are only present when the corresponding fetch failed.
    return {
      usage: valueOf(usageResult),
      account: valueOf(checkResult),
      fetchedAt: new Date().toISOString(),
      ...(usageResult.status === 'rejected' ? { usageError: reasonOf(usageResult) } : {}),
      ...(checkResult.status === 'rejected' ? { accountError: reasonOf(checkResult) } : {})
    };
  } catch (error) {
    // Defensive fallback: report a fully-null quota rather than throwing.
    return {
      usage: null,
      account: null,
      error: error.message,
      fetchedAt: new Date().toISOString()
    };
  }
}
|
|
154
|
+
|
|
155
|
+
/**
 * Fetch the model list and the quota summary in parallel.
 *
 * @param {string} accessToken - ChatGPT bearer token
 * @param {string} accountId - value sent in the ChatGPT-Account-ID header
 * @returns {Promise<object>} { models, quota, success } on success;
 *   { models: null, quota: null, error, success: false } on failure
 */
export async function getModelsAndQuota(accessToken, accountId) {
  try {
    const results = await Promise.all([
      fetchModels(accessToken, accountId),
      getAccountQuota(accessToken, accountId)
    ]);

    return {
      models: results[0],
      quota: results[1],
      success: true
    };
  } catch (error) {
    return {
      models: null,
      quota: null,
      error: error.message,
      success: false
    };
  }
}
|
|
176
|
+
|
|
177
|
+
/**
 * Drop the cached model list so the next fetchModels call hits the backend.
 */
export function clearModelCache() {
  Object.assign(MODEL_CACHE, { models: null, lastFetched: 0 });
}
|
|
181
|
+
|
|
182
|
+
// Default export bundles every named export for consumers that prefer
// default-import style.
export default {
  fetchModels,
  fetchUsage,
  fetchAccountCheck,
  getAccountQuota,
  getModelsAndQuota,
  clearModelCache
};
|