codemaxxing 1.0.16 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +72 -21
- package/dist/agent.d.ts +12 -1
- package/dist/agent.js +296 -31
- package/dist/index.js +256 -1303
- package/dist/ui/banner.d.ts +12 -0
- package/dist/ui/banner.js +28 -0
- package/dist/ui/connection-types.d.ts +33 -0
- package/dist/ui/connection-types.js +1 -0
- package/dist/ui/connection.d.ts +11 -0
- package/dist/ui/connection.js +182 -0
- package/dist/ui/input-router.d.ts +176 -0
- package/dist/ui/input-router.js +710 -0
- package/dist/ui/paste-interceptor.d.ts +21 -0
- package/dist/ui/paste-interceptor.js +179 -0
- package/dist/ui/pickers.d.ts +171 -0
- package/dist/ui/pickers.js +120 -0
- package/dist/ui/status-bar.d.ts +8 -0
- package/dist/ui/status-bar.js +15 -0
- package/dist/ui/wizard-types.d.ts +27 -0
- package/dist/ui/wizard-types.js +1 -0
- package/dist/ui/wizard.d.ts +3 -0
- package/dist/ui/wizard.js +214 -0
- package/dist/utils/anthropic-oauth.d.ts +13 -0
- package/dist/utils/anthropic-oauth.js +171 -0
- package/dist/utils/auth.d.ts +2 -0
- package/dist/utils/auth.js +42 -3
- package/dist/utils/ollama.js +6 -1
- package/dist/utils/openai-oauth.d.ts +19 -0
- package/dist/utils/openai-oauth.js +233 -0
- package/dist/utils/responses-api.d.ts +40 -0
- package/dist/utils/responses-api.js +264 -0
- package/package.json +2 -2
|
@@ -0,0 +1,233 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI Codex OAuth PKCE flow
|
|
3
|
+
*
|
|
4
|
+
* Lets users log in with their ChatGPT Plus/Pro subscription (no API key needed).
|
|
5
|
+
* Uses the same OAuth flow as OpenAI's Codex CLI.
|
|
6
|
+
*/
|
|
7
|
+
import { createServer } from "http";
|
|
8
|
+
import { randomBytes, createHash } from "crypto";
|
|
9
|
+
import { exec } from "child_process";
|
|
10
|
+
import { readFileSync, readdirSync, existsSync } from "fs";
|
|
11
|
+
import { homedir } from "os";
|
|
12
|
+
import { join } from "path";
|
|
13
|
+
import { saveCredential } from "./auth.js";
|
|
14
|
+
// ── Constants ──
// OAuth client ID of OpenAI's Codex CLI (a public PKCE client — not a secret).
const CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann";
const AUTHORIZE_URL = "https://auth.openai.com/oauth/authorize";
const TOKEN_URL = "https://auth.openai.com/oauth/token";
// Loopback redirect target; the local callback server below listens on port 1455.
const REDIRECT_URI = "http://localhost:1455/auth/callback";
// offline_access is what yields a refresh token alongside the access token.
const SCOPE = "openid profile email offline_access";
|
|
20
|
+
// ── PKCE helpers ──
|
|
21
|
+
/**
 * Encode a Buffer as base64url (RFC 4648 §5): URL-safe alphabet, no padding.
 * @param {Buffer} buf - bytes to encode
 * @returns {string} base64url-encoded string
 */
function base64url(buf) {
    const substitutions = [
        [/\+/g, "-"],
        [/\//g, "_"],
        [/=/g, ""],
    ];
    let encoded = buf.toString("base64");
    for (const [pattern, replacement] of substitutions) {
        encoded = encoded.replace(pattern, replacement);
    }
    return encoded;
}
|
|
24
|
+
/**
 * Produce a PKCE verifier/challenge pair (RFC 7636, S256 method).
 * The challenge is the base64url-encoded SHA-256 digest of the verifier.
 * @returns {{ verifier: string, challenge: string }}
 */
function generatePKCE() {
    const verifier = base64url(randomBytes(32));
    const digest = createHash("sha256").update(verifier).digest();
    return { verifier, challenge: base64url(digest) };
}
|
|
29
|
+
// ── JWT decode (no verification — just extract payload) ──
/**
 * Decode the payload segment of a JWT without verifying its signature.
 * @param {string} token - dot-separated JWT string
 * @returns {object} parsed payload claims, or {} when the token has no payload segment
 */
function decodeJwtPayload(token) {
    const segments = token.split(".");
    if (segments.length < 2) {
        return {};
    }
    // Translate base64url alphabet back to standard base64;
    // Node's decoder tolerates the missing padding.
    const b64 = segments[1].replace(/-/g, "+").replace(/_/g, "/");
    const json = Buffer.from(b64, "base64").toString("utf-8");
    return JSON.parse(json);
}
|
|
37
|
+
// ── Browser opener ──
/**
 * Open `url` in the user's default browser (best effort).
 * NOTE(review): exec() reports launch failures asynchronously, so a caller's
 * try/catch around this function will not observe them.
 * @param {string} url - URL to open
 */
function openBrowser(url) {
    let cmd;
    switch (process.platform) {
        case "darwin":
            cmd = `open "${url}"`;
            break;
        case "win32":
            cmd = `start "" "${url}"`;
            break;
        default:
            cmd = `xdg-open "${url}"`;
            break;
    }
    exec(cmd);
}
|
|
46
|
+
// ── Detect existing OpenClaw auth-profiles ──
/**
 * Scan ~/.openclaw/agents/<agent>/agent/auth-profiles.json files for an
 * "openai-codex" profile with an access token.
 * Detection is best-effort: unreadable files/directories are skipped.
 * @returns {{ access: string, refresh: string, expires: number, accountId: string } | null}
 */
export function detectOpenAICodexOAuth() {
    const openclawBase = join(homedir(), ".openclaw", "agents");
    if (!existsSync(openclawBase)) {
        return null;
    }
    try {
        for (const agent of readdirSync(openclawBase)) {
            const profilePath = join(openclawBase, agent, "agent", "auth-profiles.json");
            if (!existsSync(profilePath)) {
                continue;
            }
            let entries;
            try {
                const data = JSON.parse(readFileSync(profilePath, "utf-8"));
                // auth-profiles.json has { profiles: { "openai-codex:default": { ... }, ... } }
                entries = data?.profiles
                    ? Object.values(data.profiles)
                    : (Array.isArray(data) ? data : Object.values(data));
            }
            catch {
                continue;
            }
            for (const candidate of entries) {
                if (candidate?.provider !== "openai-codex" || !candidate.access) {
                    continue;
                }
                return {
                    access: candidate.access,
                    refresh: candidate.refresh ?? "",
                    expires: candidate.expires ?? 0,
                    accountId: candidate.accountId ?? "",
                };
            }
        }
    }
    catch {
        // ignore — detection is best-effort
    }
    return null;
}
|
|
83
|
+
// ── Token refresh ──
/**
 * Exchange a refresh token for a fresh access token at the OpenAI token endpoint.
 * @param {string} refreshToken - previously issued refresh token
 * @returns {Promise<{ access: string, refresh: string, expires: number }>}
 *   new access token, (possibly rotated) refresh token, and expiry epoch-ms
 * @throws {Error} when the endpoint responds with a non-2xx status
 */
export async function refreshOpenAICodexToken(refreshToken) {
    const form = new URLSearchParams({
        grant_type: "refresh_token",
        client_id: CLIENT_ID,
        refresh_token: refreshToken,
    });
    const res = await fetch(TOKEN_URL, {
        method: "POST",
        headers: { "Content-Type": "application/x-www-form-urlencoded" },
        body: form.toString(),
    });
    if (!res.ok) {
        const errText = await res.text();
        throw new Error(`Token refresh failed (${res.status}): ${errText}`);
    }
    const data = (await res.json());
    return {
        access: data.access_token,
        // Some responses omit refresh_token; keep using the old one then.
        refresh: data.refresh_token ?? refreshToken,
        expires: Date.now() + data.expires_in * 1000,
    };
}
|
|
105
|
+
// ── Main OAuth login flow ──
/**
 * Run the OpenAI Codex OAuth PKCE login flow.
 *
 * Starts a loopback HTTP server on 127.0.0.1:1455, opens the browser to the
 * OpenAI authorize URL, waits for the redirect callback, exchanges the code
 * for tokens, persists them via saveCredential(), and resolves the credential.
 *
 * Fix vs. previous version: the 60-second timeout timer is now cleared when
 * the flow settles. Previously the pending setTimeout kept the Node process
 * alive for up to a minute after a successful login and fired its callback
 * against an already-closed server.
 *
 * @param {(status: string) => void} [onStatus] - optional progress reporter
 * @returns {Promise<object>} the credential record that was saved
 */
export async function loginOpenAICodexOAuth(onStatus) {
    const { verifier, challenge } = generatePKCE();
    const state = randomBytes(16).toString("hex");
    return new Promise((resolve, reject) => {
        let timer = null;
        let settled = false;
        // Settle exactly once, always clearing the pending timeout.
        const settle = (fn, arg) => {
            if (settled)
                return;
            settled = true;
            if (timer !== null)
                clearTimeout(timer);
            fn(arg);
        };
        const server = createServer(async (req, res) => {
            const url = new URL(req.url ?? "/", "http://localhost");
            if (url.pathname !== "/auth/callback") {
                res.writeHead(404);
                res.end("Not found");
                return;
            }
            const code = url.searchParams.get("code");
            const returnedState = url.searchParams.get("state");
            if (!code) {
                res.writeHead(400, { "Content-Type": "text/html" });
                res.end("<h1>Error: No authorization code received</h1><p>Please try again.</p>");
                server.close();
                settle(reject, new Error("No authorization code received"));
                return;
            }
            // CSRF guard: the state echoed back must match what we generated.
            if (returnedState !== state) {
                res.writeHead(400, { "Content-Type": "text/html" });
                res.end("<h1>Error: State mismatch</h1><p>Please try again.</p>");
                server.close();
                settle(reject, new Error("OAuth state mismatch"));
                return;
            }
            onStatus?.("Exchanging code for tokens...");
            try {
                const tokenRes = await fetch(TOKEN_URL, {
                    method: "POST",
                    headers: { "Content-Type": "application/x-www-form-urlencoded" },
                    body: new URLSearchParams({
                        grant_type: "authorization_code",
                        client_id: CLIENT_ID,
                        code,
                        code_verifier: verifier,
                        redirect_uri: REDIRECT_URI,
                    }).toString(),
                });
                if (!tokenRes.ok) {
                    const errText = await tokenRes.text();
                    throw new Error(`Token exchange failed (${tokenRes.status}): ${errText}`);
                }
                const tokenData = (await tokenRes.json());
                // Extract accountId from the access-token JWT.
                // NOTE(review): accountId is computed but not stored on the
                // credential below — confirm whether saveCredential should
                // receive it.
                let accountId = "";
                try {
                    const payload = decodeJwtPayload(tokenData.access_token);
                    const authClaim = payload["https://api.openai.com/auth"];
                    if (authClaim?.chatgpt_account_id) {
                        accountId = authClaim.chatgpt_account_id;
                    }
                }
                catch {
                    // non-fatal — accountId is optional
                }
                res.writeHead(200, { "Content-Type": "text/html" });
                res.end(`
        <html>
          <body style="font-family: monospace; background: #1a1a2e; color: #0ff; display: flex; justify-content: center; align-items: center; height: 100vh; margin: 0;">
            <div style="text-align: center;">
              <h1>Authenticated!</h1>
              <p>You can close this tab and return to Codemaxxing.</p>
            </div>
          </body>
        </html>
      `);
                server.close();
                const expiresAt = Date.now() + tokenData.expires_in * 1000;
                const cred = {
                    provider: "openai",
                    method: "oauth",
                    apiKey: tokenData.access_token,
                    baseUrl: "https://chatgpt.com/backend-api",
                    label: "OpenAI (ChatGPT subscription)",
                    refreshToken: tokenData.refresh_token,
                    oauthExpires: expiresAt,
                    createdAt: new Date().toISOString(),
                };
                saveCredential(cred);
                settle(resolve, cred);
            }
            catch (err) {
                res.writeHead(500, { "Content-Type": "text/html" });
                res.end(`<h1>Error</h1><p>${err.message}</p>`);
                server.close();
                settle(reject, err);
            }
        });
        server.listen(1455, "127.0.0.1", () => {
            const params = new URLSearchParams({
                response_type: "code",
                client_id: CLIENT_ID,
                redirect_uri: REDIRECT_URI,
                scope: SCOPE,
                code_challenge: challenge,
                code_challenge_method: "S256",
                state,
                id_token_add_organizations: "true",
                codex_cli_simplified_flow: "true",
                originator: "codemaxxing",
            });
            const authUrl = `${AUTHORIZE_URL}?${params.toString()}`;
            onStatus?.("Opening browser for ChatGPT login...");
            try {
                openBrowser(authUrl);
            }
            catch {
                // NOTE(review): exec() errors surface asynchronously, so this
                // branch rarely fires; kept for parity with prior behavior.
                onStatus?.(`Could not open browser. Please visit:\n${authUrl}`);
            }
            onStatus?.("Waiting for authorization...");
            // Timeout after 60 seconds; cleared by settle() on any outcome.
            timer = setTimeout(() => {
                server.close();
                settle(reject, new Error("OAuth timed out after 60 seconds"));
            }, 60 * 1000);
        });
        server.on("error", (err) => {
            if (err.code === "EADDRINUSE") {
                settle(reject, new Error("Port 1455 is already in use. Close other auth flows and try again."));
            }
            else {
                settle(reject, err);
            }
        });
    });
}
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI Codex Responses API handler
|
|
3
|
+
*
|
|
4
|
+
* Uses the ChatGPT backend endpoint (https://chatgpt.com/backend-api/codex/responses)
|
|
5
|
+
* which is what Codex CLI, OpenClaw, and other tools use with ChatGPT Plus OAuth tokens.
|
|
6
|
+
*
|
|
7
|
+
* This endpoint supports the Responses API format but is separate from api.openai.com.
|
|
8
|
+
* Standard API keys use api.openai.com/v1/responses; Codex OAuth tokens use this.
|
|
9
|
+
*/
|
|
10
|
+
/** Options for a streaming request against the Codex Responses API. */
export interface ResponsesAPIOptions {
    /** Base URL; JWT (OAuth) tokens are rerouted to the ChatGPT backend. */
    baseUrl: string;
    /** API key or OAuth access token, sent as a Bearer credential. */
    apiKey: string;
    /** Model identifier to request. */
    model: string;
    /** Maximum completion tokens. NOTE(review): not visibly forwarded in the request body by the JS implementation — confirm. */
    maxTokens: number;
    /** System prompt; transmitted as the Responses API `instructions` field. */
    systemPrompt: string;
    /** Chat-completions-style message history (user/assistant/tool roles). */
    messages: any[];
    /** Chat-completions-style tool definitions ({ type: "function", function: {...} }). */
    tools: any[];
    /** Invoked with each streamed text delta. */
    onToken?: (token: string) => void;
    /** Invoked once per completed tool call with its parsed arguments. */
    onToolCall?: (name: string, args: Record<string, unknown>) => void;
}
|
|
21
|
+
/** A completed tool invocation collected from the response stream. */
interface ToolCall {
    /** Tool-call id from the stream (item.id / call_id), or a synthesized fallback. */
    id: string;
    /** Name of the function being invoked. */
    name: string;
    /** Parsed JSON arguments for the call. */
    input: Record<string, unknown>;
}
|
|
26
|
+
/**
 * Execute a chat request using the Codex Responses API endpoint
 * Streams text + handles tool calls
 *
 * Resolves with the full streamed text, any completed tool calls, and token
 * usage counts (0 when the stream reports no usage).
 */
export declare function chatWithResponsesAPI(options: ResponsesAPIOptions): Promise<{
    contentText: string;
    toolCalls: ToolCall[];
    promptTokens: number;
    completionTokens: number;
}>;
|
|
36
|
+
/**
 * Determine if a model should use the Responses API
 *
 * True for gpt-5*, any model name containing "codex", exactly o3/o3-mini/
 * o4-mini, and gpt-4.1*; false otherwise.
 */
export declare function shouldUseResponsesAPI(model: string): boolean;
export {};
|
|
@@ -0,0 +1,264 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI Codex Responses API handler
|
|
3
|
+
*
|
|
4
|
+
* Uses the ChatGPT backend endpoint (https://chatgpt.com/backend-api/codex/responses)
|
|
5
|
+
* which is what Codex CLI, OpenClaw, and other tools use with ChatGPT Plus OAuth tokens.
|
|
6
|
+
*
|
|
7
|
+
* This endpoint supports the Responses API format but is separate from api.openai.com.
|
|
8
|
+
* Standard API keys use api.openai.com/v1/responses; Codex OAuth tokens use this.
|
|
9
|
+
*/
|
|
10
|
+
/**
 * Execute a chat request using the Codex Responses API endpoint
 * Streams text + handles tool calls
 *
 * Translates chat-completions-style history/tools into Responses API input
 * items, POSTs a streaming request, and accumulates text deltas, completed
 * tool calls, and usage counts from the SSE events.
 *
 * NOTE(review): `maxTokens` is destructured from options but never placed in
 * the request body — confirm whether max_output_tokens should be sent.
 */
export async function chatWithResponsesAPI(options) {
    const { baseUrl, apiKey, model, maxTokens, systemPrompt, messages, tools, onToken, onToolCall, } = options;
    // Build input items from message history.
    // System messages are skipped here — the system prompt travels separately
    // as the `instructions` field of the request body below.
    const inputItems = [];
    for (const msg of messages) {
        if (msg.role === "system")
            continue;
        if (msg.role === "user") {
            inputItems.push({
                type: "message",
                role: "user",
                // Non-string content is serialized; `|| ""` guards against
                // JSON.stringify(undefined) returning undefined.
                content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content) || "",
            });
        }
        else if (msg.role === "assistant") {
            if (msg.tool_calls?.length > 0) {
                // Assistant turn that invoked tools: emit its text (if any),
                // then one function_call item per tool call.
                if (msg.content) {
                    inputItems.push({
                        type: "message",
                        role: "assistant",
                        content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content),
                    });
                }
                for (const tc of msg.tool_calls) {
                    inputItems.push({
                        type: "function_call",
                        id: tc.id,
                        // Accept both nested (tc.function.name) and flattened
                        // (tc.name / tc.input) tool-call shapes.
                        name: tc.function?.name || tc.name || "",
                        arguments: typeof tc.function?.arguments === "string"
                            ? tc.function.arguments
                            : JSON.stringify(tc.function?.arguments || tc.input || {}),
                    });
                }
            }
            else {
                // Plain assistant text turn.
                inputItems.push({
                    type: "message",
                    role: "assistant",
                    content: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content) || "",
                });
            }
        }
        else if (msg.role === "tool") {
            // Tool results map to function_call_output items keyed by call_id.
            inputItems.push({
                type: "function_call_output",
                call_id: msg.tool_call_id || "",
                output: typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content) || "",
            });
        }
    }
    // Build tools in Responses API format: name/description/parameters are
    // flattened to the top level instead of nested under `function`.
    const responseTools = tools
        .filter((t) => t.type === "function")
        .map((t) => ({
        type: "function",
        name: t.function?.name || "",
        description: t.function?.description || "",
        parameters: t.function?.parameters || { type: "object", properties: {} },
    }));
    // Determine the endpoint URL
    // OAuth tokens (JWTs, not sk- keys) must use ChatGPT backend
    const isOAuthToken = !apiKey.startsWith("sk-") && !apiKey.startsWith("sess-");
    let effectiveBaseUrl = baseUrl;
    if (isOAuthToken && !baseUrl.includes("chatgpt.com")) {
        effectiveBaseUrl = "https://chatgpt.com/backend-api";
    }
    let endpoint;
    if (effectiveBaseUrl.includes("chatgpt.com/backend-api")) {
        // ChatGPT backend exposes the Responses API under /codex/responses.
        endpoint = effectiveBaseUrl.replace(/\/$/, "") + "/codex/responses";
    }
    else {
        endpoint = effectiveBaseUrl.replace(/\/$/, "") + "/responses";
    }
    // Build request body. store: false asks the server not to persist the
    // response; input falls back to "" when the history produced no items.
    const body = {
        model,
        instructions: systemPrompt,
        input: inputItems.length > 0 ? inputItems : "",
        stream: true,
        store: false,
    };
    if (responseTools.length > 0) {
        body.tools = responseTools;
    }
    // Make the streaming request
    const response = await fetch(endpoint, {
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "Authorization": `Bearer ${apiKey}`,
            "User-Agent": "codemaxxing/1.0",
        },
        body: JSON.stringify(body),
    });
    if (!response.ok) {
        const errText = await response.text();
        throw new Error(`Responses API error (${response.status}): ${errText}`);
    }
    // Parse SSE stream
    let contentText = "";
    let promptTokens = 0;
    let completionTokens = 0;
    const toolCalls = [];
    // State for the tool call currently being streamed: id/name arrive in
    // output_item.added, arguments arrive as deltas afterwards.
    let currentToolCallId = "";
    let currentToolCallName = "";
    let toolArgumentsBuffer = "";
    const reader = response.body?.getReader();
    if (!reader)
        throw new Error("No response body");
    const decoder = new TextDecoder();
    let buffer = "";
    while (true) {
        const { done, value } = await reader.read();
        if (done)
            break;
        buffer += decoder.decode(value, { stream: true });
        // Process complete SSE events
        // NOTE(review): any bytes still in `buffer` when the stream ends are
        // dropped — assumes the server terminates every event with a newline.
        const lines = buffer.split("\n");
        buffer = lines.pop() || ""; // Keep incomplete line in buffer
        for (const line of lines) {
            if (!line.startsWith("data: "))
                continue;
            const data = line.slice(6).trim();
            if (data === "[DONE]")
                continue;
            let event;
            try {
                event = JSON.parse(data);
            }
            catch {
                // Ignore non-JSON keepalive/comment lines.
                continue;
            }
            const eventType = event.type;
            // Text content delta
            if (eventType === "response.output_text.delta") {
                const delta = event.delta;
                if (delta) {
                    contentText += delta;
                    onToken?.(delta);
                }
            }
            // Also handle the simpler delta format
            if (eventType === "response.text_delta") {
                const delta = event.delta;
                if (delta) {
                    contentText += delta;
                    onToken?.(delta);
                }
            }
            // Function call item added — reset the per-call streaming state.
            if (eventType === "response.output_item.added") {
                const item = event.item;
                if (item?.type === "function_call") {
                    currentToolCallId = item.id || item.call_id || `tool_${Date.now()}`;
                    currentToolCallName = item.name || "";
                    toolArgumentsBuffer = "";
                }
            }
            // Function call arguments streaming
            if (eventType === "response.function_call_arguments.delta") {
                const delta = event.delta;
                if (delta) {
                    toolArgumentsBuffer += delta;
                }
            }
            // Function call arguments done — prefer the event's final
            // `arguments` string, falling back to the accumulated deltas.
            if (eventType === "response.function_call_arguments.done") {
                try {
                    const args = JSON.parse(event.arguments || toolArgumentsBuffer);
                    toolCalls.push({
                        id: currentToolCallId,
                        name: currentToolCallName,
                        input: args,
                    });
                    onToolCall?.(currentToolCallName, args);
                }
                catch {
                    // Try buffer if event.arguments isn't set
                    try {
                        const args = JSON.parse(toolArgumentsBuffer);
                        toolCalls.push({
                            id: currentToolCallId,
                            name: currentToolCallName,
                            input: args,
                        });
                        onToolCall?.(currentToolCallName, args);
                    }
                    catch {
                        // Skip malformed tool call
                    }
                }
                toolArgumentsBuffer = "";
            }
            // Output item done (alternative tool call completion)
            if (eventType === "response.output_item.done") {
                const item = event.item;
                if (item?.type === "function_call" && item.arguments) {
                    // Check if we already captured this from arguments.done
                    const alreadyCaptured = toolCalls.some(tc => tc.id === (item.id || item.call_id));
                    if (!alreadyCaptured) {
                        try {
                            const args = JSON.parse(item.arguments);
                            toolCalls.push({
                                id: item.id || item.call_id || currentToolCallId,
                                name: item.name || currentToolCallName,
                                input: args,
                            });
                            onToolCall?.(item.name || currentToolCallName, args);
                        }
                        catch {
                            // Skip
                        }
                    }
                }
            }
            // Response completed — extract usage. Accepts both the Responses
            // API names (input_tokens/output_tokens) and chat-completions
            // names (prompt_tokens/completion_tokens).
            if (eventType === "response.completed") {
                const resp = event.response;
                const usage = resp?.usage || event.usage;
                if (usage) {
                    promptTokens = usage.input_tokens || usage.prompt_tokens || 0;
                    completionTokens = usage.output_tokens || usage.completion_tokens || 0;
                }
            }
        }
    }
    return {
        contentText,
        toolCalls,
        promptTokens,
        completionTokens,
    };
}
|
|
247
|
+
/**
 * Determine if a model should use the Responses API
 *
 * True for GPT-5.x and Codex models (which require it for OAuth tokens),
 * the o3/o3-mini/o4-mini reasoning models, and GPT-4.1.x; false otherwise.
 */
export function shouldUseResponsesAPI(model) {
    const lower = model.toLowerCase();
    // o-series reasoning models are matched exactly, the rest by prefix/substring.
    const exactReasoningModels = ["o3", "o3-mini", "o4-mini"];
    return (lower.startsWith("gpt-5") ||
        lower.includes("codex") ||
        exactReasoningModels.includes(lower) ||
        lower.startsWith("gpt-4.1"));
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "codemaxxing",
|
|
3
|
-
"version": "1.0
|
|
3
|
+
"version": "1.1.0",
|
|
4
4
|
"description": "Open-source terminal coding agent. Connect any LLM. Max your code.",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"bin": {
|
|
@@ -27,7 +27,7 @@
|
|
|
27
27
|
"author": "Marcos Vallejo",
|
|
28
28
|
"license": "MIT",
|
|
29
29
|
"dependencies": {
|
|
30
|
-
"@anthropic-ai/sdk": "^0.
|
|
30
|
+
"@anthropic-ai/sdk": "^0.79.0",
|
|
31
31
|
"@modelcontextprotocol/sdk": "^1.27.1",
|
|
32
32
|
"@types/react": "^19.2.14",
|
|
33
33
|
"better-sqlite3": "^12.6.2",
|