codemaxxing 1.0.17 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,264 @@
1
/**
 * OpenAI Codex Responses API handler
 *
 * Uses the ChatGPT backend endpoint (https://chatgpt.com/backend-api/codex/responses)
 * which is what Codex CLI, OpenClaw, and other tools use with ChatGPT Plus OAuth tokens.
 *
 * This endpoint supports the Responses API format but is separate from api.openai.com.
 * Standard API keys use api.openai.com/v1/responses; Codex OAuth tokens use this.
 */
/**
 * Convert Chat Completions-style message history into Responses API input items.
 * System messages are skipped here; they are sent via `instructions` instead.
 * @param {Array<object>} messages - chat history in Chat Completions shape
 * @returns {Array<object>} Responses API input items
 */
function buildInputItems(messages) {
    const inputItems = [];
    // Non-string content is serialized; JSON.stringify(undefined) yields
    // undefined, hence the `|| ""` fallback.
    const asText = (content) => typeof content === "string" ? content : JSON.stringify(content) || "";
    for (const msg of messages) {
        if (msg.role === "system")
            continue;
        if (msg.role === "user") {
            inputItems.push({
                type: "message",
                role: "user",
                content: asText(msg.content),
            });
        }
        else if (msg.role === "assistant") {
            if (msg.tool_calls?.length > 0) {
                // Text preceding tool calls is emitted as its own message item.
                if (msg.content) {
                    inputItems.push({
                        type: "message",
                        role: "assistant",
                        content: asText(msg.content),
                    });
                }
                for (const tc of msg.tool_calls) {
                    inputItems.push({
                        type: "function_call",
                        // The Responses API pairs function_call with its
                        // function_call_output via `call_id` (the Chat
                        // Completions tool-call id), not `id`.
                        call_id: tc.id,
                        name: tc.function?.name || tc.name || "",
                        arguments: typeof tc.function?.arguments === "string"
                            ? tc.function.arguments
                            : JSON.stringify(tc.function?.arguments || tc.input || {}),
                    });
                }
            }
            else {
                inputItems.push({
                    type: "message",
                    role: "assistant",
                    content: asText(msg.content),
                });
            }
        }
        else if (msg.role === "tool") {
            inputItems.push({
                type: "function_call_output",
                call_id: msg.tool_call_id || "",
                output: asText(msg.content),
            });
        }
    }
    return inputItems;
}
/**
 * Convert Chat Completions tool definitions to Responses API tool format
 * (flattened: name/description/parameters at the top level).
 * @param {Array<object>} tools - Chat Completions tool definitions
 * @returns {Array<object>} Responses API tool definitions
 */
function buildResponseTools(tools) {
    return tools
        .filter((t) => t.type === "function")
        .map((t) => ({
        type: "function",
        name: t.function?.name || "",
        description: t.function?.description || "",
        parameters: t.function?.parameters || { type: "object", properties: {} },
    }));
}
/**
 * Resolve the request URL for the given credentials.
 * OAuth tokens (JWTs — anything not starting with sk-/sess-) must use the
 * ChatGPT backend; the codex path is appended for that host.
 * @param {string} baseUrl - caller-provided base URL
 * @param {string} apiKey - API key or OAuth token
 * @returns {string} full endpoint URL
 */
function resolveEndpoint(baseUrl, apiKey) {
    const isOAuthToken = !apiKey.startsWith("sk-") && !apiKey.startsWith("sess-");
    let effectiveBaseUrl = baseUrl;
    if (isOAuthToken && !baseUrl.includes("chatgpt.com")) {
        effectiveBaseUrl = "https://chatgpt.com/backend-api";
    }
    const trimmed = effectiveBaseUrl.replace(/\/$/, "");
    return trimmed.includes("chatgpt.com/backend-api")
        ? trimmed + "/codex/responses"
        : trimmed + "/responses";
}
/**
 * Execute a chat request using the Codex Responses API endpoint.
 * Streams text (via onToken) and collects tool calls (via onToolCall).
 *
 * @param {object} options
 * @param {string} options.baseUrl - API base URL (overridden for OAuth tokens)
 * @param {string} options.apiKey - API key or ChatGPT OAuth token
 * @param {string} options.model - model name
 * @param {number} [options.maxTokens] - output cap, sent as max_output_tokens
 * @param {string} options.systemPrompt - sent as Responses API `instructions`
 * @param {Array<object>} options.messages - history in Chat Completions shape
 * @param {Array<object>} [options.tools] - Chat Completions tool definitions
 * @param {(token: string) => void} [options.onToken] - per text delta
 * @param {(name: string, args: object) => void} [options.onToolCall] - per completed tool call
 * @returns {Promise<{contentText: string, toolCalls: Array<{id: string, name: string, input: object}>, promptTokens: number, completionTokens: number}>}
 * @throws {Error} on a non-2xx response or a missing response body
 */
export async function chatWithResponsesAPI(options) {
    // Default tools to [] — previously a missing `tools` crashed on .filter().
    const { baseUrl, apiKey, model, maxTokens, systemPrompt, messages, tools = [], onToken, onToolCall, } = options;
    const inputItems = buildInputItems(messages);
    const responseTools = buildResponseTools(tools);
    const endpoint = resolveEndpoint(baseUrl, apiKey);
    // Build request body
    const body = {
        model,
        instructions: systemPrompt,
        input: inputItems.length > 0 ? inputItems : "",
        stream: true,
        store: false,
    };
    if (responseTools.length > 0) {
        body.tools = responseTools;
    }
    if (maxTokens) {
        // Was destructured but never sent; the Responses API caps output here.
        body.max_output_tokens = maxTokens;
    }
    // Make the streaming request
    const response = await fetch(endpoint, {
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "Authorization": `Bearer ${apiKey}`,
            "User-Agent": "codemaxxing/1.0",
        },
        body: JSON.stringify(body),
    });
    if (!response.ok) {
        const errText = await response.text();
        throw new Error(`Responses API error (${response.status}): ${errText}`);
    }
    // ---- Parse the SSE stream ----
    let contentText = "";
    let promptTokens = 0;
    let completionTokens = 0;
    const toolCalls = [];
    // Streaming tool-call state: id/name arrive on output_item.added,
    // arguments arrive as deltas and finish on function_call_arguments.done.
    let currentToolCallId = "";
    let currentToolCallName = "";
    let toolArgumentsBuffer = "";
    const reader = response.body?.getReader();
    if (!reader)
        throw new Error("No response body");
    const decoder = new TextDecoder();
    let buffer = "";
    while (true) {
        const { done, value } = await reader.read();
        if (done)
            break;
        buffer += decoder.decode(value, { stream: true });
        // Process complete SSE events; keep any incomplete line in the buffer
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
            if (!line.startsWith("data: "))
                continue;
            const data = line.slice(6).trim();
            if (data === "[DONE]")
                continue;
            let event;
            try {
                event = JSON.parse(data);
            }
            catch {
                continue; // skip malformed SSE payloads
            }
            const eventType = event.type;
            // Text content delta (both event spellings are emitted by servers)
            if (eventType === "response.output_text.delta" || eventType === "response.text_delta") {
                const delta = event.delta;
                if (delta) {
                    contentText += delta;
                    onToken?.(delta);
                }
            }
            // New function-call item: remember id/name, reset the args buffer
            if (eventType === "response.output_item.added") {
                const item = event.item;
                if (item?.type === "function_call") {
                    currentToolCallId = item.id || item.call_id || `tool_${Date.now()}`;
                    currentToolCallName = item.name || "";
                    toolArgumentsBuffer = "";
                }
            }
            // Function call arguments streaming
            if (eventType === "response.function_call_arguments.delta") {
                const delta = event.delta;
                if (delta) {
                    toolArgumentsBuffer += delta;
                }
            }
            // Arguments complete — parse and record the tool call
            if (eventType === "response.function_call_arguments.done") {
                try {
                    const args = JSON.parse(event.arguments || toolArgumentsBuffer);
                    toolCalls.push({
                        id: currentToolCallId,
                        name: currentToolCallName,
                        input: args,
                    });
                    onToolCall?.(currentToolCallName, args);
                }
                catch {
                    // event.arguments may be absent/partial — fall back to the buffer
                    try {
                        const args = JSON.parse(toolArgumentsBuffer);
                        toolCalls.push({
                            id: currentToolCallId,
                            name: currentToolCallName,
                            input: args,
                        });
                        onToolCall?.(currentToolCallName, args);
                    }
                    catch {
                        // Skip malformed tool call
                    }
                }
                toolArgumentsBuffer = "";
            }
            // Alternative completion path: whole item arrives on output_item.done
            if (eventType === "response.output_item.done") {
                const item = event.item;
                if (item?.type === "function_call" && item.arguments) {
                    // Skip if already captured via function_call_arguments.done
                    const itemId = item.id || item.call_id;
                    const alreadyCaptured = toolCalls.some((tc) => tc.id === itemId);
                    if (!alreadyCaptured) {
                        try {
                            const args = JSON.parse(item.arguments);
                            toolCalls.push({
                                id: itemId || currentToolCallId,
                                name: item.name || currentToolCallName,
                                input: args,
                            });
                            onToolCall?.(item.name || currentToolCallName, args);
                        }
                        catch {
                            // Skip malformed tool call
                        }
                    }
                }
            }
            // Terminal event — extract token usage
            if (eventType === "response.completed") {
                const usage = event.response?.usage || event.usage;
                if (usage) {
                    promptTokens = usage.input_tokens || usage.prompt_tokens || 0;
                    completionTokens = usage.output_tokens || usage.completion_tokens || 0;
                }
            }
        }
    }
    return {
        contentText,
        toolCalls,
        promptTokens,
        completionTokens,
    };
}
247
/**
 * Determine if a model should use the Responses API.
 * True for GPT-5.x, Codex-branded models, the o3/o4-mini reasoning models,
 * and GPT-4.1 (which works on both APIs but prefers Responses for OAuth).
 * @param {string} model - model identifier (case-insensitive)
 * @returns {boolean} whether to route through the Responses API
 */
export function shouldUseResponsesAPI(model) {
    const normalized = model.toLowerCase();
    // o-series reasoning models that work with the Responses API
    const reasoningModels = new Set(["o3", "o3-mini", "o4-mini"]);
    return (normalized.startsWith("gpt-5") ||
        normalized.includes("codex") ||
        reasoningModels.has(normalized) ||
        normalized.startsWith("gpt-4.1"));
}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "codemaxxing",
3
- "version": "1.0.17",
3
+ "version": "1.1.0",
4
4
  "description": "Open-source terminal coding agent. Connect any LLM. Max your code.",
5
5
  "main": "dist/index.js",
6
6
  "bin": {
@@ -27,7 +27,7 @@
27
27
  "author": "Marcos Vallejo",
28
28
  "license": "MIT",
29
29
  "dependencies": {
30
- "@anthropic-ai/sdk": "^0.78.0",
30
+ "@anthropic-ai/sdk": "^0.79.0",
31
31
  "@modelcontextprotocol/sdk": "^1.27.1",
32
32
  "@types/react": "^19.2.14",
33
33
  "better-sqlite3": "^12.6.2",