open-sse 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/README.md +180 -0
  2. package/config/constants.js +206 -0
  3. package/config/defaultThinkingSignature.js +7 -0
  4. package/config/ollamaModels.js +19 -0
  5. package/config/providerModels.js +161 -0
  6. package/handlers/chatCore.js +277 -0
  7. package/handlers/responsesHandler.js +69 -0
  8. package/index.js +69 -0
  9. package/package.json +44 -0
  10. package/services/accountFallback.js +148 -0
  11. package/services/combo.js +69 -0
  12. package/services/compact.js +64 -0
  13. package/services/model.js +109 -0
  14. package/services/provider.js +237 -0
  15. package/services/tokenRefresh.js +542 -0
  16. package/services/usage.js +398 -0
  17. package/translator/formats.js +12 -0
  18. package/translator/from-openai/claude.js +341 -0
  19. package/translator/from-openai/gemini.js +469 -0
  20. package/translator/from-openai/openai-responses.js +361 -0
  21. package/translator/helpers/claudeHelper.js +179 -0
  22. package/translator/helpers/geminiHelper.js +131 -0
  23. package/translator/helpers/openaiHelper.js +80 -0
  24. package/translator/helpers/responsesApiHelper.js +103 -0
  25. package/translator/helpers/toolCallHelper.js +111 -0
  26. package/translator/index.js +167 -0
  27. package/translator/to-openai/claude.js +238 -0
  28. package/translator/to-openai/gemini.js +151 -0
  29. package/translator/to-openai/openai-responses.js +140 -0
  30. package/translator/to-openai/openai.js +371 -0
  31. package/utils/bypassHandler.js +258 -0
  32. package/utils/error.js +133 -0
  33. package/utils/ollamaTransform.js +82 -0
  34. package/utils/requestLogger.js +217 -0
  35. package/utils/stream.js +274 -0
  36. package/utils/streamHandler.js +131 -0
package/utils/bypassHandler.js ADDED
@@ -0,0 +1,258 @@
+ import { detectFormat } from "../services/provider.js";
+ import { translateResponse, initState } from "../translator/index.js";
+ import { FORMATS } from "../translator/formats.js";
+ import { SKIP_PATTERNS } from "../config/constants.js";
+ import { formatSSE } from "./stream.js";
+
+ /**
+  * Check for bypass patterns (warmup, skip) - return fake response without calling provider
+  * Supports both streaming and non-streaming responses
+  * Returns response in the correct sourceFormat using translator
+  *
+  * @param {object} body - Request body
+  * @param {string} model - Model name
+  * @returns {object|null} { success: true, response: Response } or null if not bypass
+  */
+ export function handleBypassRequest(body, model) {
+   const messages = body.messages;
+   if (!messages?.length) return null;
+
+   // Helper to extract text from content
+   const getText = (content) => {
+     if (typeof content === "string") return content;
+     if (Array.isArray(content)) {
+       return content.filter(c => c.type === "text").map(c => c.text).join(" ");
+     }
+     return "";
+   };
+
+   let shouldBypass = false;
+
+   // Check warmup: first message "Warmup"
+   const firstText = getText(messages[0]?.content);
+   if (firstText === "Warmup") shouldBypass = true;
+
+   // Check skip patterns
+   if (!shouldBypass && SKIP_PATTERNS?.length) {
+     const allText = messages.map(m => getText(m.content)).join(" ");
+     shouldBypass = SKIP_PATTERNS.some(p => allText.includes(p));
+   }
+
+   if (!shouldBypass) return null;
+
+   // Detect source format and stream mode
+   const sourceFormat = detectFormat(body);
+   const stream = body.stream !== false;
+
+   // Create bypass response using translator
+   if (stream) {
+     return createStreamingResponse(sourceFormat, model);
+   } else {
+     return createNonStreamingResponse(sourceFormat, model);
+   }
+ }
+
+ /**
+  * Create OpenAI standard format response
+  */
+ function createOpenAIResponse(model) {
+   const id = `chatcmpl-${Date.now()}`;
+   const created = Math.floor(Date.now() / 1000);
+   const text = "CLI Command Execution: Clear Terminal";
+
+   return {
+     id,
+     object: "chat.completion",
+     created,
+     model,
+     choices: [{
+       index: 0,
+       message: {
+         role: "assistant",
+         content: text
+       },
+       finish_reason: "stop"
+     }],
+     usage: {
+       prompt_tokens: 1,
+       completion_tokens: 1,
+       total_tokens: 2
+     }
+   };
+ }
+
+ /**
+  * Create non-streaming response with translation
+  * Use translator to convert OpenAI → sourceFormat
+  */
+ function createNonStreamingResponse(sourceFormat, model) {
+   const openaiResponse = createOpenAIResponse(model);
+
+   // If sourceFormat is OpenAI, return directly
+   if (sourceFormat === FORMATS.OPENAI) {
+     return {
+       success: true,
+       response: new Response(JSON.stringify(openaiResponse), {
+         headers: {
+           "Content-Type": "application/json",
+           "Access-Control-Allow-Origin": "*"
+         }
+       })
+     };
+   }
+
+   // Use translator to convert: simulate streaming then collect all chunks
+   const state = initState(sourceFormat);
+   state.model = model;
+
+   const openaiChunks = createOpenAIStreamingChunks(openaiResponse);
+   const allTranslated = [];
+
+   for (const chunk of openaiChunks) {
+     const translated = translateResponse(FORMATS.OPENAI, sourceFormat, chunk, state);
+     if (translated?.length > 0) {
+       allTranslated.push(...translated);
+     }
+   }
+
+   // Flush remaining
+   const flushed = translateResponse(FORMATS.OPENAI, sourceFormat, null, state);
+   if (flushed?.length > 0) {
+     allTranslated.push(...flushed);
+   }
+
+   // For non-streaming, merge all chunks into final response
+   const finalResponse = mergeChunksToResponse(allTranslated, sourceFormat);
+
+   return {
+     success: true,
+     response: new Response(JSON.stringify(finalResponse), {
+       headers: {
+         "Content-Type": "application/json",
+         "Access-Control-Allow-Origin": "*"
+       }
+     })
+   };
+ }
+
+ /**
+  * Create streaming response with translation
+  * Use translator to convert OpenAI chunks → sourceFormat
+  */
+ function createStreamingResponse(sourceFormat, model) {
+   const openaiResponse = createOpenAIResponse(model);
+   const state = initState(sourceFormat);
+   state.model = model;
+
+   // Create OpenAI streaming chunks
+   const openaiChunks = createOpenAIStreamingChunks(openaiResponse);
+
+   // Translate each chunk to sourceFormat using translator
+   const translatedChunks = [];
+
+   for (const chunk of openaiChunks) {
+     const translated = translateResponse(FORMATS.OPENAI, sourceFormat, chunk, state);
+     if (translated?.length > 0) {
+       for (const item of translated) {
+         translatedChunks.push(formatSSE(item, sourceFormat));
+       }
+     }
+   }
+
+   // Flush remaining events
+   const flushed = translateResponse(FORMATS.OPENAI, sourceFormat, null, state);
+   if (flushed?.length > 0) {
+     for (const item of flushed) {
+       translatedChunks.push(formatSSE(item, sourceFormat));
+     }
+   }
+
+   // Add [DONE]
+   translatedChunks.push("data: [DONE]\n\n");
+
+   return {
+     success: true,
+     response: new Response(translatedChunks.join(""), {
+       headers: {
+         "Content-Type": "text/event-stream",
+         "Cache-Control": "no-cache",
+         "Connection": "keep-alive",
+         "Access-Control-Allow-Origin": "*"
+       }
+     })
+   };
+ }
+
+ /**
+  * Merge translated chunks into final response object (for non-streaming)
+  * Takes the last complete chunk as the final response
+  */
+ function mergeChunksToResponse(chunks, sourceFormat) {
+   if (!chunks || chunks.length === 0) {
+     return createOpenAIResponse("unknown");
+   }
+
+   // For most formats, the last chunk before done contains the complete response
+   // Find the most complete chunk (usually the last one with content)
+   let finalChunk = chunks[chunks.length - 1];
+
+   // For Claude format, find the message_stop or final message
+   if (sourceFormat === FORMATS.CLAUDE) {
+     const messageStop = chunks.find(c => c.type === "message_stop");
+     if (messageStop) {
+       // Reconstruct complete message from chunks
+       const contentDelta = chunks.find(c => c.type === "content_block_delta");
+       const messageDelta = chunks.find(c => c.type === "message_delta");
+       const messageStart = chunks.find(c => c.type === "message_start");
+
+       if (messageStart?.message) {
+         finalChunk = messageStart.message;
+         // Fill in the text content from the delta (message_start carries an empty content array)
+         if (contentDelta?.delta?.text) {
+           finalChunk.content = [{ type: "text", text: contentDelta.delta.text }];
+         }
+         // Merge usage if available
+         if (messageDelta?.usage) {
+           finalChunk.usage = messageDelta.usage;
+         }
+       }
+     }
+   }
+
+   return finalChunk;
+ }
+
+ /**
+  * Create OpenAI streaming chunks from complete response
+  */
+ function createOpenAIStreamingChunks(completeResponse) {
+   const { id, created, model, choices } = completeResponse;
+   const content = choices[0].message.content;
+
+   return [
+     // Chunk with content
+     {
+       id,
+       object: "chat.completion.chunk",
+       created,
+       model,
+       choices: [{
+         index: 0,
+         delta: {
+           role: "assistant",
+           content
+         },
+         finish_reason: null
+       }]
+     },
+     // Final chunk with finish_reason
+     {
+       id,
+       object: "chat.completion.chunk",
+       created,
+       model,
+       choices: [{
+         index: 0,
+         delta: {},
+         finish_reason: "stop"
+       }],
+       usage: completeResponse.usage
+     }
+   ];
+ }
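
Usage note: a minimal sketch of how a request handler might call handleBypassRequest before contacting any provider. The wrapper below (handleChat, forwardToProvider, the fallback model name) is illustrative and not part of this package; the helper itself returns { success: true, response } or null, as documented above.

import { handleBypassRequest } from "./utils/bypassHandler.js";

// Hypothetical wiring; only handleBypassRequest comes from this file.
async function handleChat(body, forwardToProvider) {
  const model = body.model || "default-model";
  // Warmup / skip-pattern requests short-circuit with a canned response
  const bypass = handleBypassRequest(body, model);
  if (bypass) return bypass.response;
  // Otherwise let the caller-supplied function reach the real provider
  return forwardToProvider(body, model);
}

For example, a body of { messages: [{ role: "user", content: "Warmup" }], stream: false } would return a non-streaming JSON response without any upstream call.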
package/utils/error.js ADDED
@@ -0,0 +1,133 @@
+ // OpenAI-compatible error types mapping
+ const ERROR_TYPES = {
+   400: { type: "invalid_request_error", code: "bad_request" },
+   401: { type: "authentication_error", code: "invalid_api_key" },
+   403: { type: "permission_error", code: "insufficient_quota" },
+   404: { type: "invalid_request_error", code: "model_not_found" },
+   429: { type: "rate_limit_error", code: "rate_limit_exceeded" },
+   500: { type: "server_error", code: "internal_server_error" },
+   502: { type: "server_error", code: "bad_gateway" },
+   503: { type: "server_error", code: "service_unavailable" },
+   504: { type: "server_error", code: "gateway_timeout" }
+ };
+
+ /**
+  * Build OpenAI-compatible error response body
+  * @param {number} statusCode - HTTP status code
+  * @param {string} message - Error message
+  * @returns {object} Error response object
+  */
+ export function buildErrorBody(statusCode, message) {
+   const errorInfo = ERROR_TYPES[statusCode] ||
+     (statusCode >= 500
+       ? { type: "server_error", code: "internal_server_error" }
+       : { type: "invalid_request_error", code: "" });
+
+   return {
+     error: {
+       message: message || getDefaultMessage(statusCode),
+       type: errorInfo.type,
+       code: errorInfo.code
+     }
+   };
+ }
+
+ /**
+  * Get default error message for status code
+  */
+ function getDefaultMessage(statusCode) {
+   const messages = {
+     400: "Bad request",
+     401: "Invalid API key provided",
+     403: "You exceeded your current quota",
+     404: "Model not found",
+     429: "Rate limit exceeded",
+     500: "Internal server error",
+     502: "Bad gateway - upstream provider error",
+     503: "Service temporarily unavailable",
+     504: "Gateway timeout"
+   };
+   return messages[statusCode] || "An error occurred";
+ }
+
+ /**
+  * Create error Response object (for non-streaming)
+  * @param {number} statusCode - HTTP status code
+  * @param {string} message - Error message
+  * @returns {Response} HTTP Response object
+  */
+ export function errorResponse(statusCode, message) {
+   return new Response(JSON.stringify(buildErrorBody(statusCode, message)), {
+     status: statusCode,
+     headers: {
+       "Content-Type": "application/json",
+       "Access-Control-Allow-Origin": "*"
+     }
+   });
+ }
+
+ /**
+  * Write error to SSE stream (for streaming)
+  * @param {WritableStreamDefaultWriter} writer - Stream writer
+  * @param {number} statusCode - HTTP status code
+  * @param {string} message - Error message
+  */
+ export async function writeStreamError(writer, statusCode, message) {
+   const errorBody = buildErrorBody(statusCode, message);
+   const encoder = new TextEncoder();
+   await writer.write(encoder.encode(`data: ${JSON.stringify(errorBody)}\n\n`));
+ }
+
+ /**
+  * Parse upstream provider error response
+  * @param {Response} response - Fetch response from provider
+  * @returns {Promise<{statusCode: number, message: string}>}
+  */
+ export async function parseUpstreamError(response) {
+   let message = "";
+
+   try {
+     const text = await response.text();
+
+     // Try parse as JSON
+     try {
+       const json = JSON.parse(text);
+       message = json.error?.message || json.message || json.error || text;
+     } catch {
+       message = text;
+     }
+   } catch {
+     message = `Upstream error: ${response.status}`;
+   }
+
+   return {
+     statusCode: response.status,
+     message: typeof message === "string" ? message : JSON.stringify(message)
+   };
+ }
+
+ /**
+  * Create error result for chatCore handler
+  * @param {number} statusCode - HTTP status code
+  * @param {string} message - Error message
+  * @returns {{ success: false, status: number, error: string, response: Response }}
+  */
+ export function createErrorResult(statusCode, message) {
+   return {
+     success: false,
+     status: statusCode,
+     error: message,
+     response: errorResponse(statusCode, message)
+   };
+ }
+
+ /**
+  * Format provider error with context
+  * @param {Error} error - Original error
+  * @param {string} provider - Provider name
+  * @param {string} model - Model name
+  * @returns {string} Formatted error message
+  */
+ export function formatProviderError(error, provider, model) {
+   const message = error?.message || "Unknown error";
+   // Prefix with provider/model so upstream failures are attributable
+   const scope = [provider, model].filter(Boolean).join("/");
+   return scope ? `[${scope}] ${message}` : message;
+ }
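
Usage note: a short sketch of how these helpers can be combined when an upstream call fails. The callProvider wrapper and its url/init arguments are illustrative; parseUpstreamError and createErrorResult are used with the signatures documented above.

import { parseUpstreamError, createErrorResult } from "./utils/error.js";

// Hypothetical provider call; only the error helpers come from this file.
async function callProvider(url, init) {
  const res = await fetch(url, init);
  if (!res.ok) {
    // Extract the upstream message, then wrap it in an OpenAI-style error result
    const { statusCode, message } = await parseUpstreamError(res);
    return createErrorResult(statusCode, message);
  }
  return { success: true, response: res };
}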
package/utils/ollamaTransform.js ADDED
@@ -0,0 +1,82 @@
+ // Transform OpenAI SSE stream to Ollama JSON lines format
+ export function transformToOllama(response, model) {
+   let buffer = "";
+   let pendingToolCalls = {};
+
+   const transform = new TransformStream({
+     transform(chunk, controller) {
+       const text = new TextDecoder().decode(chunk);
+       buffer += text;
+       const lines = buffer.split("\n");
+       buffer = lines.pop() || "";
+
+       for (const line of lines) {
+         if (!line.startsWith("data:")) continue;
+         const data = line.slice(5).trim();
+
+         if (data === "[DONE]") {
+           const ollamaEnd = JSON.stringify({ model, message: { role: "assistant", content: "" }, done: true }) + "\n";
+           controller.enqueue(new TextEncoder().encode(ollamaEnd));
+           return;
+         }
+
+         try {
+           const parsed = JSON.parse(data);
+           const delta = parsed.choices?.[0]?.delta || {};
+           const content = delta.content || "";
+           const toolCalls = delta.tool_calls;
+
+           if (toolCalls) {
+             for (const tc of toolCalls) {
+               const idx = tc.index;
+               if (!pendingToolCalls[idx]) {
+                 pendingToolCalls[idx] = { id: tc.id, function: { name: "", arguments: "" } };
+               }
+               if (tc.function?.name) pendingToolCalls[idx].function.name += tc.function.name;
+               if (tc.function?.arguments) pendingToolCalls[idx].function.arguments += tc.function.arguments;
+             }
+           }
+
+           if (content) {
+             const ollama = JSON.stringify({ model, message: { role: "assistant", content }, done: false }) + "\n";
+             controller.enqueue(new TextEncoder().encode(ollama));
+           }
+
+           const finishReason = parsed.choices?.[0]?.finish_reason;
+           if (finishReason === "tool_calls" || finishReason === "stop") {
+             const toolCallsArr = Object.values(pendingToolCalls);
+             if (toolCallsArr.length > 0) {
+               const formattedCalls = toolCallsArr.map(tc => ({
+                 function: {
+                   name: tc.function.name,
+                   arguments: JSON.parse(tc.function.arguments || "{}")
+                 }
+               }));
+               const ollama = JSON.stringify({
+                 model,
+                 message: { role: "assistant", content: "", tool_calls: formattedCalls },
+                 done: true
+               }) + "\n";
+               controller.enqueue(new TextEncoder().encode(ollama));
+               pendingToolCalls = {};
+             } else if (finishReason === "stop") {
+               const ollamaEnd = JSON.stringify({ model, message: { role: "assistant", content: "" }, done: true }) + "\n";
+               controller.enqueue(new TextEncoder().encode(ollamaEnd));
+             }
+           }
+         } catch (e) {
+           // Silently ignore parse errors
+         }
+       }
+     },
+     flush(controller) {
+       const ollamaEnd = JSON.stringify({ model, message: { role: "assistant", content: "" }, done: true }) + "\n";
+       controller.enqueue(new TextEncoder().encode(ollamaEnd));
+     }
+   });
+
+   return new Response(response.body.pipeThrough(transform), {
+     headers: { "Content-Type": "application/x-ndjson", "Access-Control-Allow-Origin": "*" }
+   });
+ }
+
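
Usage note: a minimal sketch of exposing an Ollama-style chat endpoint on top of an OpenAI-compatible SSE stream. The upstream URL and request shape are illustrative; transformToOllama only needs the streaming fetch Response and a model name.

import { transformToOllama } from "./utils/ollamaTransform.js";

// Hypothetical adapter: call an OpenAI-compatible endpoint (URL is illustrative),
// then re-emit its SSE stream as newline-delimited Ollama JSON objects.
async function ollamaChat(messages, model) {
  const upstream = await fetch("http://localhost:8000/v1/chat/completions", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ model, messages, stream: true })
  });
  return transformToOllama(upstream, model);
}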
package/utils/requestLogger.js ADDED
@@ -0,0 +1,217 @@
+ // Check if running in Node.js environment (has fs module)
+ const isNode = typeof process !== "undefined" && process.versions?.node;
+
+ let fs = null;
+ let path = null;
+ let LOGS_DIR = null;
+
+ // Only import fs/path in Node.js environment
+ if (isNode) {
+   try {
+     fs = await import("fs");
+     path = await import("path");
+     LOGS_DIR = path.join(process.cwd(), "logs");
+   } catch {
+     // Running in non-Node environment (Worker, etc.)
+   }
+ }
+
+ // Format timestamp for folder name: 20251228_143045
+ function formatTimestamp(date = new Date()) {
+   const pad = (n) => String(n).padStart(2, "0");
+   const y = date.getFullYear();
+   const m = pad(date.getMonth() + 1);
+   const d = pad(date.getDate());
+   const h = pad(date.getHours());
+   const min = pad(date.getMinutes());
+   const s = pad(date.getSeconds());
+   return `${y}${m}${d}_${h}${min}${s}`;
+ }
+
+ // Create log session folder: {sourceFormat}_{targetFormat}_{model}_{timestamp}
+ function createLogSession(sourceFormat, targetFormat, model) {
+   if (!fs || !LOGS_DIR) return null;
+
+   try {
+     if (!fs.existsSync(LOGS_DIR)) {
+       fs.mkdirSync(LOGS_DIR, { recursive: true });
+     }
+
+     const timestamp = formatTimestamp();
+     const safeModel = model.replace(/[/:]/g, "-");
+     const folderName = `${sourceFormat}_${targetFormat}_${safeModel}_${timestamp}`;
+     const sessionPath = path.join(LOGS_DIR, folderName);
+
+     fs.mkdirSync(sessionPath, { recursive: true });
+
+     return sessionPath;
+   } catch (err) {
+     console.log("[LOG] Failed to create log session:", err.message);
+     return null;
+   }
+ }
+
+ // Write JSON file
+ function writeJsonFile(sessionPath, filename, data) {
+   if (!fs || !sessionPath) return;
+
+   try {
+     const filePath = path.join(sessionPath, filename);
+     fs.writeFileSync(filePath, JSON.stringify(data, null, 2));
+   } catch (err) {
+     console.log(`[LOG] Failed to write ${filename}:`, err.message);
+   }
+ }
+
+ // Mask sensitive data in headers
+ function maskSensitiveHeaders(headers) {
+   if (!headers) return {};
+   const masked = { ...headers };
+   const sensitiveKeys = ["authorization", "x-api-key", "cookie", "token"];
+
+   for (const key of Object.keys(masked)) {
+     const lowerKey = key.toLowerCase();
+     if (sensitiveKeys.some(sk => lowerKey.includes(sk))) {
+       const value = masked[key];
+       if (value && value.length > 20) {
+         masked[key] = value.slice(0, 10) + "..." + value.slice(-5);
+       }
+     }
+   }
+   return masked;
+ }
+
+ /**
+  * Create a new log session and return logger functions
+  * @param {string} sourceFormat - Source format from client (claude, openai, etc.)
+  * @param {string} targetFormat - Target format to provider (antigravity, gemini-cli, etc.)
+  * @param {string} model - Model name
+  * @returns {object} Logger object with methods to log each stage
+  */
+ export function createRequestLogger(sourceFormat, targetFormat, model) {
+   const sessionPath = createLogSession(sourceFormat, targetFormat, model);
+
+   return {
+     sessionPath,
+
+     // 0. Log client raw request (before any conversion)
+     logClientRawRequest(endpoint, body, headers = {}) {
+       writeJsonFile(sessionPath, "0_client_raw_request.json", {
+         timestamp: new Date().toISOString(),
+         endpoint,
+         headers: maskSensitiveHeaders(headers),
+         body
+       });
+     },
+
+     // 1. Log raw request from client (after initial conversion like responsesApi)
+     logRawRequest(body, headers = {}) {
+       writeJsonFile(sessionPath, "1_raw_request.json", {
+         timestamp: new Date().toISOString(),
+         headers: maskSensitiveHeaders(headers),
+         body
+       });
+     },
+
+     // 1a. Log format detection info
+     logFormatInfo(info) {
+       writeJsonFile(sessionPath, "1a_format_info.json", {
+         timestamp: new Date().toISOString(),
+         ...info
+       });
+     },
+
+     // 2. Log converted request to send to provider
+     logConvertedRequest(url, headers, body) {
+       writeJsonFile(sessionPath, "2_converted_request.json", {
+         timestamp: new Date().toISOString(),
+         url,
+         headers: maskSensitiveHeaders(headers),
+         body
+       });
+     },
+
+     // 3. Log provider response (for non-streaming or error)
+     logProviderResponse(status, statusText, headers, body) {
+       const filename = "3_provider_response.json";
+       writeJsonFile(sessionPath, filename, {
+         timestamp: new Date().toISOString(),
+         status,
+         statusText,
+         headers: headers ? (typeof headers.entries === "function" ? Object.fromEntries(headers.entries()) : headers) : {},
+         body
+       });
+     },
+
+     // 3. Append streaming chunk to provider response
+     appendProviderChunk(chunk) {
+       if (!fs || !sessionPath) return;
+       try {
+         const filePath = path.join(sessionPath, "3_provider_response.txt");
+         fs.appendFileSync(filePath, chunk);
+       } catch (err) {
+         // Ignore append errors
+       }
+     },
+
+     // 4. Log converted response to client (for non-streaming)
+     logConvertedResponse(body) {
+       writeJsonFile(sessionPath, "4_converted_response.json", {
+         timestamp: new Date().toISOString(),
+         body
+       });
+     },
+
+     // 4. Append streaming chunk to converted response
+     appendConvertedChunk(chunk) {
+       if (!fs || !sessionPath) return;
+       try {
+         const filePath = path.join(sessionPath, "4_converted_response.txt");
+         fs.appendFileSync(filePath, chunk);
+       } catch (err) {
+         // Ignore append errors
+       }
+     },
+
+     // 5. Log error
+     logError(error, requestBody = null) {
+       writeJsonFile(sessionPath, "5_error.json", {
+         timestamp: new Date().toISOString(),
+         error: error?.message || String(error),
+         stack: error?.stack,
+         requestBody
+       });
+     }
+   };
+ }
+
+ // Legacy functions for backward compatibility
+ export function logRequest() {}
+ export function logResponse() {}
+ export function logError(provider, { error, url, model, requestBody }) {
+   if (!fs || !LOGS_DIR) return;
+
+   try {
+     if (!fs.existsSync(LOGS_DIR)) {
+       fs.mkdirSync(LOGS_DIR, { recursive: true });
+     }
+
+     const date = new Date().toISOString().split("T")[0];
+     const logPath = path.join(LOGS_DIR, `${provider}-${date}.log`);
+
+     const logEntry = {
+       timestamp: new Date().toISOString(),
+       type: "error",
+       provider,
+       model,
+       url,
+       error: error?.message || String(error),
+       stack: error?.stack,
+       requestBody
+     };
+
+     fs.appendFileSync(logPath, JSON.stringify(logEntry) + "\n");
+   } catch (err) {
+     console.log("[LOG] Failed to write error log:", err.message);
+   }
+ }
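
Usage note: a small sketch of a request lifecycle instrumented with createRequestLogger. The format names, endpoint, and body shapes below are illustrative; each log* call writes one numbered JSON file into the per-request session folder described above, and is a no-op outside Node.js.

import { createRequestLogger } from "./utils/requestLogger.js";

// Hypothetical flow; only createRequestLogger comes from this file.
function logLifecycle(clientBody, providerUrl, providerBody) {
  const logger = createRequestLogger("openai", "claude", clientBody.model || "unknown");

  // Each call produces one numbered file in logs/<session>/
  logger.logClientRawRequest("/v1/chat/completions", clientBody);
  logger.logFormatInfo({ sourceFormat: "openai", targetFormat: "claude" });
  logger.logConvertedRequest(providerUrl, {}, providerBody);
}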