aicodeswitch 1.4.1 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,202 @@
1
+ import { Transform } from 'stream';
2
+
3
/**
 * SSEEvent - one complete Server-Sent Events message.
 */
export interface SSEEvent {
  event?: string; // value of the `event:` field, when present
  id?: string; // value of the `id:` field, when present
  data?: string; // `data:` lines joined with '\n', when any were seen
  raw: string; // the original, unparsed event text
}
12
+
13
+ /**
14
+ * ChunkCollectorTransform - 收集stream chunks用于日志记录
15
+ * 这个Transform会记录所有经过它的数据块,同时将数据原封不动地传递给下一个stream
16
+ */
17
+ export class ChunkCollectorTransform extends Transform {
18
+ private chunks: string[] = [];
19
+
20
+ constructor() {
21
+ super();
22
+ }
23
+
24
+ _transform(chunk: Buffer, _encoding: BufferEncoding, callback: (error?: Error | null) => void) {
25
+ // 收集chunk数据
26
+ this.chunks.push(chunk.toString('utf8'));
27
+
28
+ // 将chunk传递给下一个stream
29
+ this.push(chunk);
30
+
31
+ callback();
32
+ }
33
+
34
+ /**
35
+ * 获取收集的所有chunks
36
+ */
37
+ getChunks(): string[] {
38
+ return this.chunks;
39
+ }
40
+
41
+ /**
42
+ * 清空已收集的chunks
43
+ */
44
+ clearChunks(): void {
45
+ this.chunks = [];
46
+ }
47
+ }
48
+
49
+ /**
50
+ * SSEEventCollectorTransform - 智能收集完整的SSE事件
51
+ * 这个Transform会解析SSE流并将每个完整的事件存储为一个单独的entry
52
+ * 确保每个chunk代表一条完整的消息,而不是随机的buffer片段
53
+ */
54
+ export class SSEEventCollectorTransform extends Transform {
55
+ private buffer = '';
56
+ private currentEvent: { event?: string; id?: string; dataLines: string[]; rawLines: string[] } = {
57
+ dataLines: [],
58
+ rawLines: []
59
+ };
60
+ private events: SSEEvent[] = [];
61
+
62
+ constructor() {
63
+ super();
64
+ }
65
+
66
+ _transform(chunk: Buffer, _encoding: BufferEncoding, callback: (error?: Error | null) => void) {
67
+ this.buffer += chunk.toString('utf8');
68
+ this.processBuffer();
69
+ // 将chunk传递给下一个stream
70
+ this.push(chunk);
71
+ callback();
72
+ }
73
+
74
+ _flush(callback: (error?: Error | null) => void) {
75
+ // 处理剩余的buffer
76
+ if (this.buffer.trim()) {
77
+ this.processBuffer();
78
+ }
79
+ // 刷新最后一个事件
80
+ this.flushEvent();
81
+ callback();
82
+ }
83
+
84
+ private processBuffer() {
85
+ const lines = this.buffer.split('\n');
86
+ // 保留最后一行(可能不完整)
87
+ this.buffer = lines.pop() || '';
88
+
89
+ for (const line of lines) {
90
+ this.processLine(line);
91
+ }
92
+ }
93
+
94
+ private processLine(line: string) {
95
+ // 记录原始行
96
+ this.currentEvent.rawLines.push(line);
97
+
98
+ // 空行表示一个事件结束
99
+ if (!line.trim()) {
100
+ this.flushEvent();
101
+ return;
102
+ }
103
+
104
+ if (line.startsWith('event:')) {
105
+ this.currentEvent.event = line.slice(6).trim();
106
+ return;
107
+ }
108
+
109
+ if (line.startsWith('id:')) {
110
+ this.currentEvent.id = line.slice(3).trim();
111
+ return;
112
+ }
113
+
114
+ if (line.startsWith('data:')) {
115
+ this.currentEvent.dataLines.push(line.slice(5).trim());
116
+ return;
117
+ }
118
+ }
119
+
120
+ private flushEvent() {
121
+ // 只有当有内容时才创建事件
122
+ if (!this.currentEvent.event && this.currentEvent.dataLines.length === 0 && !this.currentEvent.id) {
123
+ this.currentEvent = { dataLines: [], rawLines: [] };
124
+ return;
125
+ }
126
+
127
+ const raw = this.currentEvent.rawLines.join('\n');
128
+ const event: SSEEvent = {
129
+ event: this.currentEvent.event,
130
+ id: this.currentEvent.id,
131
+ data: this.currentEvent.dataLines.length > 0 ? this.currentEvent.dataLines.join('\n') : undefined,
132
+ raw
133
+ };
134
+
135
+ this.events.push(event);
136
+ this.currentEvent = { dataLines: [], rawLines: [] };
137
+ }
138
+
139
+ /**
140
+ * 获取收集的所有SSE事件
141
+ * 每个事件都是一个完整的SSE消息
142
+ */
143
+ getEvents(): SSEEvent[] {
144
+ return this.events;
145
+ }
146
+
147
+ /**
148
+ * 获取原始chunks(兼容旧接口)
149
+ */
150
+ getChunks(): string[] {
151
+ return this.events.map(e => e.raw);
152
+ }
153
+
154
+ /**
155
+ * 清空已收集的事件
156
+ */
157
+ clearEvents(): void {
158
+ this.events = [];
159
+ }
160
+
161
+ /**
162
+ * 从events中提取usage信息
163
+ */
164
+ extractUsage(): { input_tokens?: number; output_tokens?: number; cache_read_input_tokens?: number } | null {
165
+ for (const event of this.events) {
166
+ if (!event.data) continue;
167
+
168
+ try {
169
+ const data = JSON.parse(event.data);
170
+
171
+ // 尝试从不同的位置提取usage
172
+ // 1. message_delta事件中的usage
173
+ if (event.event === 'message_delta' && data.usage) {
174
+ return data.usage;
175
+ }
176
+
177
+ // 2. 直接在data中的usage
178
+ if (data.usage) {
179
+ return data.usage;
180
+ }
181
+
182
+ // 3. OpenAI格式: choices数组中最后一个元素的usage
183
+ if (Array.isArray(data.choices) && data.choices.length > 0) {
184
+ const lastChoice = data.choices[data.choices.length - 1];
185
+ if (lastChoice?.usage) {
186
+ return lastChoice.usage;
187
+ }
188
+ }
189
+
190
+ // 4. 直接在顶级的usage字段
191
+ if (data.input_tokens !== undefined || data.output_tokens !== undefined ||
192
+ data.prompt_tokens !== undefined || data.completion_tokens !== undefined) {
193
+ return data;
194
+ }
195
+ } catch {
196
+ // JSON解析失败,跳过
197
+ }
198
+ }
199
+
200
+ return null;
201
+ }
202
+ }
@@ -0,0 +1,261 @@
1
+ import type { TokenUsage } from '../../types';
2
+
3
// One content block inside a Claude message. The trailing catch-all record
// keeps unknown block types (e.g. images) from failing the type check.
type ClaudeContentBlock =
  | { type: 'text'; text: string }
  | { type: 'tool_use'; id?: string; name: string; input?: unknown }
  | { type: 'tool_result'; tool_use_id?: string; content?: unknown }
  | { type: 'thinking'; thinking?: string }
  | Record<string, any>;

// One conversation turn; content may be plain text, content blocks, or null.
type ClaudeMessage = {
  role: 'user' | 'assistant' | 'system' | 'tool';
  content: string | ClaudeContentBlock[] | null;
};

// Anthropic Messages API request body. The index signature passes through
// any extra fields the caller supplies.
type ClaudeRequest = {
  model?: string;
  messages?: ClaudeMessage[];
  system?: string | ClaudeContentBlock[];
  max_tokens?: number;
  temperature?: number;
  top_p?: number;
  stream?: boolean;
  tools?: Array<{ name: string; description?: string; input_schema?: unknown }>;
  tool_choice?: unknown;
  stop_sequences?: string[];
  [key: string]: unknown;
};
28
+
29
+ const toTextContent = (content: unknown): string | null => {
30
+ if (typeof content === 'string') return content;
31
+ if (!Array.isArray(content)) return null;
32
+
33
+ const parts: string[] = [];
34
+ for (const item of content) {
35
+ if (item && typeof item === 'object' && (item as any).type === 'text' && typeof (item as any).text === 'string') {
36
+ parts.push((item as any).text);
37
+ }
38
+ }
39
+ return parts.length > 0 ? parts.join('') : null;
40
+ };
41
+
42
+ const mapClaudeToolChoiceToOpenAI = (toolChoice: unknown): unknown => {
43
+ if (toolChoice === 'auto' || toolChoice === 'none' || toolChoice === 'required') {
44
+ return toolChoice;
45
+ }
46
+ if (toolChoice && typeof toolChoice === 'object' && (toolChoice as any).name) {
47
+ return {
48
+ type: 'function',
49
+ function: { name: (toolChoice as any).name },
50
+ };
51
+ }
52
+ return toolChoice;
53
+ };
54
+
55
+ export const convertOpenAIUsageToClaude = (usage: any) => {
56
+ const cached = usage?.prompt_tokens_details?.cached_tokens || 0;
57
+ return {
58
+ input_tokens: (usage?.prompt_tokens || 0) - cached,
59
+ output_tokens: usage?.completion_tokens || 0,
60
+ cache_read_input_tokens: cached,
61
+ };
62
+ };
63
+
64
+ export const mapStopReason = (finishReason?: string | null): string => {
65
+ switch (finishReason) {
66
+ case 'stop':
67
+ return 'end_turn';
68
+ case 'length':
69
+ return 'max_tokens';
70
+ case 'tool_calls':
71
+ return 'tool_use';
72
+ case 'content_filter':
73
+ return 'content_filter';
74
+ default:
75
+ return 'end_turn';
76
+ }
77
+ };
78
+
79
+ export const transformClaudeRequestToOpenAIChat = (body: ClaudeRequest, targetModel?: string) => {
80
+ const messages: any[] = [];
81
+
82
+ if (body.system) {
83
+ const systemText = toTextContent(body.system);
84
+ if (systemText) {
85
+ messages.push({ role: 'system', content: systemText });
86
+ }
87
+ }
88
+
89
+ if (Array.isArray(body.messages)) {
90
+ for (const message of body.messages) {
91
+ if (typeof message.content === 'string' || message.content === null) {
92
+ messages.push({ role: message.role, content: message.content });
93
+ continue;
94
+ }
95
+
96
+ if (Array.isArray(message.content)) {
97
+ const textParts: string[] = [];
98
+ const toolCalls: any[] = [];
99
+ const toolResultMessages: any[] = [];
100
+
101
+ for (const block of message.content) {
102
+ if (block && typeof block === 'object') {
103
+ if (block.type === 'text' && typeof (block as any).text === 'string') {
104
+ textParts.push((block as any).text);
105
+ }
106
+ if (block.type === 'tool_use') {
107
+ const toolId = (block as any).id || `tool_${toolCalls.length + 1}`;
108
+ const toolName = (block as any).name || 'tool';
109
+ const input = (block as any).input ?? {};
110
+ toolCalls.push({
111
+ id: toolId,
112
+ type: 'function',
113
+ function: {
114
+ name: toolName,
115
+ arguments: JSON.stringify(input),
116
+ },
117
+ });
118
+ }
119
+ if (block.type === 'tool_result') {
120
+ const toolCallId = (block as any).tool_use_id || (block as any).id;
121
+ const toolContent = (block as any).content;
122
+ toolResultMessages.push({
123
+ role: 'tool',
124
+ tool_call_id: toolCallId,
125
+ content: typeof toolContent === 'string' ? toolContent : JSON.stringify(toolContent ?? {}),
126
+ });
127
+ }
128
+ }
129
+ }
130
+
131
+ const content = textParts.length > 0 ? textParts.join('') : null;
132
+ const openaiMessage: any = {
133
+ role: message.role,
134
+ content,
135
+ };
136
+
137
+ if (toolCalls.length > 0) {
138
+ openaiMessage.tool_calls = toolCalls;
139
+ }
140
+ messages.push(openaiMessage);
141
+ toolResultMessages.forEach((toolMessage) => messages.push(toolMessage));
142
+ }
143
+ }
144
+ }
145
+
146
+ const openaiBody: any = {
147
+ model: targetModel || body.model,
148
+ messages,
149
+ };
150
+
151
+ if (typeof body.temperature === 'number') openaiBody.temperature = body.temperature;
152
+ if (typeof body.top_p === 'number') openaiBody.top_p = body.top_p;
153
+ if (typeof body.max_tokens === 'number') openaiBody.max_tokens = body.max_tokens;
154
+ if (Array.isArray(body.stop_sequences)) openaiBody.stop = body.stop_sequences;
155
+
156
+ if (body.tools) {
157
+ openaiBody.tools = body.tools.map((tool) => ({
158
+ type: 'function',
159
+ function: {
160
+ name: tool.name,
161
+ description: tool.description,
162
+ parameters: tool.input_schema,
163
+ },
164
+ }));
165
+ }
166
+
167
+ if (body.tool_choice) {
168
+ openaiBody.tool_choice = mapClaudeToolChoiceToOpenAI(body.tool_choice);
169
+ }
170
+
171
+ if (body.stream === true) {
172
+ openaiBody.stream = true;
173
+ openaiBody.stream_options = { include_usage: true };
174
+ }
175
+
176
+ return openaiBody;
177
+ };
178
+
179
+ const extractOpenAIText = (content: unknown): string | null => {
180
+ if (typeof content === 'string') return content;
181
+ if (!Array.isArray(content)) return null;
182
+
183
+ const parts: string[] = [];
184
+ for (const item of content) {
185
+ if (item && typeof item === 'object' && typeof (item as any).text === 'string') {
186
+ parts.push((item as any).text);
187
+ }
188
+ }
189
+ return parts.length > 0 ? parts.join('') : null;
190
+ };
191
+
192
+ export const transformOpenAIChatResponseToClaude = (body: any) => {
193
+ const choice = Array.isArray(body?.choices) ? body.choices[0] : null;
194
+ const message = choice?.message || {};
195
+ const contentBlocks: any[] = [];
196
+
197
+ const contentText = extractOpenAIText(message.content);
198
+ if (contentText) {
199
+ contentBlocks.push({ type: 'text', text: contentText });
200
+ }
201
+
202
+ if (Array.isArray(message.tool_calls)) {
203
+ for (const toolCall of message.tool_calls) {
204
+ const toolName = toolCall?.function?.name || 'tool';
205
+ let input: unknown = {};
206
+ if (toolCall?.function?.arguments) {
207
+ try {
208
+ input = JSON.parse(toolCall.function.arguments);
209
+ } catch {
210
+ input = toolCall.function.arguments;
211
+ }
212
+ }
213
+ contentBlocks.push({
214
+ type: 'tool_use',
215
+ id: toolCall.id,
216
+ name: toolName,
217
+ input,
218
+ });
219
+ }
220
+ }
221
+
222
+ const usage = body?.usage ? convertOpenAIUsageToClaude(body.usage) : null;
223
+
224
+ return {
225
+ id: body?.id,
226
+ type: 'message',
227
+ role: 'assistant',
228
+ model: body?.model,
229
+ content: contentBlocks,
230
+ stop_reason: mapStopReason(choice?.finish_reason),
231
+ stop_sequence: null,
232
+ usage: usage || {
233
+ input_tokens: 0,
234
+ output_tokens: 0,
235
+ cache_read_input_tokens: 0,
236
+ },
237
+ };
238
+ };
239
+
240
+ export const extractTokenUsageFromOpenAIUsage = (usage: any): TokenUsage | undefined => {
241
+ if (!usage) return undefined;
242
+ const converted = convertOpenAIUsageToClaude(usage);
243
+ return {
244
+ inputTokens: converted.input_tokens,
245
+ outputTokens: converted.output_tokens,
246
+ totalTokens: usage.total_tokens,
247
+ cacheReadInputTokens: converted.cache_read_input_tokens,
248
+ };
249
+ };
250
+
251
+ export const extractTokenUsageFromClaudeUsage = (usage: any): TokenUsage | undefined => {
252
+ if (!usage) return undefined;
253
+ return {
254
+ inputTokens: usage.input_tokens ?? 0,
255
+ outputTokens: usage.output_tokens ?? 0,
256
+ totalTokens: usage.input_tokens !== undefined && usage.output_tokens !== undefined
257
+ ? usage.input_tokens + usage.output_tokens
258
+ : undefined,
259
+ cacheReadInputTokens: usage.cache_read_input_tokens,
260
+ };
261
+ };