@animalabs/membrane 0.2.4 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/membrane.d.ts.map +1 -1
- package/dist/membrane.js +18 -14
- package/dist/membrane.js.map +1 -1
- package/dist/providers/anthropic-chat.d.ts +50 -0
- package/dist/providers/anthropic-chat.d.ts.map +1 -0
- package/dist/providers/anthropic-chat.js +212 -0
- package/dist/providers/anthropic-chat.js.map +1 -0
- package/dist/providers/anthropic-multiuser.d.ts +64 -0
- package/dist/providers/anthropic-multiuser.d.ts.map +1 -0
- package/dist/providers/anthropic-multiuser.js +297 -0
- package/dist/providers/anthropic-multiuser.js.map +1 -0
- package/dist/providers/anthropic.d.ts.map +1 -1
- package/dist/providers/anthropic.js +2 -0
- package/dist/providers/anthropic.js.map +1 -1
- package/dist/providers/index.d.ts +2 -0
- package/dist/providers/index.d.ts.map +1 -1
- package/dist/providers/index.js +2 -0
- package/dist/providers/index.js.map +1 -1
- package/dist/providers/openai-compatible.d.ts.map +1 -1
- package/dist/providers/openai-compatible.js +2 -0
- package/dist/providers/openai-compatible.js.map +1 -1
- package/dist/providers/openai-completions.d.ts.map +1 -1
- package/dist/providers/openai-completions.js +2 -0
- package/dist/providers/openai-completions.js.map +1 -1
- package/dist/providers/openai.d.ts.map +1 -1
- package/dist/providers/openai.js +2 -0
- package/dist/providers/openai.js.map +1 -1
- package/dist/providers/openrouter.d.ts.map +1 -1
- package/dist/providers/openrouter.js +2 -0
- package/dist/providers/openrouter.js.map +1 -1
- package/dist/types/provider.d.ts +2 -0
- package/dist/types/provider.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/membrane.ts +19 -21
- package/src/providers/anthropic-chat.ts +294 -0
- package/src/providers/anthropic-multiuser.ts +387 -0
- package/src/providers/anthropic.ts +2 -0
- package/src/providers/index.ts +10 -0
- package/src/providers/openai-compatible.ts +2 -0
- package/src/providers/openai-completions.ts +2 -0
- package/src/providers/openai.ts +2 -0
- package/src/providers/openrouter.ts +2 -0
- package/src/types/provider.ts +2 -0
|
@@ -0,0 +1,387 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Anthropic Multiuser Adapter - Multi-party conversation support
|
|
3
|
+
*
|
|
4
|
+
* For conversations with multiple participants (e.g., group chats, Discord).
|
|
5
|
+
* - All non-bot participants map to 'user' role
|
|
6
|
+
* - Bot participant maps to 'assistant' role
|
|
7
|
+
* - Prefixes messages with participant names for context
|
|
8
|
+
* - Native Anthropic tool API
|
|
9
|
+
*
|
|
10
|
+
* Use AnthropicChatAdapter for simple two-party Human/Assistant conversations.
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
14
|
+
import type {
|
|
15
|
+
ProviderAdapter,
|
|
16
|
+
ProviderRequest,
|
|
17
|
+
ProviderRequestOptions,
|
|
18
|
+
ProviderResponse,
|
|
19
|
+
StreamCallbacks,
|
|
20
|
+
ContentBlock,
|
|
21
|
+
} from '../types/index.js';
|
|
22
|
+
import {
|
|
23
|
+
MembraneError,
|
|
24
|
+
rateLimitError,
|
|
25
|
+
contextLengthError,
|
|
26
|
+
authError,
|
|
27
|
+
serverError,
|
|
28
|
+
abortError,
|
|
29
|
+
} from '../types/index.js';
|
|
30
|
+
import { fromAnthropicContent } from './anthropic.js';
|
|
31
|
+
|
|
32
|
+
// ============================================================================
|
|
33
|
+
// Adapter Configuration
|
|
34
|
+
// ============================================================================
|
|
35
|
+
|
|
36
|
+
/**
 * Configuration for {@link AnthropicMultiuserAdapter}.
 *
 * All fields are optional; defaults are applied in the adapter constructor.
 */
export interface AnthropicMultiuserAdapterConfig {
  /** API key (defaults to ANTHROPIC_API_KEY env var) */
  apiKey?: string;

  /** Base URL override */
  baseURL?: string;

  /** Default max tokens used when a request does not specify one (default: 4096) */
  defaultMaxTokens?: number;

  /**
   * Bot/assistant participant name (default: 'Claude')
   * Messages with this participant become 'assistant' role (no name prefix).
   */
  assistantParticipant?: string;

  /**
   * Whether to prefix user messages with participant names (default: true)
   * When true: "Alice: Hello there"
   * When false: "Hello there"
   */
  includeParticipantNames?: boolean;

  /**
   * Format for participant name prefix (default: '{name}: ')
   * Use {name} as placeholder for participant name.
   * Only the '{name}' placeholder is substituted (single replace).
   */
  nameFormat?: string;
}
|
|
65
|
+
|
|
66
|
+
// ============================================================================
|
|
67
|
+
// Anthropic Multiuser Adapter
|
|
68
|
+
// ============================================================================
|
|
69
|
+
|
|
70
|
+
/**
 * Provider adapter for multi-party conversations on the Anthropic Messages API.
 *
 * Maps the one participant named by `assistantParticipant` to the 'assistant'
 * role and every other participant to 'user', optionally prefixing user text
 * with the participant's name (see `nameFormat`). Consecutive same-role
 * messages are merged to satisfy Anthropic's alternating-role requirement.
 */
export class AnthropicMultiuserAdapter implements ProviderAdapter {
  readonly name = 'anthropic-multiuser';
  private client: Anthropic;
  // Fallback for max_tokens when the request omits it.
  private defaultMaxTokens: number;
  // Participant whose messages become 'assistant' role.
  private assistantParticipant: string;
  // When true, user text is prefixed via nameFormat.
  private includeParticipantNames: boolean;
  // Template containing '{name}' placeholder for the prefix.
  private nameFormat: string;

  constructor(config: AnthropicMultiuserAdapterConfig = {}) {
    // apiKey/baseURL may be undefined; the SDK then falls back to its own
    // environment-based defaults.
    this.client = new Anthropic({
      apiKey: config.apiKey,
      baseURL: config.baseURL,
    });
    this.defaultMaxTokens = config.defaultMaxTokens ?? 4096;
    this.assistantParticipant = config.assistantParticipant ?? 'Claude';
    this.includeParticipantNames = config.includeParticipantNames ?? true;
    this.nameFormat = config.nameFormat ?? '{name}: ';
  }

  /** Claims any model id starting with 'claude-'. */
  supportsModel(modelId: string): boolean {
    return modelId.startsWith('claude-');
  }

  /**
   * Non-streaming completion.
   *
   * Builds the Anthropic request, notifies `options.onRequest` with the exact
   * body (including stream flag) before sending, and normalizes both the
   * response and any thrown error.
   */
  async complete(
    request: ProviderRequest,
    options?: ProviderRequestOptions
  ): Promise<ProviderResponse> {
    const anthropicRequest = this.buildRequest(request);
    const fullRequest = { ...anthropicRequest, stream: false as const };
    // Observability hook: fires before the network call is made.
    options?.onRequest?.(fullRequest);

    try {
      const response = await this.client.messages.create(fullRequest, {
        signal: options?.signal,
      });

      return this.parseResponse(response, fullRequest);
    } catch (error) {
      // All failures are converted to MembraneError and rethrown.
      throw this.handleError(error, fullRequest);
    }
  }

  /**
   * Streaming completion.
   *
   * Emits text and thinking deltas through `callbacks.onChunk`, and block
   * start/stop events through `callbacks.onContentBlock`. Resolves with the
   * parsed final message once the stream completes.
   */
  async stream(
    request: ProviderRequest,
    callbacks: StreamCallbacks,
    options?: ProviderRequestOptions
  ): Promise<ProviderResponse> {
    const anthropicRequest = this.buildRequest(request);
    // fullRequest is only used for onRequest/error reporting; the SDK's
    // .stream() below receives anthropicRequest and sets streaming itself.
    const fullRequest = { ...anthropicRequest, stream: true };
    options?.onRequest?.(fullRequest);

    try {
      const stream = await this.client.messages.stream(anthropicRequest, {
        signal: options?.signal,
      });

      // NOTE(review): `accumulated` is written but never read — candidate for
      // removal, or it was intended to feed the final response. Confirm.
      let accumulated = '';
      const contentBlocks: unknown[] = [];
      let currentBlockIndex = -1;

      for await (const event of stream) {
        if (event.type === 'content_block_start') {
          currentBlockIndex = event.index;
          contentBlocks[currentBlockIndex] = event.content_block;
          callbacks.onContentBlock?.(currentBlockIndex, event.content_block);
        } else if (event.type === 'content_block_delta') {
          if (event.delta.type === 'text_delta') {
            const chunk = event.delta.text;
            accumulated += chunk;
            callbacks.onChunk(chunk);
          } else if (event.delta.type === 'thinking_delta') {
            // Thinking deltas are surfaced through the same chunk callback.
            callbacks.onChunk(event.delta.thinking);
          }
          // Other delta types (e.g. tool input JSON) are intentionally not
          // streamed; they arrive fully formed in finalMessage().
        } else if (event.type === 'content_block_stop') {
          // NOTE(review): uses the last-started index and the block captured
          // at content_block_start (deltas are not folded in), rather than
          // event.index — verify this matches onContentBlock's contract.
          callbacks.onContentBlock?.(currentBlockIndex, contentBlocks[currentBlockIndex]);
        }
      }

      // finalMessage() resolves with the complete accumulated Message.
      const finalMessage = await stream.finalMessage();
      return this.parseResponse(finalMessage, fullRequest);

    } catch (error) {
      throw this.handleError(error, fullRequest);
    }
  }

  // ============================================================================
  // Message Conversion
  // ============================================================================

  /**
   * Convert normalized messages to Anthropic format.
   * - Bot messages become assistant role
   * - All other messages become user role with optional name prefix
   *
   * Supported block types: text (with optional cache_control passthrough),
   * base64 images, documents, tool_use, tool_result, thinking. Any other
   * block type is silently dropped.
   */
  private convertMessages(
    messages: Array<{ participant: string; content: ContentBlock[] }>
  ): Anthropic.MessageParam[] {
    const result: Anthropic.MessageParam[] = [];

    for (const msg of messages) {
      const isAssistant = msg.participant === this.assistantParticipant;
      const role: 'user' | 'assistant' = isAssistant ? 'assistant' : 'user';

      // Convert content blocks
      const content: Anthropic.ContentBlockParam[] = [];

      for (const block of msg.content) {
        if (block.type === 'text') {
          let text = block.text;

          // Prefix with participant name for non-assistant messages
          if (!isAssistant && this.includeParticipantNames) {
            const prefix = this.nameFormat.replace('{name}', msg.participant);
            text = prefix + text;
          }

          // `any` used so cache_control can be attached without fighting the
          // SDK param type.
          const textBlock: any = { type: 'text', text };
          if (block.cache_control) {
            textBlock.cache_control = block.cache_control;
          }
          content.push(textBlock);
        } else if (block.type === 'image' && block.source.type === 'base64') {
          // Non-base64 image sources (e.g. URLs) are not forwarded.
          content.push({
            type: 'image',
            source: {
              type: 'base64',
              media_type: block.source.mediaType as 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp',
              data: block.source.data,
            },
          });
        } else if (block.type === 'document') {
          // Documents are always sent as base64 PDFs.
          content.push({
            type: 'document',
            source: {
              type: 'base64',
              media_type: block.source.mediaType as 'application/pdf',
              data: block.source.data,
            },
          });
        } else if (block.type === 'tool_use') {
          content.push({
            type: 'tool_use',
            id: block.id,
            name: block.name,
            input: block.input,
          });
        } else if (block.type === 'tool_result') {
          // Non-string tool results are serialized to JSON text.
          content.push({
            type: 'tool_result',
            tool_use_id: block.toolUseId,
            content: typeof block.content === 'string'
              ? block.content
              : JSON.stringify(block.content),
            is_error: block.isError,
          });
        } else if (block.type === 'thinking') {
          // NOTE(review): cast to any because the SDK's thinking param also
          // expects a signature field — confirm the API accepts this shape.
          content.push({
            type: 'thinking',
            thinking: block.thinking,
          } as any);
        }
      }

      result.push({ role, content });
    }

    // Anthropic requires alternating user/assistant messages
    // Merge consecutive same-role messages
    return this.mergeConsecutiveRoles(result);
  }

  /**
   * Merge consecutive messages with the same role.
   * Anthropic API requires strictly alternating user/assistant messages.
   *
   * String content is normalized into a single text block before merging so
   * the combined message always carries an array of blocks.
   */
  private mergeConsecutiveRoles(messages: Anthropic.MessageParam[]): Anthropic.MessageParam[] {
    if (messages.length === 0) return [];

    const merged: Anthropic.MessageParam[] = [];
    // Non-null assertions are safe: length checked above, i < length below.
    let current: Anthropic.MessageParam = messages[0]!;

    for (let i = 1; i < messages.length; i++) {
      const next: Anthropic.MessageParam = messages[i]!;

      if (next.role === current.role) {
        // Merge content arrays
        const currentContent = Array.isArray(current.content) ? current.content : [{ type: 'text' as const, text: current.content }];
        const nextContent = Array.isArray(next.content) ? next.content : [{ type: 'text' as const, text: next.content }];
        current = {
          role: current.role,
          content: [...currentContent, ...nextContent],
        };
      } else {
        merged.push(current);
        current = next;
      }
    }

    merged.push(current);
    return merged;
  }

  /**
   * Translate a normalized ProviderRequest into Anthropic.MessageCreateParams.
   *
   * Messages come from `request.extra.normalizedMessages` (participant-tagged,
   * preferred) or, failing that, `request.messages` assumed to already be in
   * Anthropic format. Remaining `extra` keys are spread onto the params last,
   * so they can override anything set above.
   */
  private buildRequest(request: ProviderRequest): Anthropic.MessageCreateParams {
    // Get normalized messages from extra (preferred) or fall back to provider messages
    const normalizedMessages = request.extra?.normalizedMessages as Array<{ participant: string; content: ContentBlock[] }> | undefined;

    let messages: Anthropic.MessageParam[];
    if (normalizedMessages) {
      messages = this.convertMessages(normalizedMessages);
    } else {
      // Assume already in provider format
      messages = request.messages as Anthropic.MessageParam[];
    }

    const params: Anthropic.MessageCreateParams = {
      model: request.model,
      // NOTE(review): `||` treats maxTokens 0 as unset; harmless in practice
      // but `??` would be the stricter choice.
      max_tokens: request.maxTokens || this.defaultMaxTokens,
      messages,
    };

    // Handle system prompt
    if (request.system) {
      if (typeof request.system === 'string') {
        params.system = request.system;
      } else if (Array.isArray(request.system)) {
        params.system = request.system as Anthropic.TextBlockParam[];
      }
    }

    if (request.temperature !== undefined) {
      params.temperature = request.temperature;
    }

    if (request.stopSequences && request.stopSequences.length > 0) {
      params.stop_sequences = request.stopSequences;
    }

    if (request.tools && request.tools.length > 0) {
      params.tools = request.tools as Anthropic.Tool[];
    }

    // Handle extended thinking
    if ((request as any).thinking) {
      (params as any).thinking = (request as any).thinking;
    }

    // Apply extra params (excluding normalizedMessages)
    if (request.extra) {
      const { normalizedMessages: _, ...rest } = request.extra;
      Object.assign(params, rest);
    }

    return params;
  }

  /**
   * Normalize an Anthropic Message into the adapter-agnostic ProviderResponse,
   * carrying cache token counts (cast: older SDK typings lack these fields)
   * and the raw request/response for debugging.
   */
  private parseResponse(response: Anthropic.Message, rawRequest: unknown): ProviderResponse {
    return {
      content: fromAnthropicContent(response.content),
      // stop_reason is null while streaming is incomplete; default it.
      stopReason: response.stop_reason ?? 'end_turn',
      stopSequence: response.stop_sequence ?? undefined,
      usage: {
        inputTokens: response.usage.input_tokens,
        outputTokens: response.usage.output_tokens,
        cacheCreationTokens: (response.usage as any).cache_creation_input_tokens,
        cacheReadTokens: (response.usage as any).cache_read_input_tokens,
      },
      model: response.model,
      rawRequest,
      raw: response,
    };
  }

  /**
   * Map SDK/network failures to typed MembraneErrors.
   *
   * Order matters: rate limit (429) → auth (401) → context length (matched on
   * message text, since Anthropic reports it as a 400) → 5xx server errors →
   * abort → unknown fallback.
   */
  private handleError(error: unknown, rawRequest?: unknown): MembraneError {
    if (error instanceof Anthropic.APIError) {
      const status = error.status;
      const message = error.message;

      if (status === 429) {
        const retryAfter = this.parseRetryAfter(error);
        return rateLimitError(message, retryAfter, error, rawRequest);
      }

      if (status === 401) {
        return authError(message, error, rawRequest);
      }

      // Substring heuristic — Anthropic has no dedicated status code for
      // context overflow.
      if (message.includes('context') || message.includes('too long')) {
        return contextLengthError(message, error, rawRequest);
      }

      if (status >= 500) {
        return serverError(message, status, error, rawRequest);
      }
    }

    if (error instanceof Error && error.name === 'AbortError') {
      return abortError(undefined, rawRequest);
    }

    return new MembraneError({
      type: 'unknown',
      message: error instanceof Error ? error.message : String(error),
      retryable: false,
      rawError: error,
      rawRequest,
    });
  }

  /**
   * Best-effort extraction of a retry delay from a 429 message.
   * Returns milliseconds ("retry after N" is assumed to be seconds),
   * or undefined when no hint is present.
   */
  private parseRetryAfter(error: { message: string }): number | undefined {
    const message = error.message;
    const match = message.match(/retry after (\d+)/i);
    if (match && match[1]) {
      return parseInt(match[1], 10) * 1000;
    }
    return undefined;
  }
}
|
|
@@ -62,6 +62,7 @@ export class AnthropicAdapter implements ProviderAdapter {
|
|
|
62
62
|
): Promise<ProviderResponse> {
|
|
63
63
|
const anthropicRequest = this.buildRequest(request);
|
|
64
64
|
const fullRequest = { ...anthropicRequest, stream: false as const };
|
|
65
|
+
options?.onRequest?.(fullRequest);
|
|
65
66
|
|
|
66
67
|
try {
|
|
67
68
|
const response = await this.client.messages.create(fullRequest, {
|
|
@@ -82,6 +83,7 @@ export class AnthropicAdapter implements ProviderAdapter {
|
|
|
82
83
|
const anthropicRequest = this.buildRequest(request);
|
|
83
84
|
// Note: stream is implicitly true when using .stream()
|
|
84
85
|
const fullRequest = { ...anthropicRequest, stream: true };
|
|
86
|
+
options?.onRequest?.(fullRequest);
|
|
85
87
|
|
|
86
88
|
try {
|
|
87
89
|
const stream = await this.client.messages.stream(anthropicRequest, {
|
package/src/providers/index.ts
CHANGED
|
@@ -9,6 +9,16 @@ export {
|
|
|
9
9
|
type AnthropicAdapterConfig,
|
|
10
10
|
} from './anthropic.js';
|
|
11
11
|
|
|
12
|
+
export {
|
|
13
|
+
AnthropicChatAdapter,
|
|
14
|
+
type AnthropicChatAdapterConfig,
|
|
15
|
+
} from './anthropic-chat.js';
|
|
16
|
+
|
|
17
|
+
export {
|
|
18
|
+
AnthropicMultiuserAdapter,
|
|
19
|
+
type AnthropicMultiuserAdapterConfig,
|
|
20
|
+
} from './anthropic-multiuser.js';
|
|
21
|
+
|
|
12
22
|
export {
|
|
13
23
|
OpenRouterAdapter,
|
|
14
24
|
toOpenRouterMessages,
|
|
@@ -130,6 +130,7 @@ export class OpenAICompatibleAdapter implements ProviderAdapter {
|
|
|
130
130
|
options?: ProviderRequestOptions
|
|
131
131
|
): Promise<ProviderResponse> {
|
|
132
132
|
const openAIRequest = this.buildRequest(request);
|
|
133
|
+
options?.onRequest?.(openAIRequest);
|
|
133
134
|
|
|
134
135
|
try {
|
|
135
136
|
const response = await this.makeRequest(openAIRequest, options);
|
|
@@ -146,6 +147,7 @@ export class OpenAICompatibleAdapter implements ProviderAdapter {
|
|
|
146
147
|
): Promise<ProviderResponse> {
|
|
147
148
|
const openAIRequest = this.buildRequest(request);
|
|
148
149
|
openAIRequest.stream = true;
|
|
150
|
+
options?.onRequest?.(openAIRequest);
|
|
149
151
|
|
|
150
152
|
try {
|
|
151
153
|
const response = await fetch(`${this.baseURL}/chat/completions`, {
|
|
@@ -147,6 +147,7 @@ export class OpenAICompletionsAdapter implements ProviderAdapter {
|
|
|
147
147
|
options?: ProviderRequestOptions
|
|
148
148
|
): Promise<ProviderResponse> {
|
|
149
149
|
const completionsRequest = this.buildRequest(request);
|
|
150
|
+
options?.onRequest?.(completionsRequest);
|
|
150
151
|
|
|
151
152
|
try {
|
|
152
153
|
const response = await this.makeRequest(completionsRequest, options);
|
|
@@ -163,6 +164,7 @@ export class OpenAICompletionsAdapter implements ProviderAdapter {
|
|
|
163
164
|
): Promise<ProviderResponse> {
|
|
164
165
|
const completionsRequest = this.buildRequest(request);
|
|
165
166
|
completionsRequest.stream = true;
|
|
167
|
+
options?.onRequest?.(completionsRequest);
|
|
166
168
|
|
|
167
169
|
try {
|
|
168
170
|
const response = await fetch(`${this.baseURL}/completions`, {
|
package/src/providers/openai.ts
CHANGED
|
@@ -203,6 +203,7 @@ export class OpenAIAdapter implements ProviderAdapter {
|
|
|
203
203
|
options?: ProviderRequestOptions
|
|
204
204
|
): Promise<ProviderResponse> {
|
|
205
205
|
const openAIRequest = this.buildRequest(request);
|
|
206
|
+
options?.onRequest?.(openAIRequest);
|
|
206
207
|
|
|
207
208
|
try {
|
|
208
209
|
const response = await this.makeRequest(openAIRequest, options);
|
|
@@ -221,6 +222,7 @@ export class OpenAIAdapter implements ProviderAdapter {
|
|
|
221
222
|
openAIRequest.stream = true;
|
|
222
223
|
// Request usage data in stream for cache metrics
|
|
223
224
|
openAIRequest.stream_options = { include_usage: true };
|
|
225
|
+
options?.onRequest?.(openAIRequest);
|
|
224
226
|
|
|
225
227
|
try {
|
|
226
228
|
const response = await fetch(`${this.baseURL}/chat/completions`, {
|
|
@@ -137,6 +137,7 @@ export class OpenRouterAdapter implements ProviderAdapter {
|
|
|
137
137
|
options?: ProviderRequestOptions
|
|
138
138
|
): Promise<ProviderResponse> {
|
|
139
139
|
const openRouterRequest = this.buildRequest(request);
|
|
140
|
+
options?.onRequest?.(openRouterRequest);
|
|
140
141
|
|
|
141
142
|
try {
|
|
142
143
|
const response = await this.makeRequest(openRouterRequest, options);
|
|
@@ -155,6 +156,7 @@ export class OpenRouterAdapter implements ProviderAdapter {
|
|
|
155
156
|
openRouterRequest.stream = true;
|
|
156
157
|
// Request usage data in stream for cache metrics
|
|
157
158
|
openRouterRequest.stream_options = { include_usage: true };
|
|
159
|
+
options?.onRequest?.(openRouterRequest);
|
|
158
160
|
|
|
159
161
|
try {
|
|
160
162
|
const response = await fetch(`${this.baseURL}/chat/completions`, {
|
package/src/types/provider.ts
CHANGED
|
@@ -216,6 +216,8 @@ export interface ProviderRequest {
|
|
|
216
216
|
export interface ProviderRequestOptions {
|
|
217
217
|
signal?: AbortSignal;
|
|
218
218
|
timeoutMs?: number;
|
|
219
|
+
/** Called with the raw API request body right before fetch */
|
|
220
|
+
onRequest?: (rawRequest: unknown) => void;
|
|
219
221
|
}
|
|
220
222
|
|
|
221
223
|
export interface ProviderResponse {
|