@windrun-huaiin/backend-core 15.1.0 → 17.0.0

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (66)
  1. package/LICENSE +1 -1
  2. package/dist/index.d.ts +1 -0
  3. package/dist/index.d.ts.map +1 -1
  4. package/dist/index.js +44 -0
  5. package/dist/index.mjs +8 -1
  6. package/dist/lib/index.js +19 -0
  7. package/dist/lib/index.mjs +1 -1
  8. package/dist/lib/upstash/qstash.d.ts +20 -7
  9. package/dist/lib/upstash/qstash.d.ts.map +1 -1
  10. package/dist/lib/upstash/qstash.js +33 -7
  11. package/dist/lib/upstash/qstash.mjs +33 -7
  12. package/dist/lib/upstash/redis-structures.d.ts +83 -0
  13. package/dist/lib/upstash/redis-structures.d.ts.map +1 -1
  14. package/dist/lib/upstash/redis-structures.js +220 -0
  15. package/dist/lib/upstash/redis-structures.mjs +202 -1
  16. package/dist/lib/upstash-config.d.ts.map +1 -1
  17. package/dist/lib/upstash-config.js +76 -4
  18. package/dist/lib/upstash-config.mjs +76 -4
  19. package/dist/services/ai/abort.d.ts +2 -0
  20. package/dist/services/ai/abort.d.ts.map +1 -0
  21. package/dist/services/ai/abort.js +24 -0
  22. package/dist/services/ai/abort.mjs +22 -0
  23. package/dist/services/ai/env.d.ts +21 -0
  24. package/dist/services/ai/env.d.ts.map +1 -0
  25. package/dist/services/ai/env.js +85 -0
  26. package/dist/services/ai/env.mjs +80 -0
  27. package/dist/services/ai/error.d.ts +3 -0
  28. package/dist/services/ai/error.d.ts.map +1 -0
  29. package/dist/services/ai/error.js +54 -0
  30. package/dist/services/ai/error.mjs +52 -0
  31. package/dist/services/ai/index.d.ts +9 -0
  32. package/dist/services/ai/index.d.ts.map +1 -0
  33. package/dist/services/ai/index.js +30 -0
  34. package/dist/services/ai/index.mjs +7 -0
  35. package/dist/services/ai/message-builder.d.ts +4 -0
  36. package/dist/services/ai/message-builder.d.ts.map +1 -0
  37. package/dist/services/ai/message-builder.js +15 -0
  38. package/dist/services/ai/message-builder.mjs +13 -0
  39. package/dist/services/ai/mock.d.ts +30 -0
  40. package/dist/services/ai/mock.d.ts.map +1 -0
  41. package/dist/services/ai/mock.js +314 -0
  42. package/dist/services/ai/mock.mjs +308 -0
  43. package/dist/services/ai/openrouter-client.d.ts +12 -0
  44. package/dist/services/ai/openrouter-client.d.ts.map +1 -0
  45. package/dist/services/ai/openrouter-client.js +81 -0
  46. package/dist/services/ai/openrouter-client.mjs +78 -0
  47. package/dist/services/ai/route.d.ts +6 -0
  48. package/dist/services/ai/route.d.ts.map +1 -0
  49. package/dist/services/ai/route.js +178 -0
  50. package/dist/services/ai/route.mjs +173 -0
  51. package/dist/services/ai/types.d.ts +98 -0
  52. package/dist/services/ai/types.d.ts.map +1 -0
  53. package/package.json +11 -4
  54. package/src/index.ts +1 -0
  55. package/src/lib/upstash/qstash.ts +55 -15
  56. package/src/lib/upstash/redis-structures.ts +248 -0
  57. package/src/lib/upstash-config.ts +106 -4
  58. package/src/services/ai/abort.ts +26 -0
  59. package/src/services/ai/env.ts +120 -0
  60. package/src/services/ai/error.ts +64 -0
  61. package/src/services/ai/index.ts +8 -0
  62. package/src/services/ai/message-builder.ts +17 -0
  63. package/src/services/ai/mock.ts +378 -0
  64. package/src/services/ai/openrouter-client.ts +94 -0
  65. package/src/services/ai/route.ts +218 -0
  66. package/src/services/ai/types.ts +131 -0
package/src/services/ai/env.ts
@@ -0,0 +1,120 @@
+ import type { AIMockHandler, AIRuntimeContext, OpenRouterClientConfig } from './types';
+ import { createScenarioMockHandler } from './mock';
+
+ function parseNumber(value: string | undefined, fallback: number) {
+   const parsed = Number(value);
+   return Number.isFinite(parsed) && parsed > 0 ? parsed : fallback;
+ }
+
+ function parseBoolean(value: string | undefined, fallback: boolean) {
+   if (value === undefined) {
+     return fallback;
+   }
+
+   return value === '1' || value === 'true' || value === 'TRUE';
+ }
+
+ export type OpenRouterEnvConfig = {
+   appName: string;
+   timeoutMs: number;
+   apiKey: string;
+   modelName: string;
+   enableMock: boolean;
+   mockType: number;
+   mockTimeoutSeconds: number;
+   mockStreamChunkDelayMs: number;
+   mockStreamChunkSize: number;
+   contextWindowTurns: number;
+   debug: boolean;
+   baseUrl?: string;
+   referer?: string;
+ };
+
+ type RequestMockOverride = {
+   enabled?: boolean;
+   type?: number;
+   timeoutSeconds?: number;
+   chunkDelayMs?: number;
+   chunkSize?: number;
+ };
+
+ export function getOpenRouterEnvConfig(): OpenRouterEnvConfig {
+   return {
+     appName: process.env.NEXT_PUBLIC_APP_NAME || 'DDaaS',
+     timeoutMs: parseNumber(process.env.OPENROUTER_TIMEOUT_SECONDS, 240) * 1000,
+     apiKey: process.env.OPENROUTER_API_KEY || '',
+     modelName:
+       process.env.NEXT_PUBLIC_OPENROUTER_MODEL_NAME || 'google/gemini-2.0-flash-001',
+     enableMock: parseBoolean(process.env.OPENROUTER_ENABLE_MOCK, true),
+     mockType: parseNumber(process.env.OPENROUTER_MOCK_TYPE, 0),
+     mockTimeoutSeconds: parseNumber(process.env.OPENROUTER_MOCK_TIMEOUT_SECONDS, 3),
+     mockStreamChunkDelayMs: parseNumber(process.env.OPENROUTER_MOCK_STREAM_CHUNK_DELAY_MS, 60),
+     mockStreamChunkSize: parseNumber(process.env.OPENROUTER_MOCK_STREAM_CHUNK_SIZE, 8),
+     contextWindowTurns: parseNumber(process.env.NEXT_PUBLIC_CHAT_CONTEXT_WINDOW_TURNS, 6),
+     debug: parseBoolean(process.env.NEXT_PUBLIC_OPENROUTER_DEBUG, false),
+     baseUrl: process.env.OPENROUTER_BASE_URL,
+     referer: process.env.NEXT_PUBLIC_BASE_URL || 'http://localhost:3000',
+   };
+ }
+
+ export function createOpenRouterClientConfigFromEnv(
+   overrides?: Partial<OpenRouterClientConfig>,
+ ): OpenRouterClientConfig {
+   const envConfig = getOpenRouterEnvConfig();
+
+   return {
+     apiKey: overrides?.apiKey ?? envConfig.apiKey,
+     baseUrl: overrides?.baseUrl ?? envConfig.baseUrl,
+     defaultModel: overrides?.defaultModel ?? envConfig.modelName,
+     referer: overrides?.referer ?? envConfig.referer,
+     title: overrides?.title ?? envConfig.appName,
+     timeoutMs: overrides?.timeoutMs ?? envConfig.timeoutMs,
+     provider: overrides?.provider,
+     temperature: overrides?.temperature,
+     maxTokens: overrides?.maxTokens,
+     fetchImpl: overrides?.fetchImpl,
+   };
+ }
+
+ export function createOpenRouterMockFromEnv(): AIMockHandler | undefined {
+   return createOpenRouterMockFromEnvForContext();
+ }
+
+ function getRequestMockOverride(context?: AIRuntimeContext): RequestMockOverride | null {
+   const value = context?.input.metadata?.mock;
+   if (!value || typeof value !== 'object') {
+     return null;
+   }
+
+   return value as RequestMockOverride;
+ }
+
+ export function createOpenRouterMockFromEnvForContext(
+   context?: AIRuntimeContext,
+ ): AIMockHandler | undefined {
+   const envConfig = getOpenRouterEnvConfig();
+
+   if (!envConfig.enableMock) {
+     return undefined;
+   }
+
+   const requestOverride = getRequestMockOverride(context);
+   if (requestOverride?.enabled === false) {
+     return undefined;
+   }
+
+   const mockType = requestOverride?.type ?? envConfig.mockType;
+   const mockTimeoutSeconds = requestOverride?.timeoutSeconds ?? envConfig.mockTimeoutSeconds;
+   const mockStreamChunkDelayMs =
+     requestOverride?.chunkDelayMs ?? envConfig.mockStreamChunkDelayMs;
+   const mockStreamChunkSize = requestOverride?.chunkSize ?? envConfig.mockStreamChunkSize;
+
+   return createScenarioMockHandler({
+     text:
+       'This is a mock AI response from the shared backend-core runtime. Configure OPENROUTER_API_KEY and disable OPENROUTER_ENABLE_MOCK to use the real upstream model.',
+     mockType,
+     mockTimeoutSeconds,
+     mockStreamChunkDelayMs,
+     mockStreamChunkSize,
+   });
+ }
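
Example (not part of the published diff): a minimal sketch of how the env helpers above could be wired together. The root import specifier and the placeholder request context are assumptions; only the function names and signatures come from this release.

import {
  createOpenRouterClientConfigFromEnv,
  createOpenRouterMockFromEnvForContext,
  type AIRuntimeContext,
} from '@windrun-huaiin/backend-core'; // assumed export path

declare const context: AIRuntimeContext; // placeholder request context

// Client config resolved from env, with an optional per-call override.
const clientConfig = createOpenRouterClientConfigFromEnv({ temperature: 0.2 });

// With OPENROUTER_ENABLE_MOCK at its default (true), this returns a scenario mock;
// per-request overrides are read from context.input.metadata.mock.
const mockHandler = createOpenRouterMockFromEnvForContext(context);
const response = mockHandler ? await mockHandler(context) : undefined;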
package/src/services/ai/error.ts
@@ -0,0 +1,64 @@
+ import {
+   createAIErrorPayload,
+   type AIErrorPayload,
+ } from '@windrun-huaiin/contracts/ai';
+
+ function isObject(value: unknown): value is Record<string, unknown> {
+   return typeof value === 'object' && value !== null;
+ }
+
+ function getProviderErrorMessage(data: unknown) {
+   if (!isObject(data)) {
+     return null;
+   }
+
+   const error = data.error;
+   if (isObject(error) && typeof error.message === 'string') {
+     return error.message;
+   }
+
+   if (typeof data.message === 'string') {
+     return data.message;
+   }
+
+   return null;
+ }
+
+ function isAbortError(error: unknown) {
+   return error instanceof DOMException && error.name === 'AbortError';
+ }
+
+ export function normalizeAIError(error: unknown): AIErrorPayload {
+   if (isObject(error) && typeof error.status === 'number') {
+     const message =
+       getProviderErrorMessage(error) ??
+       (typeof error.message === 'string' ? error.message : 'Error communicating with AI');
+
+     return createAIErrorPayload({
+       message,
+       upstreamStatusCode: error.status,
+     });
+   }
+
+   if (error instanceof Response) {
+     return createAIErrorPayload({
+       message: error.statusText || 'Error communicating with AI',
+       upstreamStatusCode: error.status || 500,
+     });
+   }
+
+   if (isAbortError(error)) {
+     return {
+       error: 'Request timed out',
+       status: 'timeout',
+       upstreamStatusCode: 408,
+     };
+   }
+
+   return {
+     error: error instanceof Error ? error.message : 'Error communicating with AI',
+     status: 'failed',
+     failureReason: 'unknown',
+     upstreamStatusCode: 500,
+   };
+ }
package/src/services/ai/index.ts
@@ -0,0 +1,8 @@
+ export * from './types';
+ export * from './abort';
+ export * from './error';
+ export * from './env';
+ export * from './message-builder';
+ export * from './mock';
+ export * from './openrouter-client';
+ export * from './route';
package/src/services/ai/message-builder.ts
@@ -0,0 +1,17 @@
+ import {
+   getMessageText,
+   type AIRuntimeRequest,
+ } from '@windrun-huaiin/contracts/ai';
+ import type { OpenRouterRequestBody } from './types';
+
+ export function buildModelMessages(
+   messages: AIRuntimeRequest['messages'],
+ ): OpenRouterRequestBody['messages'] {
+   return messages
+     .filter((message) => message.status !== 'failed')
+     .map((message) => ({
+       role: message.role,
+       content: getMessageText(message),
+     }))
+     .filter((message) => message.content.trim().length > 0);
+ }
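
Example (not part of the published diff): how buildModelMessages could feed the upstream request body. The import path and the placeholder request are assumptions.

import type { AIRuntimeRequest } from '@windrun-huaiin/contracts/ai';
import { buildModelMessages } from '@windrun-huaiin/backend-core'; // assumed export path

declare const runtimeRequest: AIRuntimeRequest; // placeholder incoming chat request

// Failed messages and whitespace-only messages are dropped before the upstream call.
const modelMessages = buildModelMessages(runtimeRequest.messages);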
package/src/services/ai/mock.ts
@@ -0,0 +1,378 @@
+ import {
+   createAIErrorPayload,
+   type AIStreamEvent,
+ } from '@windrun-huaiin/contracts/ai';
+ import type { AIMockHandler, AIRuntimeContext } from './types';
+
+ const streamingHeaders = {
+   'Content-Type': 'text/event-stream; charset=utf-8',
+   'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate, no-transform',
+   Connection: 'keep-alive',
+   Pragma: 'no-cache',
+   'X-Accel-Buffering': 'no',
+ } as const;
+
+ function encodeEvent(event: AIStreamEvent) {
+   return `data: ${JSON.stringify(event)}\n\n`;
+ }
+
+ function createStreamResponse(events: AIStreamEvent[]) {
+   const encoder = new TextEncoder();
+   const stream = new ReadableStream<Uint8Array>({
+     start(controller) {
+       for (const event of events) {
+         controller.enqueue(encoder.encode(encodeEvent(event)));
+       }
+       controller.close();
+     },
+   });
+
+   return new Response(stream, {
+     headers: streamingHeaders,
+   });
+ }
+
+ export function createSimpleMockHandler(text: string): AIMockHandler {
+   return (context: AIRuntimeContext) => {
+     const messageId = `mock-${context.requestId}`;
+     return createStreamResponse([
+       {
+         type: 'message_started',
+         messageId,
+         createdAt: Date.now(),
+       },
+       {
+         type: 'text_delta',
+         messageId,
+         text,
+       },
+       {
+         type: 'message_completed',
+         messageId,
+         createdAt: Date.now(),
+       },
+     ]);
+   };
+ }
+
+ export function createErrorMockResponse(statusCode: number, message: string) {
+   return Response.json(
+     createAIErrorPayload({
+       message,
+       upstreamStatusCode: statusCode,
+     }),
+     { status: statusCode },
+   );
+ }
+
+ type MockFailureType = 'timeout' | 'request_aborted' | 'stream_error';
+
+ type MockScenario = {
+   mode?: 'text_stream' | 'event_sequence';
+   initialDelayMs?: number;
+   streamFailureType?: MockFailureType;
+   streamFailureAfterChunks?: number;
+   immediateErrorType?: MockFailureType;
+ };
+
+ export type ConfigurableMockOptions = {
+   text: string;
+   initialDelayMs?: number;
+   chunkDelayMs?: number;
+   chunkSize?: number;
+   streamFailureType?: MockFailureType;
+   streamFailureAfterChunks?: number;
+ };
+
+ export function getMockScenario(mockType: number, mockTimeoutMs: number): MockScenario {
+   switch (mockType) {
+     case 1:
+       return {
+         mode: 'text_stream',
+         initialDelayMs: mockTimeoutMs,
+       };
+     case 2:
+       return {
+         mode: 'text_stream',
+         immediateErrorType: 'timeout',
+       };
+     case 3:
+       return {
+         mode: 'text_stream',
+         streamFailureType: 'timeout',
+         streamFailureAfterChunks: 3,
+       };
+     case 4:
+       return {
+         mode: 'text_stream',
+         streamFailureType: 'request_aborted',
+         streamFailureAfterChunks: 3,
+       };
+     case 5:
+       return {
+         mode: 'text_stream',
+         streamFailureType: 'stream_error',
+         streamFailureAfterChunks: 3,
+       };
+     case 6:
+       return {
+         mode: 'event_sequence',
+       };
+     case 7:
+       return {
+         mode: 'event_sequence',
+       };
+     default:
+       return {
+         mode: 'text_stream',
+       };
+   }
+ }
+
+ async function sleep(delayInMs: number) {
+   await new Promise((resolve) => setTimeout(resolve, delayInMs));
+ }
+
+ function createMockErrorPayload(failureType: MockFailureType) {
+   if (failureType === 'timeout') {
+     return createAIErrorPayload({
+       message: 'Request timed out',
+       upstreamStatusCode: 408,
+     });
+   }
+
+   if (failureType === 'request_aborted') {
+     return createAIErrorPayload({
+       message: 'Request aborted',
+       upstreamStatusCode: 499,
+     });
+   }
+
+   return createAIErrorPayload({
+     message: 'Error communicating with AI',
+     upstreamStatusCode: 502,
+     failureReason: 'stream_error',
+   });
+ }
+
+ function createMockFailureResponse(failureType: MockFailureType) {
+   const payload = createMockErrorPayload(failureType);
+   return Response.json(payload, { status: payload.upstreamStatusCode ?? 500 });
+ }
+
+ function chunkTextByWords(text: string, chunkSize: number) {
+   const wordChunks = text.match(/\S+\s*/g) ?? [text];
+   const normalizedChunkSize = Math.max(1, chunkSize);
+   const chunks: string[] = [];
+
+   for (let index = 0; index < wordChunks.length; index += normalizedChunkSize) {
+     chunks.push(wordChunks.slice(index, index + normalizedChunkSize).join(''));
+   }
+
+   return chunks;
+ }
+
+ export function createConfigurableMockHandler(options: ConfigurableMockOptions): AIMockHandler {
+   return async (context: AIRuntimeContext) => {
+     if ((options.initialDelayMs ?? 0) > 0) {
+       await sleep(options.initialDelayMs!);
+     }
+
+     const messageId = `mock-${context.requestId}`;
+     const chunks = chunkTextByWords(options.text, options.chunkSize ?? 4);
+     const chunkDelayMs = Math.max(0, options.chunkDelayMs ?? 0);
+     const failureAfterChunks = options.streamFailureAfterChunks ?? 0;
+     const encoder = new TextEncoder();
+
+     const stream = new ReadableStream<Uint8Array>({
+       async start(controller) {
+         controller.enqueue(
+           encoder.encode(
+             encodeEvent({
+               type: 'message_started',
+               messageId,
+               createdAt: Date.now(),
+             }),
+           ),
+         );
+
+         for (let index = 0; index < chunks.length; index += 1) {
+           if (
+             options.streamFailureType &&
+             failureAfterChunks > 0 &&
+             index >= failureAfterChunks
+           ) {
+             controller.enqueue(
+               encoder.encode(
+                 encodeEvent({
+                   type: 'error',
+                   error: createMockErrorPayload(options.streamFailureType),
+                 }),
+               ),
+             );
+             controller.close();
+             return;
+           }
+
+           controller.enqueue(
+             encoder.encode(
+               encodeEvent({
+                 type: 'text_delta',
+                 messageId,
+                 text: chunks[index],
+               }),
+             ),
+           );
+
+           if (chunkDelayMs > 0) {
+             await sleep(chunkDelayMs);
+           }
+         }
+
+         controller.enqueue(
+           encoder.encode(
+             encodeEvent({
+               type: 'message_completed',
+               messageId,
+               createdAt: Date.now(),
+             }),
+           ),
+         );
+         controller.close();
+       },
+     });
+
+     return new Response(stream, {
+       headers: streamingHeaders,
+     });
+   };
+ }
+
+ function createEventSequenceMockHandler(events: AIStreamEvent[]): AIMockHandler {
+   return async () => createStreamResponse(events);
+ }
+
+ function createMarkdownShowcaseEvents(messageId: string): AIStreamEvent[] {
+   return [
+     {
+       type: 'message_started',
+       messageId,
+       createdAt: Date.now(),
+     },
+     {
+       type: 'text_delta',
+       messageId,
+       text: [
+         '# Markdown Showcase',
+         '',
+         'This scenario verifies headings, lists, quotes, tables, code, and image rendering in the chat message body.',
+         '',
+         '## Bullet List',
+         '',
+         '- Bullet list item one',
+         '- Bullet list item two',
+         '',
+         '## Quote',
+         '',
+         '> Blockquote content for layout verification.',
+         '',
+         '## Table',
+         '',
+         '| Column | Value |',
+         '| --- | --- |',
+         '| Status | OK |',
+         '| Mode | Markdown |',
+         '',
+         '## Code',
+         '',
+         '```ts',
+         "const mode = 'markdown-showcase';",
+         'console.log(mode);',
+         '```',
+         '',
+         '## Image',
+         '',
+         '![Mock Image](https://r2.d8ger.com/default.webp)',
+       ].join('\n'),
+     },
+     {
+       type: 'message_completed',
+       messageId,
+       createdAt: Date.now(),
+     },
+   ];
+ }
+
+ function createTrophyCardShowcaseEvents(messageId: string): AIStreamEvent[] {
+   return [
+     {
+       type: 'message_started',
+       messageId,
+       createdAt: Date.now(),
+     },
+     {
+       type: 'text_delta',
+       messageId,
+       text: [
+         '# Trophy Card Showcase',
+         '',
+         'This scenario verifies a structured chat part rendered between normal markdown blocks.',
+         '',
+         'The card below is emitted as a dedicated `trophy_card` part, not as Markdown component syntax.',
+       ].join('\n'),
+     },
+     {
+       type: 'part',
+       messageId,
+       part: {
+         type: 'trophy_card',
+         title: 'Structured Trophy Card',
+         description: 'This is rendered from `MessagePart`, which is the recommended path for chat-specific rich blocks.\n\n- Reusable shared React component\n- Chat-specific structured part\n- Ready to extend to file, audio, and video cards',
+       },
+     },
+     {
+       type: 'text_delta',
+       messageId,
+       text: '\n\nUse this as the reference pattern for future file, audio, video, or tool result parts.',
+     },
+     {
+       type: 'message_completed',
+       messageId,
+       createdAt: Date.now(),
+     },
+   ];
+ }
+
+ export function createScenarioMockHandler(params: {
+   text: string;
+   mockType: number;
+   mockTimeoutSeconds: number;
+   mockStreamChunkDelayMs: number;
+   mockStreamChunkSize: number;
+ }): AIMockHandler {
+   const scenario = getMockScenario(params.mockType, params.mockTimeoutSeconds * 1000);
+   const messageId = `mock-scenario-${params.mockType}`;
+
+   if (scenario.immediateErrorType) {
+     return async () => createMockFailureResponse(scenario.immediateErrorType!);
+   }
+
+   if (scenario.mode === 'event_sequence') {
+     if (params.mockType === 6) {
+       return createEventSequenceMockHandler(createMarkdownShowcaseEvents(messageId));
+     }
+
+     if (params.mockType === 7) {
+       return createEventSequenceMockHandler(createTrophyCardShowcaseEvents(messageId));
+     }
+   }
+
+   return createConfigurableMockHandler({
+     text: params.text,
+     initialDelayMs: scenario.initialDelayMs,
+     chunkDelayMs: params.mockStreamChunkDelayMs,
+     chunkSize: params.mockStreamChunkSize,
+     streamFailureType: scenario.streamFailureType,
+     streamFailureAfterChunks: scenario.streamFailureAfterChunks,
+   });
+ }
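
Example (not part of the published diff): a sketch of selecting a scenario mock directly. The import path and the placeholder context are assumptions; the scenario numbering comes from getMockScenario above.

import {
  createScenarioMockHandler,
  type AIRuntimeContext,
} from '@windrun-huaiin/backend-core'; // assumed export path

declare const context: AIRuntimeContext; // placeholder runtime context

// Mock types as encoded in getMockScenario: 0 = plain stream, 1 = delayed start,
// 2 = immediate timeout error, 3/4/5 = timeout / abort / stream error after three chunks,
// 6 = markdown showcase, 7 = trophy-card showcase.
const handler = createScenarioMockHandler({
  text: 'Streamed mock reply for local development.',
  mockType: 6,
  mockTimeoutSeconds: 3,
  mockStreamChunkDelayMs: 60,
  mockStreamChunkSize: 8,
});

const response = await handler(context); // SSE Response emitting AIStreamEvent frames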
package/src/services/ai/openrouter-client.ts
@@ -0,0 +1,94 @@
+ import { normalizeAIError } from './error';
+ import type {
+   OpenRouterClientConfig,
+   OpenRouterRequestBody,
+   OpenRouterStreamResult,
+ } from './types';
+
+ const OPENROUTER_BASE_URL = 'https://openrouter.ai/api/v1';
+
+ export async function callOpenRouterStream(
+   config: OpenRouterClientConfig,
+   body: OpenRouterRequestBody,
+   signal: AbortSignal,
+ ): Promise<OpenRouterStreamResult> {
+   const fetchImpl = config.fetchImpl ?? fetch;
+   const response = await fetchImpl(`${config.baseUrl ?? OPENROUTER_BASE_URL}/chat/completions`, {
+     method: 'POST',
+     signal,
+     headers: {
+       Authorization: `Bearer ${config.apiKey}`,
+       'Content-Type': 'application/json',
+       ...(config.referer ? { 'HTTP-Referer': config.referer } : {}),
+       ...(config.title ? { 'X-Title': config.title } : {}),
+     },
+     body: JSON.stringify(body),
+   });
+
+   if (!response.ok || !response.body) {
+     let errorData: unknown = null;
+     try {
+       errorData = await response.clone().json();
+     } catch {
+       errorData = { message: response.statusText };
+     }
+
+     throw {
+       status: response.status,
+       message: response.statusText,
+       ...((typeof errorData === 'object' && errorData !== null) ? errorData : {}),
+     };
+   }
+
+   return {
+     response,
+     status: response.status,
+   };
+ }
+
+ export async function guardedOpenRouterStreamStart(
+   response: Response,
+ ) {
+   const reader = response.body?.getReader();
+   if (!reader) {
+     return {
+       ok: false as const,
+       error: normalizeAIError(new Error('Empty upstream response body')),
+     };
+   }
+
+   const firstChunk = await reader.read();
+   if (firstChunk.done || !firstChunk.value) {
+     return {
+       ok: false as const,
+       error: {
+         error: 'AI returned an empty response',
+         status: 'failed' as const,
+         failureReason: 'empty_response' as const,
+         upstreamStatusCode: response.status,
+       },
+     };
+   }
+
+   const stream = new ReadableStream<Uint8Array>({
+     start(controller) {
+       controller.enqueue(firstChunk.value);
+     },
+     async pull(controller) {
+       const nextChunk = await reader.read();
+       if (nextChunk.done) {
+         controller.close();
+         return;
+       }
+       controller.enqueue(nextChunk.value);
+     },
+     cancel(reason) {
+       void reader.cancel(reason);
+     },
+   });
+
+   return {
+     ok: true as const,
+     stream,
+   };
+ }
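
Example (not part of the published diff): a hypothetical wrapper showing how callOpenRouterStream and guardedOpenRouterStreamStart could be composed with normalizeAIError. The import path and the wrapper function itself are assumptions; only the exported names and signatures come from this release.

import {
  callOpenRouterStream,
  createOpenRouterClientConfigFromEnv,
  guardedOpenRouterStreamStart,
  normalizeAIError,
  type OpenRouterRequestBody,
} from '@windrun-huaiin/backend-core'; // assumed export path

export async function streamCompletion(body: OpenRouterRequestBody): Promise<Response> {
  const config = createOpenRouterClientConfigFromEnv();
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), config.timeoutMs ?? 240_000);

  try {
    // Non-OK or body-less upstream responses are thrown with a `status` field,
    // which normalizeAIError maps to an AIErrorPayload.
    const { response } = await callOpenRouterStream(config, body, controller.signal);

    // Read the first chunk before committing to a streamed response, so an
    // empty upstream body becomes a structured error instead of a dead stream.
    const guarded = await guardedOpenRouterStreamStart(response);
    if (!guarded.ok) {
      return Response.json(guarded.error, { status: guarded.error.upstreamStatusCode ?? 502 });
    }

    return new Response(guarded.stream, {
      headers: { 'Content-Type': 'text/event-stream; charset=utf-8' },
    });
  } catch (error) {
    const payload = normalizeAIError(error);
    return Response.json(payload, { status: payload.upstreamStatusCode ?? 500 });
  } finally {
    clearTimeout(timer);
  }
}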