@ifc-lite/viewer 1.17.4 → 1.17.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/.turbo/turbo-build.log +16 -16
  2. package/.turbo/turbo-typecheck.log +1 -1
  3. package/CHANGELOG.md +117 -0
  4. package/DESKTOP_CONTRACT_VERSION +1 -1
  5. package/dist/assets/{basketViewActivator-BmnNtVfZ.js → basketViewActivator-86rgogji.js} +1 -1
  6. package/dist/assets/drawing-2d-DoxKMqbO.js +257 -0
  7. package/dist/assets/{exporters-ChAtBmlj.js → exporters-CcPS9MK5.js} +2274 -2227
  8. package/dist/assets/{geometry.worker-BQ0rzNo-.js → geometry.worker-BFUYA08u.js} +1 -1
  9. package/dist/assets/ids-DQ5jY0E8.js +1 -0
  10. package/dist/assets/ifc-lite_bg-BINvzoCP.wasm +0 -0
  11. package/dist/assets/{index-Co8E2-FE.js → index-Bfms9I4A.js} +35160 -33084
  12. package/dist/assets/index-_bfZsDCC.css +1 -0
  13. package/dist/assets/{native-bridge-BRvbckFQ.js → native-bridge-DUyLCMZS.js} +104 -104
  14. package/dist/assets/{sandbox-DZiNLNMk.js → sandbox-C8575tul.js} +4340 -4322
  15. package/dist/assets/{server-client-BV8zHZ7Y.js → server-client-BuZK7OST.js} +1 -1
  16. package/dist/assets/{wasm-bridge-g01g7T9b.js → wasm-bridge-JsqEGDV8.js} +1 -1
  17. package/dist/index.html +8 -7
  18. package/index.html +1 -0
  19. package/package.json +7 -7
  20. package/src/App.tsx +16 -2
  21. package/src/components/viewer/CesiumOverlay.tsx +62 -19
  22. package/src/components/viewer/ChatPanel.tsx +195 -91
  23. package/src/components/viewer/MainToolbar.tsx +4 -3
  24. package/src/components/viewer/PropertiesPanel.tsx +16 -2
  25. package/src/components/viewer/SettingsPage.tsx +252 -101
  26. package/src/components/viewer/ThemeSwitch.tsx +63 -7
  27. package/src/components/viewer/ViewerLayout.tsx +1 -0
  28. package/src/components/viewer/Viewport.tsx +14 -2
  29. package/src/components/viewer/ViewportContainer.tsx +49 -64
  30. package/src/components/viewer/ViewportOverlays.tsx +5 -2
  31. package/src/components/viewer/bcf/BCFTopicDetail.tsx +4 -4
  32. package/src/components/viewer/chat/ModelSelector.tsx +90 -54
  33. package/src/components/viewer/properties/GeoreferencingPanel.tsx +113 -51
  34. package/src/components/viewer/properties/LocationMap.tsx +9 -7
  35. package/src/components/viewer/properties/ModelMetadataPanel.tsx +1 -1
  36. package/src/components/viewer/tools/SectionCapControls.tsx +237 -0
  37. package/src/components/viewer/tools/SectionPanel.tsx +39 -18
  38. package/src/components/viewer/useAnimationLoop.ts +9 -1
  39. package/src/components/viewer/useRenderUpdates.ts +1 -1
  40. package/src/hooks/ids/idsDataAccessor.ts +60 -24
  41. package/src/hooks/ingest/viewerModelIngest.ts +7 -2
  42. package/src/hooks/useIfcFederation.ts +326 -71
  43. package/src/hooks/useIfcLoader.ts +1 -0
  44. package/src/hooks/useViewControls.ts +13 -5
  45. package/src/index.css +484 -10
  46. package/src/lib/desktop-entitlement.ts +2 -4
  47. package/src/lib/geo/cesium-bridge.ts +15 -7
  48. package/src/lib/geo/effective-georef.test.ts +73 -0
  49. package/src/lib/geo/effective-georef.ts +111 -0
  50. package/src/lib/geo/reproject.ts +105 -19
  51. package/src/lib/llm/byok-guard.test.ts +77 -0
  52. package/src/lib/llm/byok-guard.ts +39 -0
  53. package/src/lib/llm/free-models.test.ts +0 -6
  54. package/src/lib/llm/models.ts +104 -42
  55. package/src/lib/llm/stream-client.ts +74 -110
  56. package/src/lib/llm/stream-direct.test.ts +130 -0
  57. package/src/lib/llm/stream-direct.ts +316 -0
  58. package/src/lib/llm/types.ts +14 -2
  59. package/src/main.tsx +1 -10
  60. package/src/services/api-keys.ts +73 -0
  61. package/src/store/constants.ts +20 -2
  62. package/src/store/index.ts +12 -5
  63. package/src/store/slices/cesiumSlice.ts +5 -0
  64. package/src/store/slices/chatSlice.test.ts +6 -76
  65. package/src/store/slices/chatSlice.ts +17 -58
  66. package/src/store/slices/sectionSlice.test.ts +87 -7
  67. package/src/store/slices/sectionSlice.ts +151 -5
  68. package/src/store/slices/uiSlice.ts +28 -5
  69. package/src/store/types.ts +26 -0
  70. package/src/utils/nativeSpatialDataStore.ts +4 -1
  71. package/src/utils/viewportUtils.ts +7 -2
  72. package/src/vite-env.d.ts +0 -4
  73. package/dist/assets/drawing-2d-gWfpdfYe.js +0 -257
  74. package/dist/assets/ids-B4jTqB1O.js +0 -1
  75. package/dist/assets/ifc-lite_bg-BX4E7TX8.wasm +0 -0
  76. package/dist/assets/index-DckuDqlv.css +0 -1
  77. package/src/components/viewer/UpgradePage.tsx +0 -71
  78. package/src/lib/desktop/ClerkDesktopEntitlementSync.tsx +0 -175
  79. package/src/lib/llm/ClerkChatSync.tsx +0 -74
  80. package/src/lib/llm/clerk-auth.ts +0 -62
package/src/lib/llm/models.ts
@@ -4,11 +4,10 @@
 
 /**
  * LLM model registry.
- * Model IDs are sourced from environment variables only.
- * Pro model cost metadata comes from cost-bucket env vars:
- * - *_PRO_MODELS_LOW => $
- * - *_PRO_MODELS_MEDIUM => $$
- * - *_PRO_MODELS_HIGH => $$$
+ *
+ * Free models: sourced from VITE_LLM_FREE_MODELS env var, served through the server proxy.
+ * BYOK models: statically defined Anthropic and OpenAI models, accessed directly from the
+ * browser using the user's own API key.
  */
 
 import type { LLMModel } from './types.js';
@@ -93,46 +92,25 @@ function humanizeModelSlug(slug: string): string {
     .join(' ');
 }
 
-function buildModel(id: string, tier: 'free' | 'pro', cost?: LLMModel['cost']): LLMModel {
+function buildModel(id: string, tier: 'free' | 'byok', cost?: LLMModel['cost'], source?: LLMModel['source']): LLMModel {
   const [providerRaw, modelRaw = id] = id.split('/');
   return {
     id,
     tier,
+    source: source ?? 'proxy',
     name: humanizeModelSlug(modelRaw),
     provider: titleCaseProvider(providerRaw ?? 'Unknown'),
     contextWindow: 128_000,
     supportsImages: false,
     supportsFileAttachments: true,
-    cost: tier === 'pro' ? cost : undefined,
+    cost: tier === 'byok' ? cost : undefined,
   };
 }
 
 const freeModelIds = uniqueInOrder(parseCsvFromFirstDefined(['VITE_LLM_FREE_MODELS', 'LLM_FREE_MODELS']));
-const proLowCostIds = uniqueInOrder(parseCsvFromFirstDefined(['VITE_LLM_PRO_MODELS_LOW', 'LLM_PRO_MODELS_LOW']));
-const proMediumCostIds = uniqueInOrder(parseCsvFromFirstDefined(['VITE_LLM_PRO_MODELS_MEDIUM', 'LLM_PRO_MODELS_MEDIUM']));
-const proHighCostIds = uniqueInOrder(parseCsvFromFirstDefined(['VITE_LLM_PRO_MODELS_HIGH', 'LLM_PRO_MODELS_HIGH']));
-
-// Backward-compatible fallback for older env shape with one pro list.
-const legacyProIds = uniqueInOrder(parseCsvFromFirstDefined(['VITE_LLM_PRO_MODELS', 'LLM_PRO_MODELS']));
-const useLegacyProList = proLowCostIds.length === 0 && proMediumCostIds.length === 0 && proHighCostIds.length === 0;
 
 const rawFreeModels: LLMModel[] = freeModelIds.map((id) => buildModel(id, 'free'));
 
-const proCostBuckets: Array<{ ids: string[]; cost: LLMModel['cost'] }> = [
-  { ids: proLowCostIds, cost: '$' },
-  { ids: useLegacyProList ? legacyProIds : proMediumCostIds, cost: '$$' },
-  { ids: proHighCostIds, cost: '$$$' },
-];
-
-const seenProModelIds = new Set<string>();
-const rawProModels: LLMModel[] = proCostBuckets.flatMap(({ ids, cost }) =>
-  ids.flatMap((id) => {
-    if (seenProModelIds.has(id)) return [];
-    seenProModelIds.add(id);
-    return [buildModel(id, 'pro', cost)];
-  }),
-);
-
 const imageCapableModelIds = new Set(
   uniqueInOrder(parseCsvFromFirstDefined(['VITE_LLM_IMAGE_MODELS', 'LLM_IMAGE_MODELS'])),
 );
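
For orientation: roughly what buildModel yields for a proxy-served free model. The model ID below is hypothetical, and the exact display name depends on humanizeModelSlug, whose body is only partially visible in this hunk.

const m = buildModel('openai/gpt-4o-mini', 'free'); // hypothetical ID
// m.id === 'openai/gpt-4o-mini'
// m.tier === 'free'; m.source === 'proxy' (the new default)
// m.provider === 'OpenAI' (via titleCaseProvider)
// m.name — humanized from the slug 'gpt-4o-mini'
// m.cost === undefined (cost metadata now applies only to the 'byok' tier)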
@@ -155,43 +133,127 @@ function applyCapabilities(model: LLMModel): LLMModel {
 }
 
 export const FREE_MODELS: LLMModel[] = rawFreeModels.map(applyCapabilities);
-export const PRO_MODELS: LLMModel[] = rawProModels.map(applyCapabilities);
-export const ALL_MODELS = [...FREE_MODELS, ...PRO_MODELS];
+
+// ── BYOK (Bring Your Own Key) models ───────────────────────────────────────
+// Static list of well-known models users can access with their own API keys.
+// Requests go directly from the browser to the provider (no server proxy).
+
+const ANTHROPIC_BYOK_MODELS: LLMModel[] = [
+  {
+    id: 'claude-opus-4-6',
+    name: 'Claude Opus 4.6',
+    provider: 'Anthropic',
+    tier: 'byok',
+    source: 'anthropic',
+    contextWindow: 200_000,
+    supportsImages: true,
+    supportsFileAttachments: true,
+    cost: '$$$',
+  },
+  {
+    id: 'claude-sonnet-4-6',
+    name: 'Claude Sonnet 4.6',
+    provider: 'Anthropic',
+    tier: 'byok',
+    source: 'anthropic',
+    contextWindow: 200_000,
+    supportsImages: true,
+    supportsFileAttachments: true,
+    cost: '$$',
+  },
+  {
+    id: 'claude-haiku-4-5-20251001',
+    name: 'Claude Haiku 4.5',
+    provider: 'Anthropic',
+    tier: 'byok',
+    source: 'anthropic',
+    contextWindow: 200_000,
+    supportsImages: true,
+    supportsFileAttachments: true,
+    cost: '$',
+  },
+];
+
+const OPENAI_BYOK_MODELS: LLMModel[] = [
+  {
+    id: 'gpt-5.4',
+    name: 'GPT-5.4',
+    provider: 'OpenAI',
+    tier: 'byok',
+    source: 'openai',
+    contextWindow: 128_000,
+    supportsImages: true,
+    supportsFileAttachments: true,
+    cost: '$$$',
+  },
+  {
+    id: 'gpt-5.3-codex',
+    name: 'GPT-5.3 Codex',
+    provider: 'OpenAI',
+    tier: 'byok',
+    source: 'openai',
+    contextWindow: 128_000,
+    supportsImages: false,
+    supportsFileAttachments: true,
+    cost: '$$',
+    openaiApi: 'responses',
+  },
+  {
+    id: 'gpt-5.4-mini-2026-03-17',
+    name: 'GPT-5.4 Mini',
+    provider: 'OpenAI',
+    tier: 'byok',
+    source: 'openai',
+    contextWindow: 128_000,
+    supportsImages: true,
+    supportsFileAttachments: true,
+    cost: '$',
+  },
+];
+
+export const BYOK_MODELS: LLMModel[] = [...ANTHROPIC_BYOK_MODELS, ...OPENAI_BYOK_MODELS];
+export const ALL_MODELS = [...FREE_MODELS, ...BYOK_MODELS];
 
 const FALLBACK_MODEL: LLMModel = {
   id: 'llm-model-missing',
   name: 'No model configured',
   provider: 'Unknown',
   tier: 'free',
+  source: 'proxy',
   contextWindow: 128_000,
   supportsImages: false,
   supportsFileAttachments: true,
-  notes: 'Set VITE_LLM_FREE_MODELS and VITE_LLM_PRO_MODELS_LOW/MEDIUM/HIGH in environment.',
+  notes: 'Set VITE_LLM_FREE_MODELS in environment or add your own API key in Settings.',
 };
 
-export const DEFAULT_FREE_MODEL = FREE_MODELS[0] ?? PRO_MODELS[0] ?? FALLBACK_MODEL;
-export const DEFAULT_PRO_MODEL = PRO_MODELS[0] ?? DEFAULT_FREE_MODEL;
+export const DEFAULT_FREE_MODEL = FREE_MODELS[0] ?? FALLBACK_MODEL;
+export const DEFAULT_BYOK_MODEL = BYOK_MODELS[0] ?? DEFAULT_FREE_MODEL;
 
 export function getModelById(id: string): LLMModel | undefined {
   return ALL_MODELS.find((m) => m.id === id);
 }
 
-/** Check whether a model ID requires a pro subscription */
-export function requiresPro(modelId: string): boolean {
+/** Check whether a model ID requires a user-provided API key (BYOK) */
+export function requiresByokKey(modelId: string): boolean {
   const model = getModelById(modelId);
-  return model?.tier === 'pro';
+  return model?.tier === 'byok';
+}
+
+/** Get BYOK models available for a given provider source */
+export function getByokModelsForSource(source: 'anthropic' | 'openai'): LLMModel[] {
+  return BYOK_MODELS.filter((m) => m.source === source);
 }
 
-export function getDefaultModelForEntitlement(hasPro: boolean): LLMModel {
-  return hasPro ? DEFAULT_PRO_MODEL : DEFAULT_FREE_MODEL;
+export function getDefaultModelForEntitlement(hasByokKey: boolean): LLMModel {
+  return hasByokKey ? DEFAULT_BYOK_MODEL : DEFAULT_FREE_MODEL;
 }
 
-export function coerceModelForEntitlement(modelId: string | null | undefined, hasPro: boolean): string {
+export function coerceModelForEntitlement(modelId: string | null | undefined, hasByokKey: boolean): string {
   if (modelId) {
     const model = getModelById(modelId);
-    if (model && (!requiresPro(modelId) || hasPro)) {
+    if (model && (!requiresByokKey(modelId) || hasByokKey)) {
       return modelId;
     }
   }
-  return getDefaultModelForEntitlement(hasPro).id;
+  return getDefaultModelForEntitlement(hasByokKey).id;
 }
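
A minimal sketch of wiring the new registry helpers into a caller; the storage key and dropdown framing are illustrative, not from this diff.

import { coerceModelForEntitlement, getByokModelsForSource } from './models.js';

// Keep a persisted selection only if it is still valid for the user's entitlement:
// unknown IDs, or BYOK IDs without a key, fall back to the default model.
const activeModelId = coerceModelForEntitlement(
  localStorage.getItem('chat-model'), // hypothetical storage key
  /* hasByokKey */ true,
);

// Group BYOK models by provider, e.g. for a settings dropdown.
const anthropicChoices = getByokModelsForSource('anthropic'); // Opus 4.6, Sonnet 4.6, Haiku 4.5
const openaiChoices = getByokModelsForSource('openai'); // GPT-5.4, GPT-5.3 Codex, GPT-5.4 Mini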
package/src/lib/llm/stream-client.ts
@@ -53,8 +53,6 @@ export interface StreamOptions {
   messages: StreamMessage[];
   /** System prompt */
   system?: string;
-  /** Auth JWT */
-  authToken?: string | null;
   /** AbortSignal for cancellation */
   signal?: AbortSignal;
   /** Called for each text chunk as it arrives */
@@ -117,16 +115,59 @@ export function drainSseBuffer(buffer: string, flush: boolean = false): { events
   };
 }
 
+/**
+ * Read an SSE stream, invoking onEvent for each `data:` payload.
+ * Skips `[DONE]` sentinels and malformed lines. Returns true if the stream
+ * completed normally; false on abort or error (errors are forwarded via
+ * onError, aborts are silent).
+ */
+export async function readSseStream(
+  body: ReadableStream<Uint8Array>,
+  signal: AbortSignal | undefined,
+  onEvent: (data: string) => void,
+  onError: (err: Error) => void,
+): Promise<boolean> {
+  const reader = body.getReader();
+  const decoder = new TextDecoder();
+  let buffer = '';
+
+  const dispatchDrained = (events: string[]) => {
+    for (const evt of events) {
+      for (const line of evt.split('\n')) {
+        if (!line.startsWith('data: ')) continue;
+        const data = line.slice(6);
+        if (data === '[DONE]') continue;
+        try { onEvent(data); } catch { /* skip malformed */ }
+      }
+    }
+  };
+
+  try {
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+      buffer += decoder.decode(value, { stream: true });
+      const drained = drainSseBuffer(buffer);
+      buffer = drained.remainder;
+      dispatchDrained(drained.events);
+    }
+    buffer += decoder.decode();
+    dispatchDrained(drainSseBuffer(buffer, true).events);
+    return true;
+  } catch (err) {
+    if (signal?.aborted) return false;
+    onError(err instanceof Error ? err : new Error(String(err)));
+    return false;
+  }
+}
+
 /**
  * Fetch current usage snapshot without sending a chat message.
  * Used for instant UI hydration and periodic refresh.
  */
-export async function fetchUsageSnapshot(proxyUrl: string, authToken?: string | null): Promise<UsageInfo | null> {
+export async function fetchUsageSnapshot(proxyUrl: string): Promise<UsageInfo | null> {
   const isDev = Boolean((import.meta as unknown as { env?: Record<string, unknown> }).env?.DEV);
   const headers: Record<string, string> = {};
-  if (authToken) {
-    headers['Authorization'] = `Bearer ${authToken}`;
-  }
 
   const snapshotUrl = `${proxyUrl}${proxyUrl.includes('?') ? '&' : '?'}usage=1`;
   const appSnapshotUrl = '/api/chat?usage=1';
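
A quick sketch of consuming the extracted readSseStream helper; the fetch arguments and handlers are illustrative.

const response = await fetch(proxyUrl, { method: 'POST', headers, body: requestBody, signal });
if (response.body) {
  const ok = await readSseStream(
    response.body,
    signal,
    (data) => handlePayload(JSON.parse(data)), // handlePayload is a placeholder
    (err) => console.error('SSE stream failed', err),
  );
  // ok === false means the stream aborted or errored; callers bail out early.
}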
@@ -165,15 +206,12 @@ export async function fetchUsageSnapshot(proxyUrl: string, authToken?: string |
  * Parses SSE format (data: {...}\n\n).
  */
 export async function streamChat(options: StreamOptions): Promise<void> {
-  const { proxyUrl, model, messages, system, authToken, signal, onChunk, onComplete, onError, onUsageInfo, onFinishReason } = options;
+  const { proxyUrl, model, messages, system, signal, onChunk, onComplete, onError, onUsageInfo, onFinishReason } = options;
   const isDev = Boolean((import.meta as unknown as { env?: Record<string, unknown> }).env?.DEV);
 
   const headers: Record<string, string> = {
     'Content-Type': 'application/json',
   };
-  if (authToken) {
-    headers['Authorization'] = `Bearer ${authToken}`;
-  }
 
   const requestBody = JSON.stringify({ messages, model, system });
   const fetchChat = async (url: string) => {
@@ -255,21 +293,13 @@
     };
     errorDetail = errorBody.error || errorDetail;
 
-    if (response.status === 403 && errorBody.upgrade) {
-      errorDetail = 'Upgrade to Pro to use this model.';
-    }
-
     if (response.status === 401) {
-      errorDetail = 'Authentication expired. Please sign out and sign in again.';
+      errorDetail = 'Authentication error.';
     }
 
     if (response.status === 429) {
-      if (errorBody.type === 'credits') {
-        const contactEmail = errorBody.contactEmail as string | undefined;
-        const contactSuffix = contactEmail ? ` Need more? Reach out at ${contactEmail}.` : '';
-        errorDetail = `Monthly credits used up. Resets ${errorBody.resetAt ? new Date(errorBody.resetAt).toLocaleDateString() : 'next month'}.${contactSuffix}`;
-      } else if (errorBody.type === 'request_cap') {
-        errorDetail = errorBody.error || 'Daily limit reached. Upgrade to Pro for more.';
+      if (errorBody.type === 'request_cap') {
+        errorDetail = errorBody.error || 'Daily limit reached. Add your own API key in Settings for unlimited access.';
       } else {
         errorDetail = errorBody.error || 'Limit reached. Please try again later.';
       }
@@ -308,102 +338,36 @@
     return;
   }
 
-  // Parse SSE stream
-  const reader = response.body.getReader();
-  const decoder = new TextDecoder();
-  let buffer = '';
   let fullText = '';
   let finishReason: string | null = null;
 
-  try {
-    while (true) {
-      const { done, value } = await reader.read();
-      if (done) break;
-
-      buffer += decoder.decode(value, { stream: true });
-
-      const drained = drainSseBuffer(buffer);
-      buffer = drained.remainder;
+  const ok = await readSseStream(response.body, signal, (data) => {
+    const parsed = JSON.parse(data) as {
+      __ifcLiteUsage?: UsageInfo;
+      choices?: Array<{
+        delta?: { content?: string };
+        finish_reason?: string | null;
+      }>;
+    };
 
-      for (const event of drained.events) {
-        for (const line of event.split('\n')) {
-          if (!line.startsWith('data: ')) continue;
-          const data = line.slice(6);
-
-          if (data === '[DONE]') continue;
-
-          try {
-            const parsed = JSON.parse(data) as {
-              __ifcLiteUsage?: UsageInfo;
-              choices?: Array<{
-                delta?: { content?: string };
-                finish_reason?: string | null;
-              }>;
-            };
-
-            // Final usage update emitted by proxy after stream-end reconciliation.
-            if (parsed.__ifcLiteUsage && onUsageInfo) {
-              onUsageInfo(parsed.__ifcLiteUsage);
-              continue;
-            }
-
-            const content = parsed.choices?.[0]?.delta?.content;
-            if (content) {
-              fullText += content;
-              onChunk(content);
-            }
-            const chunkFinishReason = parsed.choices?.[0]?.finish_reason;
-            if (chunkFinishReason) {
-              finishReason = chunkFinishReason;
-            }
-          } catch {
-            // Skip malformed SSE lines
-          }
-        }
-      }
+    // Final usage update emitted by proxy after stream-end reconciliation.
+    if (parsed.__ifcLiteUsage && onUsageInfo) {
+      onUsageInfo(parsed.__ifcLiteUsage);
+      return;
     }
-    buffer += decoder.decode();
-    const drained = drainSseBuffer(buffer, true);
-    for (const event of drained.events) {
-      for (const line of event.split('\n')) {
-        if (!line.startsWith('data: ')) continue;
-        const data = line.slice(6);
 
-        if (data === '[DONE]') continue;
-
-        try {
-          const parsed = JSON.parse(data) as {
-            __ifcLiteUsage?: UsageInfo;
-            choices?: Array<{
-              delta?: { content?: string };
-              finish_reason?: string | null;
-            }>;
-          };
-
-          if (parsed.__ifcLiteUsage && onUsageInfo) {
-            onUsageInfo(parsed.__ifcLiteUsage);
-            continue;
-          }
-
-          const content = parsed.choices?.[0]?.delta?.content;
-          if (content) {
-            fullText += content;
-            onChunk(content);
-          }
-          const chunkFinishReason = parsed.choices?.[0]?.finish_reason;
-          if (chunkFinishReason) {
-            finishReason = chunkFinishReason;
-          }
-        } catch {
-          // Skip malformed SSE lines
-        }
-      }
+    const content = parsed.choices?.[0]?.delta?.content;
+    if (content) {
+      fullText += content;
+      onChunk(content);
     }
-  } catch (err) {
-    if (signal?.aborted) return;
-    onError(err instanceof Error ? err : new Error(String(err)));
-    return;
-  }
+    const chunkFinishReason = parsed.choices?.[0]?.finish_reason;
+    if (chunkFinishReason) {
+      finishReason = chunkFinishReason;
+    }
+  }, onError);
+
+  if (!ok) return;
 
   onFinishReason?.(finishReason);
   onComplete(fullText);
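
With authToken gone from StreamOptions, a minimal caller now looks like this; the endpoint and handler bodies are placeholders.

const abort = new AbortController();
await streamChat({
  proxyUrl: '/api/chat', // illustrative endpoint
  model: 'gpt-5.4-mini-2026-03-17',
  messages: [{ role: 'user', content: 'Hello' }],
  signal: abort.signal,
  onChunk: (text) => { /* append text to the chat UI */ },
  onComplete: (fullText) => { /* persist the finished message */ },
  onError: (err) => { /* surface errorDetail to the user */ },
});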
package/src/lib/llm/stream-direct.test.ts
@@ -0,0 +1,130 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+import test from 'node:test';
+import assert from 'node:assert/strict';
+import { streamOpenAiChat } from './stream-direct.js';
+
+const CODEX_MODEL_ID = 'gpt-5.3-codex';
+
+type FetchImpl = (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>;
+
+function withMockFetch<T>(impl: FetchImpl, fn: () => Promise<T>): Promise<T> {
+  const original = globalThis.fetch;
+  globalThis.fetch = impl as typeof globalThis.fetch;
+  return fn().finally(() => {
+    globalThis.fetch = original;
+  });
+}
+
+function sseResponse(events: string[]): Response {
+  return new Response(new ReadableStream({
+    start(controller) {
+      for (const evt of events) {
+        controller.enqueue(new TextEncoder().encode(`data: ${evt}\n\n`));
+      }
+      controller.close();
+    },
+  }), {
+    status: 200,
+    headers: { 'Content-Type': 'text/event-stream' },
+  });
+}
+
+test('streamOpenAiChat (Responses API) reports finish_reason=length when output is truncated', async () => {
+  await withMockFetch(
+    async () => sseResponse([
+      JSON.stringify({ type: 'response.output_text.delta', delta: 'partial' }),
+      JSON.stringify({
+        type: 'response.incomplete',
+        response: { status: 'incomplete', incomplete_details: { reason: 'max_output_tokens' } },
+      }),
+    ]),
+    async () => {
+      let fullText = '';
+      let finishReason: string | null = null;
+      await streamOpenAiChat('sk-test', {
+        model: CODEX_MODEL_ID,
+        messages: [{ role: 'user', content: 'hi' }],
+        onChunk: (text) => { fullText += text; },
+        onComplete: (text) => { fullText = text; },
+        onFinishReason: (reason) => { finishReason = reason; },
+        onError: (err) => { throw err; },
+      });
+      assert.equal(fullText, 'partial');
+      assert.equal(finishReason, 'length');
+    },
+  );
+});
+
+test('streamOpenAiChat (Responses API) reports finish_reason=length when incomplete has no reason', async () => {
+  await withMockFetch(
+    async () => sseResponse([
+      JSON.stringify({ type: 'response.output_text.delta', delta: 'partial' }),
+      JSON.stringify({
+        type: 'response.incomplete',
+        response: { status: 'incomplete' },
+      }),
+    ]),
+    async () => {
+      let finishReason: string | null = null;
+      await streamOpenAiChat('sk-test', {
+        model: CODEX_MODEL_ID,
+        messages: [{ role: 'user', content: 'hi' }],
+        onChunk: () => undefined,
+        onComplete: () => undefined,
+        onFinishReason: (reason) => { finishReason = reason; },
+        onError: (err) => { throw err; },
+      });
+      assert.equal(finishReason, 'length');
+    },
+  );
+});
+
+test('streamOpenAiChat (Responses API) reports finish_reason=stop on normal completion', async () => {
+  await withMockFetch(
+    async () => sseResponse([
+      JSON.stringify({ type: 'response.output_text.delta', delta: 'ok' }),
+      JSON.stringify({
+        type: 'response.completed',
+        response: { status: 'completed' },
+      }),
+    ]),
+    async () => {
+      let finishReason: string | null = null;
+      await streamOpenAiChat('sk-test', {
+        model: CODEX_MODEL_ID,
+        messages: [{ role: 'user', content: 'hi' }],
+        onChunk: () => undefined,
+        onComplete: () => undefined,
+        onFinishReason: (reason) => { finishReason = reason; },
+        onError: (err) => { throw err; },
+      });
+      assert.equal(finishReason, 'stop');
+    },
+  );
+});
+
+test('streamOpenAiChat (Responses API) hits the /v1/responses endpoint for codex models', async () => {
+  let capturedUrl: string | null = null;
+  await withMockFetch(
+    async (input) => {
+      capturedUrl = typeof input === 'string' ? input : input instanceof URL ? input.toString() : input.url;
+      return sseResponse([
+        JSON.stringify({ type: 'response.output_text.delta', delta: 'x' }),
+        JSON.stringify({ type: 'response.completed', response: { status: 'completed' } }),
+      ]);
+    },
+    async () => {
+      await streamOpenAiChat('sk-test', {
+        model: CODEX_MODEL_ID,
+        messages: [{ role: 'user', content: 'hi' }],
+        onChunk: () => undefined,
+        onComplete: () => undefined,
+        onError: (err) => { throw err; },
+      });
+    },
+  );
+  assert.equal(capturedUrl, 'https://api.openai.com/v1/responses');
});