@azumag/opencode-rate-limit-fallback 1.0.15 → 1.0.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +20 -12
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -316,28 +316,36 @@ export const RateLimitFallback = async ({ client, directory }) => {
             parts: parts,
             model: { providerID: nextModel.providerID, modelID: nextModel.modelID },
         };
-
-        //
-        //
-        //
-        //
-        //
-        //
-        //
-        //
+        // CRITICAL PATH: abort → promptAsync with NO delay between them.
+        //
+        // In headless mode (opencode run), the server disposes within milliseconds
+        // after session goes idle (observed ~8ms in production logs — this is not
+        // a guaranteed bound, just empirically observed). Any delay (setTimeout,
+        // awaited toast, etc.) means promptAsync arrives after the server is dead.
+        //
+        // The await on promptAsync waits for the HTTP round-trip (server acknowledgment),
+        // NOT for prompt completion — generation runs asynchronously on the server.
+        //
+        // Do NOT use prompt() (sync) here — it triggers the abort flag race condition
+        // in TUI mode, causing the new prompt to be immediately interrupted.
         try {
             await client.session.abort({ path: { id: sessionID } });
             logToFile(`abort succeeded for session ${sessionID}`);
         }
         catch (abortErr) {
-
+            // If abort fails, the session may still be in its retry loop.
+            // We still send promptAsync as best-effort: when the retry loop eventually
+            // completes (timeout or success), the queued prompt should be processed.
+            logToFile(`abort failed: ${abortErr} — sending promptAsync as best-effort`);
         }
         await client.session.promptAsync({
             path: { id: sessionID },
             body: promptBody,
         });
-        logToFile(`promptAsync sent
-
+        logToFile(`promptAsync sent for session ${sessionID} with model ${nextModel.providerID}/${nextModel.modelID}`);
+        // Toasts are fire-and-forget: placed AFTER the critical path so they cannot
+        // interfere with the abort→promptAsync timing, even if they fail in headless.
+        toast("Fallback Active", `Now using ${nextModel.modelID}`, "success").catch(() => { });
         retryState.delete(stateKey);
         // Clear fallback flag to allow next fallback if needed
         fallbackInProgress.delete(sessionID);
package/package.json
CHANGED