@j0hanz/code-review-analyst-mcp 1.7.2 → 1.7.3

This diff shows the changes between package versions as published to one of the supported public registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the registry.
@@ -514,12 +514,27 @@ async function throwGeminiFailure(attemptsMade, lastError, onLog) {
514
514
  async function runWithRetries(request, model, timeoutMs, maxRetries, onLog) {
515
515
  let lastError;
516
516
  let attempt = 0;
517
+ let currentModel = model;
517
518
  for (; attempt <= maxRetries; attempt += 1) {
518
519
  try {
519
- return await executeAttempt(request, model, timeoutMs, attempt, onLog);
520
+ return await executeAttempt(request, currentModel, timeoutMs, attempt, onLog);
520
521
  }
521
522
  catch (error) {
522
523
  lastError = error;
524
+ if (getNumericErrorCode(error) === 404 &&
525
+ currentModel === 'gemini-3-flash-preview') {
526
+ currentModel = 'gemini-2.5-flash';
527
+ delete request.thinkingLevel;
528
+ await emitGeminiLog(onLog, 'warning', {
529
+ event: 'gemini_model_fallback',
530
+ details: {
531
+ from: 'gemini-3-flash-preview',
532
+ to: 'gemini-2.5-flash',
533
+ reason: 'Model not found (404)',
534
+ },
535
+ });
536
+ continue;
537
+ }
523
538
  if (!canRetryAttempt(attempt, maxRetries, error)) {
524
539
  attempt += 1; // Count this attempt before breaking
525
540
  break;
@@ -737,6 +752,43 @@ async function cancelBatchIfNeeded(request, batches, batchName, onLog, completed
737
752
  });
738
753
  }
739
754
  }
755
+ async function createBatchJobWithFallback(request, batches, model, onLog) {
756
+ let currentModel = model;
757
+ const createSignal = request.signal ?? NEVER_ABORT_SIGNAL;
758
+ for (let attempt = 0; attempt <= 1; attempt += 1) {
759
+ try {
760
+ const createPayload = {
761
+ model: currentModel,
762
+ src: [
763
+ {
764
+ contents: [{ role: 'user', parts: [{ text: request.prompt }] }],
765
+ config: buildGenerationConfig(request, createSignal),
766
+ },
767
+ ],
768
+ };
769
+ return await batches.create(createPayload);
770
+ }
771
+ catch (error) {
772
+ if (attempt === 0 &&
773
+ getNumericErrorCode(error) === 404 &&
774
+ currentModel === 'gemini-3-flash-preview') {
775
+ currentModel = 'gemini-2.5-flash';
776
+ delete request.thinkingLevel;
777
+ await emitGeminiLog(onLog, 'warning', {
778
+ event: 'gemini_model_fallback',
779
+ details: {
780
+ from: 'gemini-3-flash-preview',
781
+ to: 'gemini-2.5-flash',
782
+ reason: 'Model not found (404) during batch create',
783
+ },
784
+ });
785
+ continue;
786
+ }
787
+ throw error;
788
+ }
789
+ }
790
+ throw new Error('Unexpected state: batch creation loop exited without returning or throwing.');
791
+ }
740
792
  async function runInlineBatchWithPolling(request, model, onLog) {
741
793
  const client = getClient();
742
794
  const { batches } = client;
@@ -747,17 +799,7 @@ async function runInlineBatchWithPolling(request, model, onLog) {
747
799
  let completed = false;
748
800
  let timedOut = false;
749
801
  try {
750
- const createSignal = request.signal ?? NEVER_ABORT_SIGNAL;
751
- const createPayload = {
752
- model,
753
- src: [
754
- {
755
- contents: [{ role: 'user', parts: [{ text: request.prompt }] }],
756
- config: buildGenerationConfig(request, createSignal),
757
- },
758
- ],
759
- };
760
- const createdJob = await batches.create(createPayload);
802
+ const createdJob = await createBatchJobWithFallback(request, batches, model, onLog);
761
803
  const createdRecord = toRecord(createdJob);
762
804
  batchName =
763
805
  typeof createdRecord?.name === 'string' ? createdRecord.name : undefined;
@@ -392,6 +392,11 @@ export class ToolExecutionRunner {
392
392
  await reportProgressStepUpdate(this.reportProgress, this.config.name, this.progressContext, STEP_CALLING_MODEL, msg);
393
393
  await this.updateStatusMessage(msg);
394
394
  }
395
+ else if (record.event === 'gemini_queue_acquired') {
396
+ const msg = 'Model queue acquired, generating response...';
397
+ await reportProgressStepUpdate(this.reportProgress, this.config.name, this.progressContext, STEP_CALLING_MODEL, msg);
398
+ await this.updateStatusMessage(msg);
399
+ }
395
400
  }
396
401
  setResponseSchemaOverride(responseSchema) {
397
402
  this.responseSchema = responseSchema;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@j0hanz/code-review-analyst-mcp",
3
- "version": "1.7.2",
3
+ "version": "1.7.3",
4
4
  "mcpName": "io.github.j0hanz/code-review-analyst",
5
5
  "description": "Gemini-powered MCP server for code review analysis.",
6
6
  "type": "module",