@j0hanz/code-review-analyst-mcp 1.7.2 → 1.7.4

This diff shows the changes between publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
@@ -514,12 +514,27 @@ async function throwGeminiFailure(attemptsMade, lastError, onLog) {
514
514
  async function runWithRetries(request, model, timeoutMs, maxRetries, onLog) {
515
515
  let lastError;
516
516
  let attempt = 0;
517
+ let currentModel = model;
517
518
  for (; attempt <= maxRetries; attempt += 1) {
518
519
  try {
519
- return await executeAttempt(request, model, timeoutMs, attempt, onLog);
520
+ return await executeAttempt(request, currentModel, timeoutMs, attempt, onLog);
520
521
  }
521
522
  catch (error) {
522
523
  lastError = error;
524
+ if (getNumericErrorCode(error) === 404 &&
525
+ currentModel === 'gemini-3-flash-preview') {
526
+ currentModel = 'gemini-2.5-flash';
527
+ delete request.thinkingLevel;
528
+ await emitGeminiLog(onLog, 'warning', {
529
+ event: 'gemini_model_fallback',
530
+ details: {
531
+ from: 'gemini-3-flash-preview',
532
+ to: 'gemini-2.5-flash',
533
+ reason: 'Model not found (404)',
534
+ },
535
+ });
536
+ continue;
537
+ }
523
538
  if (!canRetryAttempt(attempt, maxRetries, error)) {
524
539
  attempt += 1; // Count this attempt before breaking
525
540
  break;
@@ -737,6 +752,43 @@ async function cancelBatchIfNeeded(request, batches, batchName, onLog, completed
737
752
  });
738
753
  }
739
754
  }
755
+ async function createBatchJobWithFallback(request, batches, model, onLog) {
756
+ let currentModel = model;
757
+ const createSignal = request.signal ?? NEVER_ABORT_SIGNAL;
758
+ for (let attempt = 0; attempt <= 1; attempt += 1) {
759
+ try {
760
+ const createPayload = {
761
+ model: currentModel,
762
+ src: [
763
+ {
764
+ contents: [{ role: 'user', parts: [{ text: request.prompt }] }],
765
+ config: buildGenerationConfig(request, createSignal),
766
+ },
767
+ ],
768
+ };
769
+ return await batches.create(createPayload);
770
+ }
771
+ catch (error) {
772
+ if (attempt === 0 &&
773
+ getNumericErrorCode(error) === 404 &&
774
+ currentModel === 'gemini-3-flash-preview') {
775
+ currentModel = 'gemini-2.5-flash';
776
+ delete request.thinkingLevel;
777
+ await emitGeminiLog(onLog, 'warning', {
778
+ event: 'gemini_model_fallback',
779
+ details: {
780
+ from: 'gemini-3-flash-preview',
781
+ to: 'gemini-2.5-flash',
782
+ reason: 'Model not found (404) during batch create',
783
+ },
784
+ });
785
+ continue;
786
+ }
787
+ throw error;
788
+ }
789
+ }
790
+ throw new Error('Unexpected state: batch creation loop exited without returning or throwing.');
791
+ }
740
792
  async function runInlineBatchWithPolling(request, model, onLog) {
741
793
  const client = getClient();
742
794
  const { batches } = client;
@@ -747,17 +799,7 @@ async function runInlineBatchWithPolling(request, model, onLog) {
747
799
  let completed = false;
748
800
  let timedOut = false;
749
801
  try {
750
- const createSignal = request.signal ?? NEVER_ABORT_SIGNAL;
751
- const createPayload = {
752
- model,
753
- src: [
754
- {
755
- contents: [{ role: 'user', parts: [{ text: request.prompt }] }],
756
- config: buildGenerationConfig(request, createSignal),
757
- },
758
- ],
759
- };
760
- const createdJob = await batches.create(createPayload);
802
+ const createdJob = await createBatchJobWithFallback(request, batches, model, onLog);
761
803
  const createdRecord = toRecord(createdJob);
762
804
  batchName =
763
805
  typeof createdRecord?.name === 'string' ? createdRecord.name : undefined;
@@ -183,13 +183,17 @@ function createProgressReporter(extra) {
183
183
  };
184
184
  }
185
185
  const progressToken = rawToken;
186
- let lastCurrent = 0;
186
+ let lastCurrent = -1;
187
187
  let didSendTerminal = false;
188
188
  return async (payload) => {
189
189
  if (didSendTerminal) {
190
190
  return;
191
191
  }
192
- const current = Math.max(payload.current, lastCurrent);
192
+ let { current } = payload;
193
+ if (current <= lastCurrent && current < (payload.total ?? Infinity)) {
194
+ current = lastCurrent + 0.01;
195
+ }
196
+ current = Math.max(current, lastCurrent);
193
197
  const total = payload.total !== undefined
194
198
  ? Math.max(payload.total, current)
195
199
  : undefined;
@@ -265,18 +269,20 @@ async function sendSingleStepProgress(extra, toolName, context, current, state)
265
269
  async function reportProgressStepUpdate(reportProgress, toolName, context, current, metadata) {
266
270
  await reportProgress({
267
271
  current,
272
+ total: TASK_PROGRESS_TOTAL,
268
273
  message: formatProgressStep(toolName, context, metadata),
269
274
  });
270
275
  }
271
276
  async function reportProgressCompletionUpdate(reportProgress, toolName, context, outcome) {
272
277
  await reportProgress({
273
278
  current: TASK_PROGRESS_TOTAL,
279
+ total: TASK_PROGRESS_TOTAL,
274
280
  message: formatProgressCompletion(toolName, context, outcome),
275
281
  });
276
282
  }
277
283
  async function reportSchemaRetryProgressBestEffort(reportProgress, toolName, context, retryCount, maxRetries) {
278
284
  try {
279
- await reportProgressStepUpdate(reportProgress, toolName, context, STEP_VALIDATING_RESPONSE, `Schema repair in progress (attempt ${retryCount}/${maxRetries})...`);
285
+ await reportProgressStepUpdate(reportProgress, toolName, context, STEP_VALIDATING_RESPONSE + retryCount / (maxRetries + 1), `Schema repair in progress (attempt ${retryCount}/${maxRetries})...`);
280
286
  }
281
287
  catch {
282
288
  // Progress updates are best-effort and must not interrupt retries.
@@ -392,6 +398,11 @@ export class ToolExecutionRunner {
392
398
  await reportProgressStepUpdate(this.reportProgress, this.config.name, this.progressContext, STEP_CALLING_MODEL, msg);
393
399
  await this.updateStatusMessage(msg);
394
400
  }
401
+ else if (record.event === 'gemini_queue_acquired') {
402
+ const msg = 'Model queue acquired, generating response...';
403
+ await reportProgressStepUpdate(this.reportProgress, this.config.name, this.progressContext, STEP_CALLING_MODEL, msg);
404
+ await this.updateStatusMessage(msg);
405
+ }
395
406
  }
396
407
  setResponseSchemaOverride(responseSchema) {
397
408
  this.responseSchema = responseSchema;
@@ -552,35 +563,54 @@ export function registerStructuredToolTask(server, config) {
552
563
  geminiSchema: config.geminiSchema,
553
564
  resultSchema: config.resultSchema,
554
565
  });
555
- server.registerTool(config.name, {
566
+ server.experimental.tasks.registerToolTask(config.name, {
556
567
  title: config.title,
557
568
  description: config.description,
558
569
  inputSchema: config.inputSchema,
559
570
  outputSchema: DefaultOutputSchema,
560
571
  annotations: buildToolAnnotations(config.annotations),
561
- }, async (input, extra) => {
562
- const runner = new ToolExecutionRunner(config, {
563
- onLog: async (level, data) => {
564
- // Standard logging for tool calls
565
- try {
566
- await server.sendLoggingMessage({
567
- level: toLoggingLevel(level),
568
- logger: 'gemini',
569
- data: asObjectRecord(data),
570
- });
571
- }
572
- catch {
573
- // Fallback if logging fails
574
- }
575
- },
576
- reportProgress: createProgressReporter(extra),
577
- statusReporter: {
578
- updateStatus: async () => {
579
- // No-op for standard tool calls as they don't have a persistent task status
572
+ execution: {
573
+ taskSupport: 'optional',
574
+ },
575
+ }, {
576
+ createTask: async (input, extra) => {
577
+ const task = await extra.taskStore.createTask({ ttl: 300000 });
578
+ const extendedStore = extra.taskStore;
579
+ const runner = new ToolExecutionRunner(config, {
580
+ onLog: async (level, data) => {
581
+ try {
582
+ await server.sendLoggingMessage({
583
+ level: toLoggingLevel(level),
584
+ logger: 'gemini',
585
+ data: asObjectRecord(data),
586
+ });
587
+ }
588
+ catch {
589
+ // Fallback if logging fails
590
+ }
580
591
  },
581
- },
582
- });
583
- runner.setResponseSchemaOverride(responseSchema);
584
- return await runner.run(input);
592
+ reportProgress: createProgressReporter(extra),
593
+ statusReporter: {
594
+ updateStatus: async (message) => {
595
+ await extendedStore.updateTaskStatus(task.taskId, 'working', message);
596
+ },
597
+ storeResult: async (status, result) => {
598
+ await extra.taskStore.storeTaskResult(task.taskId, status, result);
599
+ },
600
+ },
601
+ });
602
+ runner.setResponseSchemaOverride(responseSchema);
603
+ // Run in background
604
+ runner.run(input).catch(async (error) => {
605
+ await extendedStore.updateTaskStatus(task.taskId, 'failed', getErrorMessage(error));
606
+ });
607
+ return { task };
608
+ },
609
+ getTask: async (input, extra) => {
610
+ return await extra.taskStore.getTask(extra.taskId);
611
+ },
612
+ getTaskResult: async (input, extra) => {
613
+ return (await extra.taskStore.getTaskResult(extra.taskId));
614
+ },
585
615
  });
586
616
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@j0hanz/code-review-analyst-mcp",
3
- "version": "1.7.2",
3
+ "version": "1.7.4",
4
4
  "mcpName": "io.github.j0hanz/code-review-analyst",
5
5
  "description": "Gemini-powered MCP server for code review analysis.",
6
6
  "type": "module",