@google/gemini-cli-a2a-server 0.18.0-nightly.20251120.2231497b1 → 0.19.0-nightly.20251121.5982abeff
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/a2a-server.mjs +206 -110
- package/dist/src/agent/task.js +0 -1
- package/dist/src/agent/task.js.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +2 -2
package/dist/a2a-server.mjs
CHANGED
@@ -84519,28 +84519,48 @@ var init_events = __esm({
  CoreEvent2["UserFeedback"] = "user-feedback";
  CoreEvent2["FallbackModeChanged"] = "fallback-mode-changed";
  CoreEvent2["ModelChanged"] = "model-changed";
+ CoreEvent2["ConsoleLog"] = "console-log";
+ CoreEvent2["Output"] = "output";
  CoreEvent2["MemoryChanged"] = "memory-changed";
+ CoreEvent2["ExternalEditorClosed"] = "external-editor-closed";
  })(CoreEvent || (CoreEvent = {}));
  CoreEventEmitter = class _CoreEventEmitter extends EventEmitter4 {
-
+ _eventBacklog = [];
  static MAX_BACKLOG_SIZE = 1e4;
  constructor() {
  super();
  }
+ _emitOrQueue(event, ...args2) {
+ if (this.listenerCount(event) === 0) {
+ if (this._eventBacklog.length >= _CoreEventEmitter.MAX_BACKLOG_SIZE) {
+ this._eventBacklog.shift();
+ }
+ this._eventBacklog.push({ event, args: args2 });
+ } else {
+ this.emit(event, ...args2);
+ }
+ }
  /**
  * Sends actionable feedback to the user.
  * Buffers automatically if the UI hasn't subscribed yet.
  */
  emitFeedback(severity, message, error) {
  const payload = { severity, message, error };
-
-
-
-
-
-
-
-
+ this._emitOrQueue(CoreEvent.UserFeedback, payload);
+ }
+ /**
+ * Broadcasts a console log message.
+ */
+ emitConsoleLog(type, content) {
+ const payload = { type, content };
+ this._emitOrQueue(CoreEvent.ConsoleLog, payload);
+ }
+ /**
+ * Broadcasts stdout/stderr output.
+ */
+ emitOutput(isStderr, chunk2, encoding) {
+ const payload = { isStderr, chunk: chunk2, encoding };
+ this._emitOrQueue(CoreEvent.Output, payload);
  }
  /**
  * Notifies subscribers that fallback mode has changed.
@@ -84561,11 +84581,11 @@ var init_events = __esm({
  * Flushes buffered messages. Call this immediately after primary UI listener
  * subscribes.
  */
-
- const backlog = [...this.
- this.
- for (const
- this.emit(
+ drainBacklogs() {
+ const backlog = [...this._eventBacklog];
+ this._eventBacklog.length = 0;
+ for (const item of backlog) {
+ this.emit(item.event, ...item.args);
  }
  }
  };
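Note: the CoreEventEmitter hunks above add an internal backlog. _emitOrQueue() buffers an event when no listener is attached yet (dropping the oldest entry once MAX_BACKLOG_SIZE = 1e4 is reached), and drainBacklogs() replays the buffer once the primary UI listener subscribes. A minimal consumer-side sketch, assuming coreEvents is the shared CoreEventEmitter instance (as the openDiff hunk further down suggests); the render helpers are hypothetical:

// Subscribe first, then drain, so events emitted before startup are not lost.
coreEvents.on(CoreEvent.ConsoleLog, ({ type, content }) => {
  renderConsoleLine(type, content); // hypothetical UI hook
});
coreEvents.on(CoreEvent.Output, ({ isStderr, chunk, encoding }) => {
  renderOutput(isStderr, chunk, encoding); // hypothetical UI hook
});
coreEvents.drainBacklogs(); // replays anything buffered by _emitOrQueue()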
@@ -307570,8 +307590,8 @@ var Float64Vector = import_vector.default.Float64Vector;
  var PointerVector = import_vector.default.PointerVector;

  // packages/core/dist/src/generated/git-commit.js
- var GIT_COMMIT_INFO = "
- var CLI_VERSION = "0.
+ var GIT_COMMIT_INFO = "5982abeff";
+ var CLI_VERSION = "0.19.0-nightly.20251121.5982abeff";

  // packages/core/dist/src/ide/detect-ide.js
  var IDE_DEFINITIONS = {
@@ -310430,7 +310450,7 @@ async function createContentGenerator(config2, gcConfig, sessionId2) {
  if (gcConfig.fakeResponses) {
  return FakeContentGenerator.fromFile(gcConfig.fakeResponses);
  }
- const version3 = "0.
+ const version3 = "0.19.0-nightly.20251121.5982abeff";
  const userAgent = `GeminiCLI/${version3} (${process.platform}; ${process.arch})`;
  const baseHeaders = {
  "User-Agent": userAgent
@@ -342218,6 +342238,7 @@ import * as crypto19 from "node:crypto";

  // packages/core/dist/src/utils/editor.js
  import { execSync, spawn as spawn6, spawnSync as spawnSync2 } from "node:child_process";
+ init_events();
  function isValidEditorType(editor) {
  return [
  "vscode",
@@ -342303,7 +342324,7 @@ function getDiffCommand(oldPath, newPath, editor) {
  return null;
  }
  }
- async function openDiff(oldPath, newPath, editor
+ async function openDiff(oldPath, newPath, editor) {
  const diffCommand = getDiffCommand(oldPath, newPath, editor);
  if (!diffCommand) {
  debugLogger.error("No diff tool available. Install a supported editor.");
@@ -342322,7 +342343,7 @@ async function openDiff(oldPath, newPath, editor, onEditorClose) {
  throw new Error(`${editor} exited with code ${result.status}`);
  }
  } finally {
-
+ coreEvents.emit(CoreEvent.ExternalEditorClosed);
  }
  return;
  }
@@ -342412,14 +342433,14 @@ function deleteTempFiles(oldPath, newPath, dirPath) {
  debugLogger.error(`Error deleting temp diff directory: ${dirPath}`);
  }
  }
- async function modifyWithEditor(originalParams, modifyContext, editorType, _abortSignal,
+ async function modifyWithEditor(originalParams, modifyContext, editorType, _abortSignal, overrides) {
  const hasCurrentOverride = overrides !== void 0 && "currentContent" in overrides;
  const hasProposedOverride = overrides !== void 0 && "proposedContent" in overrides;
  const currentContent = hasCurrentOverride ? overrides.currentContent ?? "" : await modifyContext.getCurrentContent(originalParams);
  const proposedContent = hasProposedOverride ? overrides.proposedContent ?? "" : await modifyContext.getProposedContent(originalParams);
  const { oldPath, newPath, dirPath } = createTempFilesForModify(currentContent ?? "", proposedContent ?? "", modifyContext.getFilePath(originalParams));
  try {
- await openDiff(oldPath, newPath, editorType
+ await openDiff(oldPath, newPath, editorType);
  const result = getUpdatedParams(oldPath, newPath, originalParams, modifyContext);
  return result;
  } finally {
@@ -355800,7 +355821,6 @@ var CoreToolScheduler = class _CoreToolScheduler {
  onToolCallsUpdate;
  getPreferredEditor;
  config;
- onEditorClose;
  isFinalizingToolCalls = false;
  isScheduling = false;
  isCancelling = false;
@@ -355813,7 +355833,6 @@ var CoreToolScheduler = class _CoreToolScheduler {
  this.onAllToolCallsComplete = options2.onAllToolCallsComplete;
  this.onToolCallsUpdate = options2.onToolCallsUpdate;
  this.getPreferredEditor = options2.getPreferredEditor;
- this.onEditorClose = options2.onEditorClose;
  if (this.config.getEnableMessageBusIntegration()) {
  const messageBus = this.config.getMessageBus();
  if (!_CoreToolScheduler.subscribedMessageBuses.has(messageBus)) {
@@ -356186,7 +356205,7 @@ var CoreToolScheduler = class _CoreToolScheduler {
  currentContent: waitingToolCall.confirmationDetails.originalContent,
  proposedContent: waitingToolCall.confirmationDetails.newContent
  } : void 0;
- const { updatedParams, updatedDiff } = await modifyWithEditor(waitingToolCall.request.args, modifyContext, editorType, signal,
+ const { updatedParams, updatedDiff } = await modifyWithEditor(waitingToolCall.request.args, modifyContext, editorType, signal, contentOverrides);
  this.setArgsInternal(callId, updatedParams);
  this.setStatusInternal(callId, "awaiting_approval", signal, {
  ...waitingToolCall.confirmationDetails,
@@ -356953,16 +356972,18 @@ var InvalidStreamError = class extends Error {
  };
  var GeminiChat = class {
  config;
-
+ systemInstruction;
+ tools;
  history;
  // A promise to represent the current state of the message being sent to the
  // model.
  sendPromise = Promise.resolve();
  chatRecordingService;
  lastPromptTokenCount;
- constructor(config2,
+ constructor(config2, systemInstruction = "", tools = [], history = [], resumedSessionData) {
  this.config = config2;
- this.
+ this.systemInstruction = systemInstruction;
+ this.tools = tools;
  this.history = history;
  validateHistory2(history);
  this.chatRecordingService = new ChatRecordingService(config2);
@@ -356970,7 +356991,7 @@ var GeminiChat = class {
  this.lastPromptTokenCount = Math.ceil(JSON.stringify(this.history).length / 4);
  }
  setSystemInstruction(sysInstr) {
- this.
+ this.systemInstruction = sysInstr;
  }
  /**
  * Sends a message to the model and returns the response in chunks.
@@ -356980,7 +357001,10 @@ var GeminiChat = class {
  * sending the next message.
  *
  * @see {@link Chat#sendMessage} for non-streaming method.
- * @param
+ * @param modelConfigKey - The key for the model config.
+ * @param message - The list of messages to send.
+ * @param prompt_id - The ID of the prompt.
+ * @param signal - An abort signal for this message.
  * @return The model's response.
  *
  * @example
@@ -356994,7 +357018,7 @@ var GeminiChat = class {
  * }
  * ```
  */
- async sendMessageStream(
+ async sendMessageStream(modelConfigKey, message, prompt_id, signal) {
  await this.sendPromise;
  this.config.setPreviewModelBypassMode(false);
  let streamDoneResolver;
@@ -357002,9 +357026,11 @@ var GeminiChat = class {
  streamDoneResolver = resolve14;
  });
  this.sendPromise = streamDonePromise;
- const userContent = createUserContent(
+ const userContent = createUserContent(message);
+ const { model, generateContentConfig } = this.config.modelConfigService.getResolvedConfig(modelConfigKey);
+ generateContentConfig.abortSignal = signal;
  if (!isFunctionResponse(userContent)) {
- const userMessage = Array.isArray(
+ const userMessage = Array.isArray(message) ? message : [message];
  const userMessageContent = partListUnionToString(toParts(userMessage));
  this.chatRecordingService.recordMessage({
  model,
@@ -357027,14 +357053,10 @@ var GeminiChat = class {
  if (attempt > 0) {
  yield { type: StreamEventType.RETRY };
  }
- const currentParams = { ...params };
  if (attempt > 0) {
-
- ...currentParams.config,
- temperature: 1
- };
+ generateContentConfig.temperature = 1;
  }
- const stream3 = await self2.makeApiCallAndProcessStream(model,
+ const stream3 = await self2.makeApiCallAndProcessStream(model, generateContentConfig, requestContents, prompt_id);
  for await (const chunk2 of stream3) {
  yield { type: StreamEventType.CHUNK, value: chunk2 };
  }
@@ -357068,7 +357090,7 @@ var GeminiChat = class {
  }
  }();
  }
- async makeApiCallAndProcessStream(model,
+ async makeApiCallAndProcessStream(model, generateContentConfig, requestContents, prompt_id) {
  let effectiveModel = model;
  const contentsForPreviewModel = this.ensureActiveLoopHasThoughtSignatures(requestContents);
  const apiCall = () => {
@@ -357077,10 +357099,24 @@ var GeminiChat = class {
  modelToUse = DEFAULT_GEMINI_MODEL;
  }
  effectiveModel = modelToUse;
+ const config2 = {
+ ...generateContentConfig,
+ // TODO(12622): Ensure we don't overrwrite these when they are
+ // passed via config.
+ systemInstruction: this.systemInstruction,
+ tools: this.tools
+ };
+ if (modelToUse.startsWith("gemini-3")) {
+ config2.thinkingConfig = {
+ ...config2.thinkingConfig,
+ thinkingLevel: ThinkingLevel.HIGH
+ };
+ delete config2.thinkingConfig?.thinkingBudget;
+ }
  return this.config.getContentGenerator().generateContentStream({
  model: modelToUse,
  contents: modelToUse === PREVIEW_GEMINI_MODEL ? contentsForPreviewModel : requestContents,
- config:
+ config: config2
  }, prompt_id);
  };
  const onPersistent429Callback = async (authType, error) => await handleFallback(this.config, effectiveModel, authType, error);
@@ -357088,7 +357124,7 @@ var GeminiChat = class {
  onPersistent429: onPersistent429Callback,
  authType: this.config.getContentGeneratorConfig()?.authType,
  retryFetchErrors: this.config.getRetryFetchErrors(),
- signal:
+ signal: generateContentConfig.abortSignal,
  maxAttempts: this.config.isPreviewModelFallbackMode() && model === PREVIEW_GEMINI_MODEL ? 1 : void 0
  });
  return this.processStreamResponse(model, streamResponse);
@@ -357192,7 +357228,7 @@ var GeminiChat = class {
  return newContents;
  }
  setTools(tools) {
- this.
+ this.tools = tools;
  }
  async maybeIncludeSchemaDepthContext(error) {
  if (isSchemaDepthError(error.message) || isInvalidArgumentError(error.message)) {
@@ -357369,14 +357405,9 @@ var Turn = class {
  this.prompt_id = prompt_id;
  }
  // The run method yields simpler events suitable for server logic
- async *run(
+ async *run(modelConfigKey, req, signal) {
  try {
- const responseStream = await this.chat.sendMessageStream(
- message: req,
- config: {
- abortSignal: signal
- }
- }, this.prompt_id);
+ const responseStream = await this.chat.sendMessageStream(modelConfigKey, req, this.prompt_id, signal);
  for await (const streamEvent of responseStream) {
  if (signal?.aborted) {
  yield { type: GeminiEventType.UserCancelled };
@@ -358554,7 +358585,7 @@ var LoopDetectionService = class {
  };

  // packages/core/dist/src/services/chatCompressionService.js
- var DEFAULT_COMPRESSION_TOKEN_THRESHOLD = 0.
+ var DEFAULT_COMPRESSION_TOKEN_THRESHOLD = 0.5;
  var COMPRESSION_PRESERVE_THRESHOLD = 0.3;
  function findCompressSplitPoint(contents, fraction) {
  if (fraction <= 0 || fraction >= 1) {
@@ -358679,18 +358710,10 @@ var ChatCompressionService = class {
  };

  // packages/core/dist/src/core/client.js
- function isThinkingSupported(model) {
- return !model.startsWith("gemini-2.0");
- }
  var MAX_TURNS = 100;
  var GeminiClient = class {
  config;
  chat;
- generateContentConfig = {
- temperature: 1,
- topP: 0.95,
- topK: 64
- };
  sessionTurnCount = 0;
  loopDetector;
  compressionService;
@@ -358777,6 +358800,14 @@ var GeminiClient = class {
  parts: [{ text: await getDirectoryContextString(this.config) }]
  });
  }
+ async updateSystemInstruction() {
+ if (!this.isInitialized()) {
+ return;
+ }
+ const userMemory = this.config.getUserMemory();
+ const systemInstruction = getCoreSystemPrompt(this.config, userMemory);
+ this.getChat().setSystemInstruction(systemInstruction);
+ }
  async startChat(extraHistory, resumedSessionData) {
  this.forceFullIdeContext = true;
  this.hasFailedCompressionAttempt = false;
@@ -358787,19 +358818,7 @@ var GeminiClient = class {
  try {
  const userMemory = this.config.getUserMemory();
  const systemInstruction = getCoreSystemPrompt(this.config, userMemory);
-
- const config2 = { ...this.generateContentConfig };
- if (isThinkingSupported(model)) {
- config2.thinkingConfig = {
- includeThoughts: true,
- thinkingBudget: DEFAULT_THINKING_MODE
- };
- }
- return new GeminiChat(this.config, {
- systemInstruction,
- ...config2,
- tools
- }, history, resumedSessionData);
+ return new GeminiChat(this.config, systemInstruction, tools, history, resumedSessionData);
  } catch (error) {
  await reportError(error, "Error initializing Gemini chat session.", history, "startChat");
  throw new Error(`Failed to initialize chat: ${getErrorMessage(error)}`);
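Note: taken together, the GeminiChat hunks change the constructor to positional arguments (config, systemInstruction, tools, history, resumedSessionData) and change sendMessageStream() to take a model-config key plus an explicit abort signal; the model and generateContentConfig are now resolved through config.modelConfigService.getResolvedConfig() rather than passed per call. A sketch of the new call shape, based only on the hunks above; the literal values and the handler are illustrative:

const chat = new GeminiChat(config, systemInstruction, tools, history);
const stream = await chat.sendMessageStream(
  { model: "gemini-2.5-pro" }, // modelConfigKey, resolved via ModelConfigService
  [{ text: "Summarize the latest changes." }], // message parts (illustrative)
  promptId,
  abortController.signal
);
for await (const event of stream) {
  if (event.type === StreamEventType.CHUNK) {
    handleChunk(event.value); // hypothetical handler
  }
}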
@@ -359006,7 +359025,7 @@ var GeminiClient = class {
  this.currentSequenceModel = modelToUse;
  yield { type: GeminiEventType.ModelInfo, value: modelToUse };
  }
- const resultStream = turn.run(modelToUse, request3, linkedSignal);
+ const resultStream = turn.run({ model: modelToUse }, request3, linkedSignal);
  for await (const event of resultStream) {
  if (this.loopDetector.addAndCheck(event)) {
  yield { type: GeminiEventType.LoopDetected };
@@ -363951,7 +363970,9 @@ var GitService = class {
  try {
  const repo = this.shadowGitRepository;
  await repo.add(".");
- const commitResult = await repo.commit(message
+ const commitResult = await repo.commit(message, {
+ "--no-verify": null
+ });
  return commitResult.commit;
  } catch (error) {
  throw new Error(`Failed to create checkpoint snapshot: ${error instanceof Error ? error.message : "Unknown error"}. Checkpointing may not be working properly.`);
@@ -364292,7 +364313,7 @@ var ModelRouterService = class {
  let decision;
  try {
  decision = await this.strategy.route(context2, this.config, this.config.getBaseLlmClient());
- if (decision.model === DEFAULT_GEMINI_MODEL && this.config.getPreviewFeatures() && decision.metadata.source
+ if (decision.model === DEFAULT_GEMINI_MODEL && this.config.getPreviewFeatures() && !decision.metadata.source.includes("override")) {
  decision.model = PREVIEW_GEMINI_MODEL;
  decision.metadata.source += " (Preview Model)";
  decision.metadata.reasoning += " (Upgraded to Preview Model)";
@@ -364347,10 +364368,14 @@ var JsonStreamEventType;
  // packages/core/dist/src/services/modelConfigService.js
  var ModelConfigService = class {
  config;
+ runtimeAliases = {};
  // TODO(12597): Process config to build a typed alias hierarchy.
  constructor(config2) {
  this.config = config2;
  }
+ registerRuntimeModelConfig(aliasName, alias) {
+ this.runtimeAliases[aliasName] = alias;
+ }
  resolveAlias(aliasName, aliases2, visited = /* @__PURE__ */ new Set()) {
  if (visited.has(aliasName)) {
  throw new Error(`Circular alias dependency: ${[...visited, aliasName].join(" -> ")}`);
@@ -364374,10 +364399,11 @@ var ModelConfigService = class {
  internalGetResolvedConfig(context2) {
  const config2 = this.config || {};
  const { aliases: aliases2 = {}, overrides = [] } = config2;
+ const allAliases = { ...aliases2, ...this.runtimeAliases };
  let baseModel = context2.model;
  let resolvedConfig = {};
- if (
- const resolvedAlias = this.resolveAlias(context2.model,
+ if (allAliases[context2.model]) {
+ const resolvedAlias = this.resolveAlias(context2.model, allAliases);
  baseModel = resolvedAlias.modelConfig.model;
  resolvedConfig = this.deepMerge(resolvedConfig, resolvedAlias.modelConfig.generateContentConfig);
  }
@@ -364479,8 +364505,7 @@ var DEFAULT_MODEL_CONFIGS = {
  modelConfig: {
  generateContentConfig: {
  thinkingConfig: {
- includeThoughts: true
- thinkingBudget: -1
+ includeThoughts: true
  },
  temperature: 1,
  topP: 0.95,
@@ -364488,25 +364513,51 @@ var DEFAULT_MODEL_CONFIGS = {
  }
  }
  },
+ "chat-base-2.5": {
+ extends: "chat-base",
+ modelConfig: {
+ generateContentConfig: {
+ thinkingConfig: {
+ thinkingBudget: DEFAULT_THINKING_MODE
+ }
+ }
+ }
+ },
+ "chat-base-3": {
+ extends: "chat-base",
+ modelConfig: {
+ generateContentConfig: {
+ thinkingConfig: {
+ thinkingLevel: ThinkingLevel.HIGH
+ }
+ }
+ }
+ },
  // Because `gemini-2.5-pro` and related model configs are "user-facing"
  // today, i.e. they could be passed via `--model`, we have to be careful to
  // ensure these model configs can be used interactively.
  // TODO(joshualitt): Introduce internal base configs for the various models,
  // note: we will have to think carefully about names.
+ "gemini-3-pro-preview": {
+ extends: "chat-base-3",
+ modelConfig: {
+ model: "gemini-3-pro-preview"
+ }
+ },
  "gemini-2.5-pro": {
- extends: "chat-base",
+ extends: "chat-base-2.5",
  modelConfig: {
  model: "gemini-2.5-pro"
  }
  },
  "gemini-2.5-flash": {
- extends: "chat-base",
+ extends: "chat-base-2.5",
  modelConfig: {
  model: "gemini-2.5-flash"
  }
  },
  "gemini-2.5-flash-lite": {
- extends: "chat-base",
+ extends: "chat-base-2.5",
  modelConfig: {
  model: "gemini-2.5-flash-lite"
  }
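Note: DEFAULT_MODEL_CONFIGS now splits the thinking settings by model family: the 2.5 aliases extend chat-base-2.5 (thinkingConfig.thinkingBudget) while gemini-3-pro-preview extends chat-base-3 (thinkingConfig.thinkingLevel), both on top of the shared chat-base sampling settings. A simplified sketch of how such an extends chain could resolve; the package's real logic is ModelConfigService.resolveAlias/deepMerge, so treat this only as a model:

function resolveAliasChain(aliases, name) {
  const entry = aliases[name];
  if (!entry) {
    return {};
  }
  const base = entry.extends ? resolveAliasChain(aliases, entry.extends) : {};
  // More specific entries win over the alias they extend (shallow merge for the sketch).
  return {
    ...base,
    ...entry.modelConfig,
    generateContentConfig: {
      ...base.generateContentConfig,
      ...entry.modelConfig?.generateContentConfig
    }
  };
}
// "gemini-2.5-pro" -> "chat-base-2.5" -> "chat-base" keeps the chat-base sampling
// settings and the 2.5 thinkingBudget, while "gemini-3-pro-preview" -> "chat-base-3"
// ends up with thinkingLevel instead.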
@@ -365027,6 +365078,9 @@ var PolicyEngine = class {
  };

  // packages/core/dist/src/agents/registry.js
+ function getModelConfigAlias(definition) {
+ return `${definition.name}-config`;
+ }
  var AgentRegistry = class {
  config;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
@@ -365076,6 +365130,21 @@ var AgentRegistry = class {
  debugLogger.log(`[AgentRegistry] Overriding agent '${definition.name}'`);
  }
  this.agents.set(definition.name, definition);
+ const modelConfig = definition.modelConfig;
+ const runtimeAlias = {
+ modelConfig: {
+ model: modelConfig.model,
+ generateContentConfig: {
+ temperature: modelConfig.temp,
+ topP: modelConfig.top_p,
+ thinkingConfig: {
+ includeThoughts: true,
+ thinkingBudget: modelConfig.thinkingBudget ?? -1
+ }
+ }
+ }
+ };
+ this.config.modelConfigService.registerRuntimeModelConfig(getModelConfigAlias(definition), runtimeAlias);
  }
  /**
  * Retrieves an agent definition by name.
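Note: registering an agent now also derives a runtime model-config alias named "<agent-name>-config" (via getModelConfigAlias) from the agent's modelConfig, and AgentExecutor.callModel (next hunks) resolves that alias with sendMessageStream({ model: getModelConfigAlias(definition), overrideScope: definition.name }, ...). A hypothetical example of the registered shape, following the hunk above; the agent name and values are made up:

// Hypothetical agent definition; only the shape matters here.
const definition = {
  name: "investigator",
  modelConfig: { model: "gemini-2.5-flash", temp: 0, top_p: 0.95 }
};
// registerAgent() would store this under the alias "investigator-config":
const runtimeAlias = {
  modelConfig: {
    model: "gemini-2.5-flash",
    generateContentConfig: {
      temperature: 0,
      topP: 0.95,
      thinkingConfig: { includeThoughts: true, thinkingBudget: -1 }
    }
  }
};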
@@ -365137,10 +365206,12 @@ async function executeToolCall(config2, toolCallRequest, abortSignal) {
  const scheduler3 = new CoreToolScheduler({
  config: config2,
  getPreferredEditor: () => void 0,
- onEditorClose: () => {
- },
  onAllToolCallsComplete: async (completedToolCalls) => {
-
+ if (completedToolCalls.length > 0) {
+ resolve14(completedToolCalls[0]);
+ } else {
+ reject(new Error("No completed tool calls returned."));
+ }
  }
  });
  scheduler3.schedule(toolCallRequest, abortSignal).catch((error) => {
@@ -366779,14 +366850,13 @@ var AgentExecutor = class _AgentExecutor {
  * @returns The model's response, including any tool calls or text.
  */
  async callModel(chat, message, tools, signal, promptId) {
-
-
-
-
-
-
- };
- const responseStream = await chat.sendMessageStream(this.definition.modelConfig.model, messageParams, promptId);
+ if (tools.length > 0) {
+ chat.setTools([{ functionDeclarations: tools }]);
+ }
+ const responseStream = await chat.sendMessageStream({
+ model: getModelConfigAlias(this.definition),
+ overrideScope: this.definition.name
+ }, message.parts || [], promptId, signal);
  const functionCalls = [];
  let textResponse = "";
  for await (const resp of responseStream) {
@@ -366812,25 +366882,20 @@ var AgentExecutor = class _AgentExecutor {
  }
  /** Initializes a `GeminiChat` instance for the agent run. */
  async createChatObject(inputs) {
- const { promptConfig
+ const { promptConfig } = this.definition;
  if (!promptConfig.systemPrompt && !promptConfig.initialMessages) {
  throw new Error("PromptConfig must define either `systemPrompt` or `initialMessages`.");
  }
  const startHistory = this.applyTemplateToInitialMessages(promptConfig.initialMessages ?? [], inputs);
  const systemInstruction = promptConfig.systemPrompt ? await this.buildSystemPrompt(inputs) : void 0;
  try {
-
-
-
-
-
-
-
- };
- if (systemInstruction) {
- generationConfig.systemInstruction = systemInstruction;
- }
- return new GeminiChat(this.runtimeContext, generationConfig, startHistory);
+ return new GeminiChat(
+ this.runtimeContext,
+ systemInstruction,
+ [],
+ // set in `callModel`,
+ startHistory
+ );
  } catch (error) {
  await reportError(error, `Error initializing Gemini chat for agent ${this.definition.name}.`, startHistory, "startChat");
  throw new Error(`Failed to create chat object: ${error}`);
@@ -367512,7 +367577,7 @@ async function getClientMetadata() {
  clientMetadataPromise = (async () => ({
  ideName: "IDE_UNSPECIFIED",
  pluginType: "GEMINI",
- ideVersion: "0.
+ ideVersion: "0.19.0-nightly.20251121.5982abeff",
  platform: getPlatform(),
  updateChannel: await getReleaseChannel(__dirname5)
  }))();
@@ -369783,7 +369848,7 @@ var logger4 = {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  warn: (...args2) => debugLogger.warn("[WARN] [ImportProcessor]", ...args2),
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- error: (...args2) =>
+ error: (...args2) => debugLogger.error("[ERROR] [ImportProcessor]", ...args2)
  };
  async function findProjectRoot(startDir) {
  let currentDir = path60.resolve(startDir);
@@ -372266,6 +372331,21 @@ async function createTransportWithOAuth(mcpServerName, mcpServerConfig, accessTo
  return null;
  }
  }
+ var LenientJsonSchemaValidator = class {
+ ajvValidator = new AjvJsonSchemaValidator();
+ getValidator(schema) {
+ try {
+ return this.ajvValidator.getValidator(schema);
+ } catch (error) {
+ debugLogger.warn(`Failed to compile MCP tool output schema (${schema?.["$id"] ?? "<no $id>"}): ${error instanceof Error ? error.message : String(error)}. Skipping output validation for this tool.`);
+ return (input) => ({
+ valid: true,
+ data: input,
+ errorMessage: void 0
+ });
+ }
+ }
+ };
  function populateMcpServerCommand(mcpServers, mcpServerCommand) {
  if (mcpServerCommand) {
  const cmd = mcpServerCommand;
@@ -372408,6 +372488,9 @@ async function connectToMcpServer(mcpServerName, mcpServerConfig, debugMode, wor
  const mcpClient = new Client({
  name: "gemini-cli-mcp-client",
  version: "0.0.1"
+ }, {
+ // Use a tolerant validator so bad output schemas don't block discovery.
+ jsonSchemaValidator: new LenientJsonSchemaValidator()
  });
  mcpClient.registerCapabilities({
  roots: {
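Note: the MCP client now plugs in LenientJsonSchemaValidator, so a tool output schema that Ajv cannot compile no longer aborts server discovery; the failure is logged and validation for that tool becomes a pass-through. A sketch of the validator contract being exercised directly, assuming placeholder toolOutputSchema and toolResult values (the MCP SDK normally drives this internally):

const validator = new LenientJsonSchemaValidator();
const validate = validator.getValidator(toolOutputSchema); // never throws on a bad schema
const { valid, data, errorMessage } = validate(toolResult); // pass-through if compilation failed
if (!valid) {
  console.warn(`Tool output failed validation: ${errorMessage}`);
}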
@@ -373452,6 +373535,16 @@ var Config = class {
  getGeminiClient() {
  return this.geminiClient;
  }
+ /**
+ * Updates the system instruction with the latest user memory.
+ * Whenever the user memory (GEMINI.md files) is updated.
+ */
+ async updateSystemInstructionIfInitialized() {
+ const geminiClient = this.getGeminiClient();
+ if (geminiClient?.isInitialized()) {
+ await geminiClient.updateSystemInstruction();
+ }
+ }
  getModelRouterService() {
  return this.modelRouterService;
  }
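Note: Config.updateSystemInstructionIfInitialized() is the new entry point for pushing refreshed user memory (GEMINI.md content) into a live session: it delegates to GeminiClient.updateSystemInstruction(), which rebuilds the prompt with getCoreSystemPrompt() and applies it through GeminiChat.setSystemInstruction(). A minimal caller-side sketch, assuming an existing setUserMemory() setter on Config and a hypothetical loader:

async function refreshUserMemory(config) {
  const memory = await loadGeminiMdFiles(); // hypothetical loader
  config.setUserMemory(memory); // assumed existing setter on Config
  await config.updateSystemInstructionIfInitialized(); // no-op until the client is initialized
}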
@@ -374174,6 +374267,11 @@ var PreCompressTrigger;
  PreCompressTrigger2["Auto"] = "auto";
  })(PreCompressTrigger || (PreCompressTrigger = {}));

+ // packages/core/dist/src/utils/stdio.js
+ init_events();
+ var originalStdoutWrite = process.stdout.write.bind(process.stdout);
+ var originalStderrWrite = process.stderr.write.bind(process.stderr);
+
  // packages/core/dist/index.js
  init_keychain_token_storage();

@@ -375055,9 +375153,7 @@ var Task = class _Task {
  onAllToolCallsComplete: this._schedulerAllToolCallsComplete.bind(this),
  onToolCallsUpdate: this._schedulerToolCallsUpdate.bind(this),
  getPreferredEditor: () => "vscode",
- config: this.config
- onEditorClose: () => {
- }
+ config: this.config
  });
  return scheduler3;
  }
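Note: the last two hunks remove the onEditorClose callback from CoreToolScheduler options and from the a2a Task's scheduler config; openDiff() now signals the same moment by emitting CoreEvent.ExternalEditorClosed on the shared coreEvents emitter, so interested code subscribes instead of passing a callback. A sketch of the event-based replacement; the handler body is hypothetical:

coreEvents.on(CoreEvent.ExternalEditorClosed, () => {
  refreshDiffConfirmation(); // hypothetical: whatever previously ran in onEditorClose
});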