osagent 0.1.26 → 0.1.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.js +146 -62
- package/package.json +1 -1
package/dist/cli.js
CHANGED
@@ -53251,14 +53251,14 @@ var init_node = __esm({
 Mode2["MODE_UNSPECIFIED"] = "MODE_UNSPECIFIED";
 Mode2["MODE_DYNAMIC"] = "MODE_DYNAMIC";
 })(Mode || (Mode = {}));
-(function(
-
-
-
-
-
-
-
+(function(AuthType4) {
+AuthType4["AUTH_TYPE_UNSPECIFIED"] = "AUTH_TYPE_UNSPECIFIED";
+AuthType4["NO_AUTH"] = "NO_AUTH";
+AuthType4["API_KEY_AUTH"] = "API_KEY_AUTH";
+AuthType4["HTTP_BASIC_AUTH"] = "HTTP_BASIC_AUTH";
+AuthType4["GOOGLE_SERVICE_ACCOUNT_AUTH"] = "GOOGLE_SERVICE_ACCOUNT_AUTH";
+AuthType4["OAUTH"] = "OAUTH";
+AuthType4["OIDC_AUTH"] = "OIDC_AUTH";
 })(AuthType || (AuthType = {}));
 (function(ApiSpec2) {
 ApiSpec2["API_SPEC_UNSPECIFIED"] = "API_SPEC_UNSPECIFIED";
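Note: the `(function(AuthType4) { ... })(AuthType || (AuthType = {}))` wrapper above is the IIFE that TypeScript emits for a string enum. A reconstructed sketch of the declaration it most likely corresponds to; the member names come from the hunk, while the enum name and export form are assumptions, since the un-bundled source is not part of this diff:

// Reconstructed sketch only - not the package's actual source file.
export enum AuthType {
  AUTH_TYPE_UNSPECIFIED = "AUTH_TYPE_UNSPECIFIED",
  NO_AUTH = "NO_AUTH",
  API_KEY_AUTH = "API_KEY_AUTH",
  HTTP_BASIC_AUTH = "HTTP_BASIC_AUTH",
  GOOGLE_SERVICE_ACCOUNT_AUTH = "GOOGLE_SERVICE_ACCOUNT_AUTH",
  OAUTH = "OAUTH",
  OIDC_AUTH = "OIDC_AUTH",
}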
@@ -146147,27 +146147,31 @@ function createContentGeneratorConfig(config, authType, generationConfig) {
 return {
 ...newContentGeneratorConfig,
 model: newContentGeneratorConfig?.model || DEFAULT_OLLAMA_CODER_MODEL,
-
-
-apiKey:
+// Force Ollama Cloud endpoint explicitly
+baseUrl: "https://api.ollama.com",
+apiKey: process.env["OLLAMA_API_KEY"]
 };
 }
 if (authType === AuthType2.OLLAMA_LOCAL) {
 return {
 ...newContentGeneratorConfig,
 model: newContentGeneratorConfig?.model || DEFAULT_OLLAMA_CODER_MODEL,
-
+// Force localhost endpoint explicitly
+baseUrl: "http://localhost:11434/v1",
 // Local Ollama doesn't require API key
-apiKey:
+apiKey: "ollama"
 };
 }
 if (authType === AuthType2.USE_GROQ) {
+const groqApiKey = process.env["GROQ_API_KEY"];
 return {
 ...newContentGeneratorConfig,
 model: newContentGeneratorConfig?.model || "moonshotai/kimi-k2-instruct-0905",
-
-
-apiKey:
+// Force GROQ endpoint explicitly - this overrides any inherited baseUrl
+baseUrl: "https://api.groq.com/openai/v1",
+apiKey: groqApiKey,
+// Disable cache control for GROQ (not supported)
+disableCacheControl: true
 };
 }
 return {
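Reviewer note: the change above stops relying on whatever baseUrl/apiKey the inherited config carried and pins each provider to a fixed endpoint. A minimal standalone sketch of that resolution logic under simplified types of my own; the URLs, env var names, and the disableCacheControl flag come from the hunk, everything else is illustrative:

// Sketch only - types and function name are assumptions, not the package's API.
type ProviderAuth = "ollama-cloud" | "ollama-local" | "groq";

interface EndpointConfig {
  baseUrl: string;
  apiKey?: string;
  disableCacheControl?: boolean;
}

function resolveEndpoint(auth: ProviderAuth): EndpointConfig {
  switch (auth) {
    case "ollama-cloud":
      // Cloud endpoint is forced; the key comes from the environment.
      return { baseUrl: "https://api.ollama.com", apiKey: process.env["OLLAMA_API_KEY"] };
    case "ollama-local":
      // Local Ollama ignores the key, but the OpenAI-compatible client expects one.
      return { baseUrl: "http://localhost:11434/v1", apiKey: "ollama" };
    case "groq":
      // GROQ endpoint overrides any inherited baseUrl; cache control is unsupported.
      return {
        baseUrl: "https://api.groq.com/openai/v1",
        apiKey: process.env["GROQ_API_KEY"],
        disableCacheControl: true,
      };
    default:
      // Exhaustiveness guard for future auth types.
      throw new Error("Unknown provider auth");
  }
}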
@@ -146176,7 +146180,7 @@ function createContentGeneratorConfig(config, authType, generationConfig) {
 };
 }
 async function createContentGenerator(config, gcConfig, sessionId2, isInitialAuth) {
-const version2 = "0.1.
+const version2 = "0.1.27";
 const userAgent2 = `OSAgent/${version2} (${process.platform}; ${process.arch})`;
 const baseHeaders = {
 "User-Agent": userAgent2
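The bumped version constant feeds the User-Agent header built two lines below it; a quick sketch of the resulting value (platform and architecture vary by host):

const version = "0.1.27";
const userAgent = `OSAgent/${version} (${process.platform}; ${process.arch})`;
// e.g. "OSAgent/0.1.27 (linux; x64)"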
@@ -146251,16 +146255,16 @@ var init_contentGenerator = __esm({
 init_models();
 init_installationManager();
 init_loggingContentGenerator();
-(function(
-
-
-
-
-
-
-
-
-
+(function(AuthType4) {
+AuthType4["LOGIN_WITH_OSAGENT"] = "oauth-personal";
+AuthType4["USE_OSA"] = "OSA-api-key";
+AuthType4["USE_VERTEX_AI"] = "vertex-ai";
+AuthType4["CLOUD_SHELL"] = "cloud-shell";
+AuthType4["USE_OPENAI"] = "openai";
+AuthType4["OSA_OAUTH"] = "OSA-oauth";
+AuthType4["OLLAMA_CLOUD"] = "ollama-cloud";
+AuthType4["OLLAMA_LOCAL"] = "ollama-local";
+AuthType4["USE_GROQ"] = "groq";
 })(AuthType2 || (AuthType2 = {}));
 __name(createContentGeneratorConfig, "createContentGeneratorConfig");
 __name(createContentGenerator, "createContentGenerator");
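As above, this IIFE is the compiled form of a string enum. A reconstructed sketch of the CLI-side auth-type declaration; the values are taken verbatim from the hunk, while the enum name and export form are assumptions:

// Reconstructed sketch only - not the package's actual source file.
export enum AuthType {
  LOGIN_WITH_OSAGENT = "oauth-personal",
  USE_OSA = "OSA-api-key",
  USE_VERTEX_AI = "vertex-ai",
  CLOUD_SHELL = "cloud-shell",
  USE_OPENAI = "openai",
  OSA_OAUTH = "OSA-oauth",
  OLLAMA_CLOUD = "ollama-cloud",
  OLLAMA_LOCAL = "ollama-local",
  USE_GROQ = "groq",
}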
@@ -309775,7 +309779,7 @@ __name(getPackageJson, "getPackageJson");
 // packages/cli/src/utils/version.ts
 async function getCliVersion() {
 const pkgJson = await getPackageJson();
-return "0.1.
+return "0.1.27";
 }
 __name(getCliVersion, "getCliVersion");
 
@@ -313944,8 +313948,8 @@ var formatDuration = /* @__PURE__ */ __name((milliseconds) => {
 
 // packages/cli/src/generated/git-commit.ts
 init_esbuild_shims();
-var GIT_COMMIT_INFO2 = "
-var CLI_VERSION2 = "0.1.
+var GIT_COMMIT_INFO2 = "fbe9649";
+var CLI_VERSION2 = "0.1.27";
 
 // packages/cli/src/utils/systemInfo.ts
 async function getNpmVersion() {
@@ -317601,7 +317605,10 @@ var AVAILABLE_MODELS_OSA = [
 return t2(
 "Qwen3-Coder 480B Cloud - Most powerful agentic coding model with 256K context (Recommended)"
 );
-}
+},
+contextWindow: 262144,
+// 256K
+tokensPerSecond: 100
 },
 {
 id: "qwen3-coder:30b",
@@ -317610,7 +317617,10 @@ var AVAILABLE_MODELS_OSA = [
 return t2(
 "Qwen3-Coder 30B - Efficient coding model with 30B params, only 3.3B activated"
 );
-}
+},
+contextWindow: 131072,
+// 131K
+tokensPerSecond: 150
 },
 {
 id: MAINLINE_VLM,
@@ -317620,7 +317630,10 @@ var AVAILABLE_MODELS_OSA = [
 "Vision model for multimodal tasks"
 );
 },
-isVision: true
+isVision: true,
+contextWindow: 131072,
+// 131K
+tokensPerSecond: 50
 }
 ];
 var AVAILABLE_MODELS_GROQ = [
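The OSA, GROQ, and Ollama-local catalogs all gain the same two metadata fields in this release: contextWindow (in tokens) and tokensPerSecond (an advertised throughput figure). A sketch of the entry shape implied by the fields visible in this diff; the interface name and optionality are my assumptions:

// Sketch of the catalog entry shape; field names are from the diff, the rest is assumed.
interface ModelCatalogEntry {
  id: string;
  label?: string;
  readonly description?: string; // exposed through a getter in the bundle
  isVision?: boolean;            // vision-capable OSA entry
  isLocal?: boolean;             // Ollama-local entries
  contextWindow?: number;        // tokens, e.g. 262144 for the 256K models
  tokensPerSecond?: number;      // advertised throughput; local figures vary by hardware
}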
@@ -317631,37 +317644,42 @@ var AVAILABLE_MODELS_GROQ = [
 return t2(
 "Kimi K2 0905 - Best coding model with 256K context, ~200 tok/s on GROQ"
 );
-}
+},
+contextWindow: 262144,
+// 256K
+tokensPerSecond: 200
 },
 {
 id: "moonshotai/kimi-k2-instruct",
-label: "Kimi K2 Instruct",
+label: "Kimi K2 Instruct (Legacy)",
 get description() {
 return t2(
-"Kimi K2 Instruct - 131K context,
+"Kimi K2 Instruct - 131K context (deprecated, use 0905 instead)"
 );
-}
+},
+contextWindow: 131072,
+// 131K
+tokensPerSecond: 200
 },
 {
 id: "llama-3.3-70b-versatile",
 label: "Llama 3.3 70B",
 get description() {
 return t2("Llama 3.3 70B - Versatile model for general coding tasks");
-}
+},
+contextWindow: 131072,
+// 131K
+tokensPerSecond: 280
 },
 {
 id: "llama-3.1-8b-instant",
 label: "Llama 3.1 8B Instant",
 get description() {
 return t2("Llama 3.1 8B - Ultra-fast for simple tasks");
-}
-
-
-
-label: "Mixtral 8x7B",
-get description() {
-return t2("Mixtral 8x7B MoE - 32K context, good balance of speed/quality");
-}
+},
+contextWindow: 131072,
+// 131K
+tokensPerSecond: 560
 }
 ];
 var AVAILABLE_MODELS_OLLAMA_LOCAL = [
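One thing the new metadata enables is programmatic selection instead of hard-coded defaults. An illustrative helper, not part of the package, that picks the entry with the largest advertised context window from a catalog shaped like the ones above:

// Illustrative helper only - not taken from the package source.
function largestContextModel<T extends { id: string; contextWindow?: number }>(
  models: T[]
): T | undefined {
  return models.reduce<T | undefined>(
    (best, m) => ((m.contextWindow ?? 0) > (best?.contextWindow ?? 0) ? m : best),
    undefined
  );
}

// With the GROQ figures in this diff, the 262144-token Kimi K2 0905 entry wins
// over the 131072-token Llama entries.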
@@ -317671,7 +317689,11 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
 get description() {
 return t2("Qwen2.5-Coder 32B - Best local coding model (requires ~20GB VRAM)");
 },
-isLocal: true
+isLocal: true,
+contextWindow: 131072,
+// 128K
+tokensPerSecond: 30
+// Varies by hardware
 },
 {
 id: "qwen2.5-coder:14b",
@@ -317679,7 +317701,10 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
 get description() {
 return t2("Qwen2.5-Coder 14B - Great local coding model (requires ~10GB VRAM)");
 },
-isLocal: true
+isLocal: true,
+contextWindow: 131072,
+// 128K
+tokensPerSecond: 50
 },
 {
 id: "qwen2.5-coder:7b",
@@ -317687,7 +317712,10 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
 get description() {
 return t2("Qwen2.5-Coder 7B - Good local coding model (requires ~5GB VRAM)");
 },
-isLocal: true
+isLocal: true,
+contextWindow: 131072,
+// 128K
+tokensPerSecond: 80
 },
 {
 id: "codellama:34b",
@@ -317695,7 +317723,10 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
 get description() {
 return t2("CodeLlama 34B - Meta coding model (requires ~20GB VRAM)");
 },
-isLocal: true
+isLocal: true,
+contextWindow: 16384,
+// 16K
+tokensPerSecond: 25
 },
 {
 id: "deepseek-coder-v2:16b",
@@ -317703,7 +317734,10 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
 get description() {
 return t2("DeepSeek Coder V2 16B - Efficient MoE coding model");
 },
-isLocal: true
+isLocal: true,
+contextWindow: 131072,
+// 128K
+tokensPerSecond: 60
 },
 {
 id: "llama3.2:latest",
@@ -317711,7 +317745,10 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
 get description() {
 return t2("Llama 3.2 - General purpose model with coding capabilities");
 },
-isLocal: true
+isLocal: true,
+contextWindow: 131072,
+// 128K
+tokensPerSecond: 70
 }
 ];
 function getOpenAIAvailableModelFromEnv() {
@@ -317842,11 +317879,8 @@ var providerCommand = {
 const apiKey = process.env["GROQ_API_KEY"];
 if (!apiKey) {
 return {
-type: "
-
-content: t2(`GROQ API key required. Set GROQ_API_KEY environment variable.
-
-Get your API key at: https://console.groq.com/keys`)
+type: "start_auth",
+authType: AuthType2.USE_GROQ
 };
 }
 try {
@@ -317923,9 +317957,8 @@ Use /model to select a local model.`)
 const apiKey = process.env["OPENAI_API_KEY"];
 if (!apiKey) {
 return {
-type: "
-
-content: t2(`OpenAI API key required. Set OPENAI_API_KEY environment variable.`)
+type: "start_auth",
+authType: AuthType2.USE_OPENAI
 };
 }
 try {
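Behaviour change: when GROQ_API_KEY or OPENAI_API_KEY is missing, /provider no longer prints a "set the environment variable" message; it returns a start_auth result that hands control to the interactive key prompt. A simplified sketch of that result shape and how it gets dispatched; the union below models only the variants visible in this diff, the real type is larger:

// Sketch only - simplified types, not the package's actual declarations.
type SlashCommandResult =
  | { type: "start_auth"; authType: string } // e.g. AuthType2.USE_GROQ
  | { type: "handled" };

function dispatch(
  result: SlashCommandResult,
  startAuthForProvider: (authType: string) => void
): SlashCommandResult {
  if (result.type === "start_auth") {
    // Mirrors the new case added to useSlashCommandProcessor later in this diff.
    startAuthForProvider(result.authType);
    return { type: "handled" };
  }
  return result;
}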
@@ -351462,6 +351495,31 @@ var DialogManager = /* @__PURE__ */ __name(({
 }
 );
 }
+if (uiState.pendingAuthType === AuthType2.USE_GROQ) {
+return /* @__PURE__ */ (0, import_jsx_runtime86.jsx)(
+OpenAIKeyPrompt,
+{
+onSubmit: (apiKey, _baseUrl, model) => {
+process34.env["GROQ_API_KEY"] = apiKey;
+uiActions.handleAuthSelect(AuthType2.USE_GROQ, "User" /* User */, {
+apiKey,
+// Always use GROQ endpoint - ignore user-provided baseUrl
+baseUrl: "https://api.groq.com/openai/v1",
+model: model || "moonshotai/kimi-k2-instruct-0905"
+});
+},
+onCancel: () => {
+uiActions.cancelAuthentication();
+uiActions.setAuthState("updating" /* Updating */);
+},
+defaultApiKey: process34.env["GROQ_API_KEY"] || "",
+defaultBaseUrl: "https://api.groq.com/openai/v1",
+defaultModel: "moonshotai/kimi-k2-instruct-0905",
+providerName: "GROQ",
+apiKeyUrl: "https://console.groq.com/keys"
+}
+);
+}
 if (uiState.pendingAuthType === AuthType2.OSA_OAUTH) {
 return /* @__PURE__ */ (0, import_jsx_runtime86.jsx)(
 OSAOAuthProgress,
@@ -355164,6 +355222,18 @@ var useAuthCommand = /* @__PURE__ */ __name((settings, config, addItem) => {
 await performAuth(authType, scope, credentials);
 return;
 }
+if (authType === AuthType2.USE_GROQ) {
+if (credentials) {
+config.updateCredentials({
+apiKey: credentials.apiKey,
+// Always use GROQ endpoint - ignore user-provided baseUrl
+baseUrl: "https://api.groq.com/openai/v1",
+model: credentials.model || "moonshotai/kimi-k2-instruct-0905"
+});
+await performAuth(authType, scope, credentials);
+}
+return;
+}
 await performAuth(authType, scope);
 },
 [config, performAuth]
@@ -355171,6 +355241,12 @@ var useAuthCommand = /* @__PURE__ */ __name((settings, config, addItem) => {
 const openAuthDialog = (0, import_react106.useCallback)(() => {
 setIsAuthDialogOpen(true);
 }, []);
+const startAuthForProvider = (0, import_react106.useCallback)((authType) => {
+setPendingAuthType(authType);
+setAuthError(null);
+setIsAuthDialogOpen(false);
+setIsAuthenticating(true);
+}, []);
 const cancelAuthentication = (0, import_react106.useCallback)(() => {
 if (isAuthenticating && pendingAuthType === AuthType2.OSA_OAUTH) {
 cancelOSAAuth();
@@ -355214,6 +355290,7 @@ var useAuthCommand = /* @__PURE__ */ __name((settings, config, addItem) => {
 OSAAuthState,
 handleAuthSelect,
 openAuthDialog,
+startAuthForProvider,
 cancelAuthentication
 };
 }, "useAuthCommand");
@@ -356100,6 +356177,10 @@ var useSlashCommandProcessor = /* @__PURE__ */ __name((config, settings, addItem
 true
 );
 }
+case "start_auth": {
+actions.startAuthForProvider(result.authType);
+return { type: "handled" };
+}
 default: {
 const unhandled = result;
 throw new Error(
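Taken together, the hunks above wire a complete in-app key flow for GROQ. A rough trace of the path, assembled from this diff; the identifiers come from the code, the step ordering is my reading of it:

// 1. /provider with no GROQ_API_KEY        -> { type: "start_auth", authType: USE_GROQ }
// 2. useSlashCommandProcessor "start_auth" -> actions.startAuthForProvider(result.authType)
// 3. useAuthCommand.startAuthForProvider   -> sets pendingAuthType, clears errors, starts authenticating
// 4. DialogManager (pendingAuthType === USE_GROQ) -> renders OpenAIKeyPrompt preset for GROQ
// 5. onSubmit -> handleAuthSelect(USE_GROQ, ..., { apiKey,
//                baseUrl: "https://api.groq.com/openai/v1", model })
// 6. handleAuthSelect (USE_GROQ branch)    -> config.updateCredentials(...) then performAuth(...)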
@@ -362832,6 +362913,7 @@ var AppContainer = /* @__PURE__ */ __name((props) => {
 OSAAuthState,
 handleAuthSelect,
 openAuthDialog,
+startAuthForProvider,
 cancelAuthentication
 } = useAuthCommand(settings, config, historyManager.addItem);
 const { proQuotaRequest, handleProQuotaChoice } = useQuotaAndFallback({
@@ -362921,7 +363003,8 @@ var AppContainer = /* @__PURE__ */ __name((props) => {
 addConfirmUpdateExtensionRequest,
 openSubagentCreateDialog,
 openAgentsManagerDialog,
-_showQuitConfirmation: showQuitConfirmation
+_showQuitConfirmation: showQuitConfirmation,
+startAuthForProvider
 }),
 [
 openAuthDialog,
@@ -362938,7 +363021,8 @@ var AppContainer = /* @__PURE__ */ __name((props) => {
 addConfirmUpdateExtensionRequest,
 showQuitConfirmation,
 openSubagentCreateDialog,
-openAgentsManagerDialog
+openAgentsManagerDialog,
+startAuthForProvider
 ]
 );
 const {