osagent 0.1.25 → 0.1.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/cli.js +654 -109
  2. package/package.json +1 -1
package/dist/cli.js CHANGED
@@ -53251,14 +53251,14 @@ var init_node = __esm({
53251
53251
  Mode2["MODE_UNSPECIFIED"] = "MODE_UNSPECIFIED";
53252
53252
  Mode2["MODE_DYNAMIC"] = "MODE_DYNAMIC";
53253
53253
  })(Mode || (Mode = {}));
53254
- (function(AuthType3) {
53255
- AuthType3["AUTH_TYPE_UNSPECIFIED"] = "AUTH_TYPE_UNSPECIFIED";
53256
- AuthType3["NO_AUTH"] = "NO_AUTH";
53257
- AuthType3["API_KEY_AUTH"] = "API_KEY_AUTH";
53258
- AuthType3["HTTP_BASIC_AUTH"] = "HTTP_BASIC_AUTH";
53259
- AuthType3["GOOGLE_SERVICE_ACCOUNT_AUTH"] = "GOOGLE_SERVICE_ACCOUNT_AUTH";
53260
- AuthType3["OAUTH"] = "OAUTH";
53261
- AuthType3["OIDC_AUTH"] = "OIDC_AUTH";
53254
+ (function(AuthType4) {
53255
+ AuthType4["AUTH_TYPE_UNSPECIFIED"] = "AUTH_TYPE_UNSPECIFIED";
53256
+ AuthType4["NO_AUTH"] = "NO_AUTH";
53257
+ AuthType4["API_KEY_AUTH"] = "API_KEY_AUTH";
53258
+ AuthType4["HTTP_BASIC_AUTH"] = "HTTP_BASIC_AUTH";
53259
+ AuthType4["GOOGLE_SERVICE_ACCOUNT_AUTH"] = "GOOGLE_SERVICE_ACCOUNT_AUTH";
53260
+ AuthType4["OAUTH"] = "OAUTH";
53261
+ AuthType4["OIDC_AUTH"] = "OIDC_AUTH";
53262
53262
  })(AuthType || (AuthType = {}));
53263
53263
  (function(ApiSpec2) {
53264
53264
  ApiSpec2["API_SPEC_UNSPECIFIED"] = "API_SPEC_UNSPECIFIED";
@@ -142653,6 +142653,60 @@ var init_ollama = __esm({
142653
142653
  }
142654
142654
  });
142655
142655
 
142656
+ // packages/core/dist/src/core/openaiContentGenerator/provider/groq.js
142657
+ var GroqOpenAICompatibleProvider;
142658
+ var init_groq = __esm({
142659
+ "packages/core/dist/src/core/openaiContentGenerator/provider/groq.js"() {
142660
+ "use strict";
142661
+ init_esbuild_shims();
142662
+ init_default();
142663
+ GroqOpenAICompatibleProvider = class _GroqOpenAICompatibleProvider extends DefaultOpenAICompatibleProvider {
142664
+ static {
142665
+ __name(this, "GroqOpenAICompatibleProvider");
142666
+ }
142667
+ constructor(contentGeneratorConfig, cliConfig) {
142668
+ super(contentGeneratorConfig, cliConfig);
142669
+ }
142670
+ /**
142671
+ * Check if this is a GROQ provider based on baseUrl or API key prefix
142672
+ */
142673
+ static isGroqProvider(contentGeneratorConfig) {
142674
+ const baseURL = contentGeneratorConfig.baseUrl || "";
142675
+ const apiKey = contentGeneratorConfig.apiKey || "";
142676
+ return baseURL.includes("groq.com") || baseURL.includes("api.groq.com") || apiKey.startsWith("gsk_");
142677
+ }
142678
+ /**
142679
+ * Check if model is a Kimi model from Moonshot AI
142680
+ */
142681
+ static isKimiModel(model) {
142682
+ return model.includes("kimi") || model.includes("moonshotai");
142683
+ }
142684
+ buildHeaders() {
142685
+ const baseHeaders = super.buildHeaders();
142686
+ return {
142687
+ ...baseHeaders
142688
+ // GROQ-specific headers if any
142689
+ };
142690
+ }
142691
+ buildRequest(request4, _userPromptId) {
142692
+ const result = { ...request4 };
142693
+ if (_GroqOpenAICompatibleProvider.isKimiModel(result.model)) {
142694
+ if (result.max_tokens && result.max_tokens > 16384) {
142695
+ result.max_tokens = 16384;
142696
+ }
142697
+ }
142698
+ return result;
142699
+ }
142700
+ /**
142701
+ * Get GROQ base URL
142702
+ */
142703
+ static getGroqBaseUrl() {
142704
+ return "https://api.groq.com/openai/v1";
142705
+ }
142706
+ };
142707
+ }
142708
+ });
142709
+
142656
142710
  // packages/core/dist/src/core/openaiContentGenerator/provider/index.js
142657
142711
  var init_provider = __esm({
142658
142712
  "packages/core/dist/src/core/openaiContentGenerator/provider/index.js"() {
@@ -142664,6 +142718,7 @@ var init_provider = __esm({
142664
142718
  init_openrouter();
142665
142719
  init_anthropic();
142666
142720
  init_ollama();
142721
+ init_groq();
142667
142722
  init_default();
142668
142723
  }
142669
142724
  });
@@ -145860,6 +145915,7 @@ __export(openaiContentGenerator_exports, {
145860
145915
  DeepSeekOpenAICompatibleProvider: () => DeepSeekOpenAICompatibleProvider,
145861
145916
  DefaultTelemetryService: () => DefaultTelemetryService,
145862
145917
  EnhancedErrorHandler: () => EnhancedErrorHandler,
145918
+ GroqOpenAICompatibleProvider: () => GroqOpenAICompatibleProvider,
145863
145919
  OllamaOpenAICompatibleProvider: () => OllamaOpenAICompatibleProvider,
145864
145920
  OpenAIContentConverter: () => OpenAIContentConverter,
145865
145921
  OpenAIContentGenerator: () => OpenAIContentGenerator,
@@ -145891,6 +145947,9 @@ function determineProvider(contentGeneratorConfig, cliConfig) {
145891
145947
  if (OllamaOpenAICompatibleProvider.isOllamaProvider(config)) {
145892
145948
  return new OllamaOpenAICompatibleProvider(contentGeneratorConfig, cliConfig);
145893
145949
  }
145950
+ if (GroqOpenAICompatibleProvider.isGroqProvider(config)) {
145951
+ return new GroqOpenAICompatibleProvider(contentGeneratorConfig, cliConfig);
145952
+ }
145894
145953
  return new DefaultOpenAICompatibleProvider(contentGeneratorConfig, cliConfig);
145895
145954
  }
145896
145955
  var init_openaiContentGenerator2 = __esm({
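Taken together, the hunks above wire Groq in as an OpenAI-compatible provider: determineProvider tries the Ollama check first, then the new Groq check, then falls back to the default provider, and the Groq class clamps max_tokens to 16384 for Kimi/Moonshot models. Below is a minimal sketch of those detection and clamping rules, restated outside the bundle for readability; the standalone function names are illustrative, not exports of the package.

    // Illustrative restatement of the checks in groq.js; not part of the package.
    interface ProviderConfig {
      baseUrl?: string;
      apiKey?: string;
    }

    // Mirrors GroqOpenAICompatibleProvider.isGroqProvider: match on a groq.com base URL or a "gsk_" key prefix.
    function looksLikeGroq(config: ProviderConfig): boolean {
      const baseURL = config.baseUrl ?? "";
      const apiKey = config.apiKey ?? "";
      return baseURL.includes("groq.com") || baseURL.includes("api.groq.com") || apiKey.startsWith("gsk_");
    }

    // Mirrors the Kimi clamp in buildRequest: Kimi/Moonshot models are capped at 16384 output tokens.
    function clampKimiMaxTokens(model: string, maxTokens?: number): number | undefined {
      const isKimi = model.includes("kimi") || model.includes("moonshotai");
      return isKimi && maxTokens && maxTokens > 16384 ? 16384 : maxTokens;
    }

    // A gsk_-prefixed key is treated as Groq even without an explicit base URL,
    // and an oversized max_tokens on a Kimi model is pulled back to the cap.
    console.log(looksLikeGroq({ apiKey: "gsk_example" }));                      // true
    console.log(clampKimiMaxTokens("moonshotai/kimi-k2-instruct-0905", 32768)); // 16384
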
@@ -146088,18 +146147,31 @@ function createContentGeneratorConfig(config, authType, generationConfig) {
146088
146147
  return {
146089
146148
  ...newContentGeneratorConfig,
146090
146149
  model: newContentGeneratorConfig?.model || DEFAULT_OLLAMA_CODER_MODEL,
146091
- baseUrl: newContentGeneratorConfig?.baseUrl || "https://api.ollama.com",
146092
- // API key from OLLAMA_API_KEY env var or settings
146093
- apiKey: newContentGeneratorConfig?.apiKey || process.env["OLLAMA_API_KEY"]
146150
+ // Force Ollama Cloud endpoint explicitly
146151
+ baseUrl: "https://api.ollama.com",
146152
+ apiKey: process.env["OLLAMA_API_KEY"]
146094
146153
  };
146095
146154
  }
146096
146155
  if (authType === AuthType2.OLLAMA_LOCAL) {
146097
146156
  return {
146098
146157
  ...newContentGeneratorConfig,
146099
146158
  model: newContentGeneratorConfig?.model || DEFAULT_OLLAMA_CODER_MODEL,
146100
- baseUrl: newContentGeneratorConfig?.baseUrl || "http://localhost:11434/v1",
146159
+ // Force localhost endpoint explicitly
146160
+ baseUrl: "http://localhost:11434/v1",
146101
146161
  // Local Ollama doesn't require API key
146102
- apiKey: newContentGeneratorConfig?.apiKey || "ollama"
146162
+ apiKey: "ollama"
146163
+ };
146164
+ }
146165
+ if (authType === AuthType2.USE_GROQ) {
146166
+ const groqApiKey = process.env["GROQ_API_KEY"];
146167
+ return {
146168
+ ...newContentGeneratorConfig,
146169
+ model: newContentGeneratorConfig?.model || "moonshotai/kimi-k2-instruct-0905",
146170
+ // Force GROQ endpoint explicitly - this overrides any inherited baseUrl
146171
+ baseUrl: "https://api.groq.com/openai/v1",
146172
+ apiKey: groqApiKey,
146173
+ // Disable cache control for GROQ (not supported)
146174
+ disableCacheControl: true
146103
146175
  };
146104
146176
  }
146105
146177
  return {
@@ -146108,7 +146180,7 @@ function createContentGeneratorConfig(config, authType, generationConfig) {
146108
146180
  };
146109
146181
  }
146110
146182
  async function createContentGenerator(config, gcConfig, sessionId2, isInitialAuth) {
146111
- const version2 = "0.1.25";
146183
+ const version2 = "0.1.27";
146112
146184
  const userAgent2 = `OSAgent/${version2} (${process.platform}; ${process.arch})`;
146113
146185
  const baseHeaders = {
146114
146186
  "User-Agent": userAgent2
@@ -146154,6 +146226,13 @@ async function createContentGenerator(config, gcConfig, sessionId2, isInitialAut
146154
146226
  const { createOpenAIContentGenerator: createOpenAIContentGenerator2 } = await Promise.resolve().then(() => (init_openaiContentGenerator2(), openaiContentGenerator_exports));
146155
146227
  return createOpenAIContentGenerator2(config, gcConfig);
146156
146228
  }
146229
+ if (config.authType === AuthType2.USE_GROQ) {
146230
+ if (!config.apiKey) {
146231
+ throw new Error("GROQ API key is required. Set GROQ_API_KEY environment variable.");
146232
+ }
146233
+ const { createOpenAIContentGenerator: createOpenAIContentGenerator2 } = await Promise.resolve().then(() => (init_openaiContentGenerator2(), openaiContentGenerator_exports));
146234
+ return createOpenAIContentGenerator2(config, gcConfig);
146235
+ }
146157
146236
  if (config.authType === AuthType2.OSA_OAUTH) {
146158
146237
  const { getOSAOAuthClient: getOSAOauthClient } = await Promise.resolve().then(() => (init_qwenOAuth2(), qwenOAuth2_exports));
146159
146238
  const { OSAContentGenerator: OSAContentGenerator2 } = await Promise.resolve().then(() => (init_qwenContentGenerator(), qwenContentGenerator_exports));
@@ -146176,15 +146255,16 @@ var init_contentGenerator = __esm({
146176
146255
  init_models();
146177
146256
  init_installationManager();
146178
146257
  init_loggingContentGenerator();
146179
- (function(AuthType3) {
146180
- AuthType3["LOGIN_WITH_OSAGENT"] = "oauth-personal";
146181
- AuthType3["USE_OSA"] = "OSA-api-key";
146182
- AuthType3["USE_VERTEX_AI"] = "vertex-ai";
146183
- AuthType3["CLOUD_SHELL"] = "cloud-shell";
146184
- AuthType3["USE_OPENAI"] = "openai";
146185
- AuthType3["OSA_OAUTH"] = "OSA-oauth";
146186
- AuthType3["OLLAMA_CLOUD"] = "ollama-cloud";
146187
- AuthType3["OLLAMA_LOCAL"] = "ollama-local";
146258
+ (function(AuthType4) {
146259
+ AuthType4["LOGIN_WITH_OSAGENT"] = "oauth-personal";
146260
+ AuthType4["USE_OSA"] = "OSA-api-key";
146261
+ AuthType4["USE_VERTEX_AI"] = "vertex-ai";
146262
+ AuthType4["CLOUD_SHELL"] = "cloud-shell";
146263
+ AuthType4["USE_OPENAI"] = "openai";
146264
+ AuthType4["OSA_OAUTH"] = "OSA-oauth";
146265
+ AuthType4["OLLAMA_CLOUD"] = "ollama-cloud";
146266
+ AuthType4["OLLAMA_LOCAL"] = "ollama-local";
146267
+ AuthType4["USE_GROQ"] = "groq";
146188
146268
  })(AuthType2 || (AuthType2 = {}));
146189
146269
  __name(createContentGeneratorConfig, "createContentGeneratorConfig");
146190
146270
  __name(createContentGenerator, "createContentGenerator");
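The enum above gains the USE_GROQ ("groq") auth type, and the matching branch in createContentGeneratorConfig pins the endpoint and defaults rather than inheriting them. As a worked example, this is roughly the config that branch yields when only GROQ_API_KEY is set; the object literal below is illustrative, not code from the bundle.

    // Approximate result of createContentGeneratorConfig for authType "groq" with GROQ_API_KEY set.
    const groqConfig = {
      model: "moonshotai/kimi-k2-instruct-0905", // default unless another model was chosen
      baseUrl: "https://api.groq.com/openai/v1", // forced; overrides any inherited baseUrl
      apiKey: process.env["GROQ_API_KEY"],       // required; createContentGenerator throws without it
      disableCacheControl: true,                 // cache control is not supported on Groq
    };

If the environment variable is missing, the new USE_GROQ branch in createContentGenerator fails with "GROQ API key is required. Set GROQ_API_KEY environment variable."
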
@@ -309699,7 +309779,7 @@ __name(getPackageJson, "getPackageJson");
309699
309779
  // packages/cli/src/utils/version.ts
309700
309780
  async function getCliVersion() {
309701
309781
  const pkgJson = await getPackageJson();
309702
- return "0.1.25";
309782
+ return "0.1.27";
309703
309783
  }
309704
309784
  __name(getCliVersion, "getCliVersion");
309705
309785
 
@@ -313868,7 +313948,8 @@ var formatDuration = /* @__PURE__ */ __name((milliseconds) => {
313868
313948
 
313869
313949
  // packages/cli/src/generated/git-commit.ts
313870
313950
  init_esbuild_shims();
313871
- var GIT_COMMIT_INFO2 = "4703098";
313951
+ var GIT_COMMIT_INFO2 = "fbe9649";
313952
+ var CLI_VERSION2 = "0.1.27";
313872
313953
 
313873
313954
  // packages/cli/src/utils/systemInfo.ts
313874
313955
  async function getNpmVersion() {
@@ -314700,14 +314781,14 @@ init_esbuild_shims();
314700
314781
  var consultCommand = {
314701
314782
  name: "consult",
314702
314783
  get description() {
314703
- return t2("Manage consultation mode - gather context through questions");
314784
+ return t2("Manage consultation - always-active context gathering");
314704
314785
  },
314705
314786
  kind: "built-in" /* BUILT_IN */,
314706
314787
  subCommands: [
314707
314788
  {
314708
- name: "on",
314789
+ name: "active",
314709
314790
  get description() {
314710
- return t2("Enable active consultation mode");
314791
+ return t2("Set to active mode - questions shown but agent continues");
314711
314792
  },
314712
314793
  kind: "built-in" /* BUILT_IN */,
314713
314794
  action: /* @__PURE__ */ __name(async (context2) => {
@@ -314715,37 +314796,60 @@ var consultCommand = {
314715
314796
  return {
314716
314797
  type: "message",
314717
314798
  messageType: "info",
314718
- content: t2("Consultation mode: ACTIVE\nThe agent will ask clarifying questions as you work.\nQuestions appear below the status bar.")
314799
+ content: t2(`Consultation mode: ACTIVE
314800
+
314801
+ Questions are shown as you work, but the agent continues operating.
314802
+ Answer questions when convenient to improve results.`)
314719
314803
  };
314720
314804
  }, "action")
314721
314805
  },
314722
314806
  {
314723
- name: "off",
314807
+ name: "blocking",
314724
314808
  get description() {
314725
- return t2("Disable consultation mode");
314809
+ return t2("Set to blocking mode - high priority questions pause the agent");
314726
314810
  },
314727
314811
  kind: "built-in" /* BUILT_IN */,
314728
314812
  action: /* @__PURE__ */ __name(async (context2) => {
314729
- context2.services.consultation?.setConsultationMode("passive");
314813
+ context2.services.consultation?.setConsultationMode("blocking");
314730
314814
  return {
314731
314815
  type: "message",
314732
314816
  messageType: "info",
314733
- content: t2("Consultation mode: OFF\nNo questions will be asked.")
314817
+ content: t2(`Consultation mode: BLOCKING
314818
+
314819
+ High-priority questions (architecture, critical decisions) will PAUSE
314820
+ the agent until you answer. This ensures important decisions get your input.`)
314734
314821
  };
314735
314822
  }, "action")
314736
314823
  },
314737
314824
  {
314738
- name: "blocking",
314825
+ name: "status",
314739
314826
  get description() {
314740
- return t2("Enable blocking mode - high priority questions pause the agent");
314827
+ return t2("Show current consultation status and collected context");
314741
314828
  },
314742
314829
  kind: "built-in" /* BUILT_IN */,
314743
314830
  action: /* @__PURE__ */ __name(async (context2) => {
314744
- context2.services.consultation?.setConsultationMode("blocking");
314831
+ const consultation = context2.services.consultation;
314832
+ const mode = consultation?.consultationMode || "blocking";
314833
+ const contextStr = consultation?.getContextForAgent() || "";
314834
+ const questionCount = consultation?.questionQueue?.length || 0;
314835
+ let statusContent = `Consultation Status
314836
+ \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501
314837
+
314838
+ Mode: ${mode.toUpperCase()}
314839
+ Pending Questions: ${questionCount}
314840
+ `;
314841
+ if (contextStr) {
314842
+ statusContent += `
314843
+ \u2501\u2501\u2501 Collected Context \u2501\u2501\u2501
314844
+ ${contextStr}`;
314845
+ } else {
314846
+ statusContent += `
314847
+ No context collected yet. Answer questions as they appear.`;
314848
+ }
314745
314849
  return {
314746
314850
  type: "message",
314747
314851
  messageType: "info",
314748
- content: t2("Consultation mode: BLOCKING\nHigh priority questions will pause the agent until answered.\nThis ensures critical decisions get your input.")
314852
+ content: t2(statusContent)
314749
314853
  };
314750
314854
  }, "action")
314751
314855
  },
@@ -314814,34 +314918,40 @@ ${contextStr}`)
314814
314918
  return {
314815
314919
  type: "message",
314816
314920
  messageType: "info",
314817
- content: t2("Added 3 demo consultation questions. Check the consultation panel below the status bar.")
314921
+ content: t2("Added 3 demo consultation questions. Check the consultation panel.")
314818
314922
  };
314819
314923
  }, "action")
314820
314924
  }
314821
314925
  ],
314822
314926
  action: /* @__PURE__ */ __name(async (context2, args) => {
314823
314927
  const argTrimmed = args.trim().toLowerCase();
314824
- const currentMode = context2.services.consultation?.consultationMode || "active";
314928
+ const currentMode = context2.services.consultation?.consultationMode || "blocking";
314825
314929
  if (!argTrimmed) {
314826
314930
  return {
314827
314931
  type: "message",
314828
314932
  messageType: "info",
314829
- content: t2(`Consultation Mode
314933
+ content: t2(`Consultation System (Always Active)
314934
+ \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501
314830
314935
 
314831
- The consultation system gathers context from you as you work:
314832
- - Questions appear below the status bar
314833
- - Answer questions to improve agent responses
314834
- - Context is used across your session
314936
+ The consultation system continuously gathers context from you
314937
+ to improve the orchestrator's understanding of your project
314938
+ and requirements. This is a core feature that cannot be disabled.
314939
+
314940
+ Current mode: ${currentMode.toUpperCase()}
314835
314941
 
314836
314942
  Commands:
314837
- /consult on - Active mode (questions shown, agent continues)
314838
- /consult blocking - Blocking mode (high-priority questions pause agent)
314839
- /consult off - Disable consultation
314840
- /consult context - View collected context
314943
+ /consult active - Questions shown, agent continues
314944
+ /consult blocking - High-priority questions pause agent (recommended)
314945
+ /consult status - View status and collected context
314946
+ /consult context - View collected context only
314841
314947
  /consult clear - Clear all context
314842
314948
  /consult demo - Add demo questions for testing
314843
314949
 
314844
- Current mode: ${currentMode}`)
314950
+ How it works:
314951
+ 1. The orchestrator analyzes your prompts
314952
+ 2. Relevant questions are generated automatically
314953
+ 3. Your answers inform development decisions
314954
+ 4. Context persists across your session`)
314845
314955
  };
314846
314956
  }
314847
314957
  return {
@@ -317495,7 +317605,10 @@ var AVAILABLE_MODELS_OSA = [
317495
317605
  return t2(
317496
317606
  "Qwen3-Coder 480B Cloud - Most powerful agentic coding model with 256K context (Recommended)"
317497
317607
  );
317498
- }
317608
+ },
317609
+ contextWindow: 262144,
317610
+ // 256K
317611
+ tokensPerSecond: 100
317499
317612
  },
317500
317613
  {
317501
317614
  id: "qwen3-coder:30b",
@@ -317504,7 +317617,10 @@ var AVAILABLE_MODELS_OSA = [
317504
317617
  return t2(
317505
317618
  "Qwen3-Coder 30B - Efficient coding model with 30B params, only 3.3B activated"
317506
317619
  );
317507
- }
317620
+ },
317621
+ contextWindow: 131072,
317622
+ // 131K
317623
+ tokensPerSecond: 150
317508
317624
  },
317509
317625
  {
317510
317626
  id: MAINLINE_VLM,
@@ -317514,7 +317630,56 @@ var AVAILABLE_MODELS_OSA = [
317514
317630
  "Vision model for multimodal tasks"
317515
317631
  );
317516
317632
  },
317517
- isVision: true
317633
+ isVision: true,
317634
+ contextWindow: 131072,
317635
+ // 131K
317636
+ tokensPerSecond: 50
317637
+ }
317638
+ ];
317639
+ var AVAILABLE_MODELS_GROQ = [
317640
+ {
317641
+ id: "moonshotai/kimi-k2-instruct-0905",
317642
+ label: "Kimi K2 0905 (Recommended)",
317643
+ get description() {
317644
+ return t2(
317645
+ "Kimi K2 0905 - Best coding model with 256K context, ~200 tok/s on GROQ"
317646
+ );
317647
+ },
317648
+ contextWindow: 262144,
317649
+ // 256K
317650
+ tokensPerSecond: 200
317651
+ },
317652
+ {
317653
+ id: "moonshotai/kimi-k2-instruct",
317654
+ label: "Kimi K2 Instruct (Legacy)",
317655
+ get description() {
317656
+ return t2(
317657
+ "Kimi K2 Instruct - 131K context (deprecated, use 0905 instead)"
317658
+ );
317659
+ },
317660
+ contextWindow: 131072,
317661
+ // 131K
317662
+ tokensPerSecond: 200
317663
+ },
317664
+ {
317665
+ id: "llama-3.3-70b-versatile",
317666
+ label: "Llama 3.3 70B",
317667
+ get description() {
317668
+ return t2("Llama 3.3 70B - Versatile model for general coding tasks");
317669
+ },
317670
+ contextWindow: 131072,
317671
+ // 131K
317672
+ tokensPerSecond: 280
317673
+ },
317674
+ {
317675
+ id: "llama-3.1-8b-instant",
317676
+ label: "Llama 3.1 8B Instant",
317677
+ get description() {
317678
+ return t2("Llama 3.1 8B - Ultra-fast for simple tasks");
317679
+ },
317680
+ contextWindow: 131072,
317681
+ // 131K
317682
+ tokensPerSecond: 560
317518
317683
  }
317519
317684
  ];
317520
317685
  var AVAILABLE_MODELS_OLLAMA_LOCAL = [
@@ -317524,7 +317689,11 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
317524
317689
  get description() {
317525
317690
  return t2("Qwen2.5-Coder 32B - Best local coding model (requires ~20GB VRAM)");
317526
317691
  },
317527
- isLocal: true
317692
+ isLocal: true,
317693
+ contextWindow: 131072,
317694
+ // 128K
317695
+ tokensPerSecond: 30
317696
+ // Varies by hardware
317528
317697
  },
317529
317698
  {
317530
317699
  id: "qwen2.5-coder:14b",
@@ -317532,7 +317701,10 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
317532
317701
  get description() {
317533
317702
  return t2("Qwen2.5-Coder 14B - Great local coding model (requires ~10GB VRAM)");
317534
317703
  },
317535
- isLocal: true
317704
+ isLocal: true,
317705
+ contextWindow: 131072,
317706
+ // 128K
317707
+ tokensPerSecond: 50
317536
317708
  },
317537
317709
  {
317538
317710
  id: "qwen2.5-coder:7b",
@@ -317540,7 +317712,10 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
317540
317712
  get description() {
317541
317713
  return t2("Qwen2.5-Coder 7B - Good local coding model (requires ~5GB VRAM)");
317542
317714
  },
317543
- isLocal: true
317715
+ isLocal: true,
317716
+ contextWindow: 131072,
317717
+ // 128K
317718
+ tokensPerSecond: 80
317544
317719
  },
317545
317720
  {
317546
317721
  id: "codellama:34b",
@@ -317548,7 +317723,10 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
317548
317723
  get description() {
317549
317724
  return t2("CodeLlama 34B - Meta coding model (requires ~20GB VRAM)");
317550
317725
  },
317551
- isLocal: true
317726
+ isLocal: true,
317727
+ contextWindow: 16384,
317728
+ // 16K
317729
+ tokensPerSecond: 25
317552
317730
  },
317553
317731
  {
317554
317732
  id: "deepseek-coder-v2:16b",
@@ -317556,7 +317734,10 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
317556
317734
  get description() {
317557
317735
  return t2("DeepSeek Coder V2 16B - Efficient MoE coding model");
317558
317736
  },
317559
- isLocal: true
317737
+ isLocal: true,
317738
+ contextWindow: 131072,
317739
+ // 128K
317740
+ tokensPerSecond: 60
317560
317741
  },
317561
317742
  {
317562
317743
  id: "llama3.2:latest",
@@ -317564,7 +317745,10 @@ var AVAILABLE_MODELS_OLLAMA_LOCAL = [
317564
317745
  get description() {
317565
317746
  return t2("Llama 3.2 - General purpose model with coding capabilities");
317566
317747
  },
317567
- isLocal: true
317748
+ isLocal: true,
317749
+ contextWindow: 131072,
317750
+ // 128K
317751
+ tokensPerSecond: 70
317568
317752
  }
317569
317753
  ];
317570
317754
  function getOpenAIAvailableModelFromEnv() {
@@ -317579,6 +317763,8 @@ function getAvailableModelsForAuthType(authType) {
317579
317763
  return AVAILABLE_MODELS_OSA;
317580
317764
  case AuthType2.OLLAMA_LOCAL:
317581
317765
  return AVAILABLE_MODELS_OLLAMA_LOCAL;
317766
+ case AuthType2.USE_GROQ:
317767
+ return AVAILABLE_MODELS_GROQ;
317582
317768
  case AuthType2.USE_OPENAI: {
317583
317769
  const openAIModel = getOpenAIAvailableModelFromEnv();
317584
317770
  return openAIModel ? [openAIModel] : [];
@@ -317666,6 +317852,243 @@ var permissionsCommand = {
317666
317852
  }), "action")
317667
317853
  };
317668
317854
 
317855
+ // packages/cli/src/ui/commands/providerCommand.ts
317856
+ init_esbuild_shims();
317857
+ var providerCommand = {
317858
+ name: "provider",
317859
+ get description() {
317860
+ return t2("Switch AI provider (GROQ, Ollama, OpenAI, etc.)");
317861
+ },
317862
+ kind: "built-in" /* BUILT_IN */,
317863
+ subCommands: [
317864
+ {
317865
+ name: "groq",
317866
+ get description() {
317867
+ return t2("Switch to GROQ (fast inference with Kimi K2)");
317868
+ },
317869
+ kind: "built-in" /* BUILT_IN */,
317870
+ action: /* @__PURE__ */ __name(async (context2) => {
317871
+ const { config } = context2.services;
317872
+ if (!config) {
317873
+ return {
317874
+ type: "message",
317875
+ messageType: "error",
317876
+ content: t2("Configuration not available.")
317877
+ };
317878
+ }
317879
+ const apiKey = process.env["GROQ_API_KEY"];
317880
+ if (!apiKey) {
317881
+ return {
317882
+ type: "start_auth",
317883
+ authType: AuthType2.USE_GROQ
317884
+ };
317885
+ }
317886
+ try {
317887
+ await config.refreshAuth(AuthType2.USE_GROQ);
317888
+ return {
317889
+ type: "message",
317890
+ messageType: "info",
317891
+ content: t2(`Switched to GROQ provider.
317892
+
317893
+ Model: moonshotai/kimi-k2-instruct-0905
317894
+ Speed: ~200 tok/s
317895
+ Context: 256K tokens
317896
+
317897
+ Use /model to change models within GROQ.`)
317898
+ };
317899
+ } catch (error) {
317900
+ return {
317901
+ type: "message",
317902
+ messageType: "error",
317903
+ content: t2(`Failed to switch to GROQ: ${error instanceof Error ? error.message : String(error)}`)
317904
+ };
317905
+ }
317906
+ }, "action")
317907
+ },
317908
+ {
317909
+ name: "ollama",
317910
+ get description() {
317911
+ return t2("Switch to local Ollama");
317912
+ },
317913
+ kind: "built-in" /* BUILT_IN */,
317914
+ action: /* @__PURE__ */ __name(async (context2) => {
317915
+ const { config } = context2.services;
317916
+ if (!config) {
317917
+ return {
317918
+ type: "message",
317919
+ messageType: "error",
317920
+ content: t2("Configuration not available.")
317921
+ };
317922
+ }
317923
+ try {
317924
+ await config.refreshAuth(AuthType2.OLLAMA_LOCAL);
317925
+ return {
317926
+ type: "message",
317927
+ messageType: "info",
317928
+ content: t2(`Switched to local Ollama provider.
317929
+
317930
+ Make sure Ollama is running: ollama serve
317931
+ Use /model to select a local model.`)
317932
+ };
317933
+ } catch (error) {
317934
+ return {
317935
+ type: "message",
317936
+ messageType: "error",
317937
+ content: t2(`Failed to switch to Ollama: ${error instanceof Error ? error.message : String(error)}`)
317938
+ };
317939
+ }
317940
+ }, "action")
317941
+ },
317942
+ {
317943
+ name: "openai",
317944
+ get description() {
317945
+ return t2("Switch to OpenAI compatible provider");
317946
+ },
317947
+ kind: "built-in" /* BUILT_IN */,
317948
+ action: /* @__PURE__ */ __name(async (context2) => {
317949
+ const { config } = context2.services;
317950
+ if (!config) {
317951
+ return {
317952
+ type: "message",
317953
+ messageType: "error",
317954
+ content: t2("Configuration not available.")
317955
+ };
317956
+ }
317957
+ const apiKey = process.env["OPENAI_API_KEY"];
317958
+ if (!apiKey) {
317959
+ return {
317960
+ type: "start_auth",
317961
+ authType: AuthType2.USE_OPENAI
317962
+ };
317963
+ }
317964
+ try {
317965
+ await config.refreshAuth(AuthType2.USE_OPENAI);
317966
+ return {
317967
+ type: "message",
317968
+ messageType: "info",
317969
+ content: t2(`Switched to OpenAI provider.
317970
+
317971
+ Set OPENAI_BASE_URL for custom endpoints.
317972
+ Use /model to select a model.`)
317973
+ };
317974
+ } catch (error) {
317975
+ return {
317976
+ type: "message",
317977
+ messageType: "error",
317978
+ content: t2(`Failed to switch to OpenAI: ${error instanceof Error ? error.message : String(error)}`)
317979
+ };
317980
+ }
317981
+ }, "action")
317982
+ },
317983
+ {
317984
+ name: "status",
317985
+ get description() {
317986
+ return t2("Show current provider status");
317987
+ },
317988
+ kind: "built-in" /* BUILT_IN */,
317989
+ action: /* @__PURE__ */ __name(async (context2) => {
317990
+ const { config } = context2.services;
317991
+ if (!config) {
317992
+ return {
317993
+ type: "message",
317994
+ messageType: "error",
317995
+ content: t2("Configuration not available.")
317996
+ };
317997
+ }
317998
+ const authType = config.getAuthType();
317999
+ const contentGeneratorConfig = config.getContentGeneratorConfig();
318000
+ const model = contentGeneratorConfig?.model || "unknown";
318001
+ const baseUrl = contentGeneratorConfig?.baseUrl || "default";
318002
+ let providerName = "Unknown";
318003
+ switch (authType) {
318004
+ case AuthType2.USE_GROQ:
318005
+ providerName = "GROQ";
318006
+ break;
318007
+ case AuthType2.OLLAMA_LOCAL:
318008
+ providerName = "Ollama (Local)";
318009
+ break;
318010
+ case AuthType2.OLLAMA_CLOUD:
318011
+ providerName = "Ollama (Cloud)";
318012
+ break;
318013
+ case AuthType2.USE_OPENAI:
318014
+ providerName = "OpenAI Compatible";
318015
+ break;
318016
+ case AuthType2.OSA_OAUTH:
318017
+ providerName = "OSAgent Cloud";
318018
+ break;
318019
+ default:
318020
+ providerName = String(authType);
318021
+ }
318022
+ return {
318023
+ type: "message",
318024
+ messageType: "info",
318025
+ content: t2(`Provider Status
318026
+ \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501
318027
+ Provider: ${providerName}
318028
+ Model: ${model}
318029
+ Base URL: ${baseUrl}
318030
+
318031
+ Commands:
318032
+ /provider groq - Switch to GROQ (Kimi K2)
318033
+ /provider ollama - Switch to local Ollama
318034
+ /provider openai - Switch to OpenAI compatible
318035
+ /model - Change model within current provider`)
318036
+ };
318037
+ }, "action")
318038
+ }
318039
+ ],
318040
+ action: /* @__PURE__ */ __name(async (context2) => {
318041
+ const { config } = context2.services;
318042
+ if (!config) {
318043
+ return {
318044
+ type: "message",
318045
+ messageType: "error",
318046
+ content: t2("Configuration not available.")
318047
+ };
318048
+ }
318049
+ const authType = config.getAuthType();
318050
+ let providerName = "Unknown";
318051
+ switch (authType) {
318052
+ case AuthType2.USE_GROQ:
318053
+ providerName = "GROQ";
318054
+ break;
318055
+ case AuthType2.OLLAMA_LOCAL:
318056
+ providerName = "Ollama (Local)";
318057
+ break;
318058
+ case AuthType2.OLLAMA_CLOUD:
318059
+ providerName = "Ollama (Cloud)";
318060
+ break;
318061
+ case AuthType2.USE_OPENAI:
318062
+ providerName = "OpenAI Compatible";
318063
+ break;
318064
+ case AuthType2.OSA_OAUTH:
318065
+ providerName = "OSAgent Cloud";
318066
+ break;
318067
+ default:
318068
+ providerName = String(authType);
318069
+ }
318070
+ return {
318071
+ type: "message",
318072
+ messageType: "info",
318073
+ content: t2(`AI Provider Management
318074
+ \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501
318075
+
318076
+ Current Provider: ${providerName}
318077
+
318078
+ Available Providers:
318079
+ /provider groq - GROQ (Kimi K2, ~200 tok/s, 256K context)
318080
+ /provider ollama - Local Ollama (qwen2.5-coder, etc.)
318081
+ /provider openai - OpenAI compatible APIs
318082
+ /provider status - Show detailed status
318083
+
318084
+ Environment Variables:
318085
+ GROQ_API_KEY - For GROQ provider
318086
+ OPENAI_API_KEY - For OpenAI provider
318087
+ OPENAI_BASE_URL - Custom endpoint URL`)
318088
+ };
318089
+ }, "action")
318090
+ };
318091
+
317669
318092
  // packages/cli/src/ui/commands/quitCommand.ts
317670
318093
  init_esbuild_shims();
317671
318094
  var quitConfirmCommand = {
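The providerCommand above returns a start_auth result instead of an error when the required API key is missing; a new start_auth case in the slash-command processor (later in this diff) hands that result to startAuthForProvider, which opens the key prompt. A compressed sketch of that round trip follows, using simplified stand-in types rather than the package's real result union.

    // Simplified sketch of the start_auth flow; the types are stand-ins, not the package's definitions.
    type SlashCommandResult =
      | { type: "message"; messageType: "info" | "error"; content: string }
      | { type: "start_auth"; authType: string };

    function providerGroqAction(env: Record<string, string | undefined>): SlashCommandResult {
      if (!env["GROQ_API_KEY"]) {
        // No key available: defer to the auth dialog instead of failing.
        return { type: "start_auth", authType: "groq" };
      }
      return { type: "message", messageType: "info", content: "Switched to GROQ provider." };
    }

    // Mirrors the new case in useSlashCommandProcessor: forward start_auth to the auth hook.
    function handleResult(
      result: SlashCommandResult,
      startAuthForProvider: (authType: string) => void,
    ): void {
      if (result.type === "start_auth") {
        startAuthForProvider(result.authType);
      }
    }

    handleResult(providerGroqAction({}), (authType) => console.log(`prompting for ${authType} key`));
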
@@ -319844,6 +320267,7 @@ var BuiltinCommandLoader = class {
319844
320267
  memoryCommand,
319845
320268
  modelCommand,
319846
320269
  ...this.config?.getFolderTrust() ? [permissionsCommand] : [],
320270
+ providerCommand,
319847
320271
  quitCommand,
319848
320272
  quitConfirmCommand,
319849
320273
  resetCommand,
@@ -347435,7 +347859,7 @@ init_esbuild_shims();
347435
347859
 
347436
347860
  // packages/cli/src/ui/components/AsciiArt.ts
347437
347861
  init_esbuild_shims();
347438
- var longAsciiLogo = `
347862
+ var getLongAsciiLogo = /* @__PURE__ */ __name(() => `
347439
347863
  \u250F\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2513
347440
347864
  \u2503 \u2503
347441
347865
  \u2503 \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2557 \u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2503
@@ -347446,10 +347870,10 @@ var longAsciiLogo = `
347446
347870
  \u2503 \u255A\u2550\u2550\u2550\u2550\u2550\u255D \u255A\u2550\u2550\u2550\u2550\u2550\u2550\u255D \u255A\u2550\u255D \u255A\u2550\u255D \u255A\u2550\u2550\u2550\u2550\u2550\u255D \u255A\u2550\u2550\u2550\u2550\u2550\u2550\u255D \u255A\u2550\u255D \u255A\u2550\u2550\u2550\u255D \u255A\u2550\u255D \u2503
347447
347871
  \u2503 \u2503
347448
347872
  \u2503\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2503
347449
- \u2503 \u25B8 KERNEL-LEVEL AI \u25B8 AUTONOMOUS CODING \u25B8 v0.1.11 \u2503
347873
+ \u2503 \u25B8 AUTONOMOUS DEVELOPMENT \u25B8 INTELLIGENT ORCHESTRATION \u25B8 v${CLI_VERSION2.padEnd(6)}\u2503
347450
347874
  \u2517\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u251B
347451
- `;
347452
- var shortAsciiLogo = `
347875
+ `, "getLongAsciiLogo");
347876
+ var getShortAsciiLogo = /* @__PURE__ */ __name(() => `
347453
347877
  \u250F\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2513
347454
347878
  \u2503 \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2557 \u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2503
347455
347879
  \u2503 \u2588\u2588\u2554\u2550\u2550\u2550\u2588\u2588\u2557 \u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255D \u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557 \u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255D \u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255D \u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2551 \u255A\u2550\u2588\u2588\u2554\u2550\u255D\u2503
@@ -347458,16 +347882,21 @@ var shortAsciiLogo = `
347458
347882
  \u2503 \u255A\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255D \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551 \u255A\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255D \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2551 \u255A\u2588\u2588\u2588\u2588\u2551 \u2588\u2588\u2551 \u2503
347459
347883
  \u2503 \u255A\u2550\u2550\u2550\u2550\u2550\u255D \u255A\u2550\u2550\u2550\u2550\u2550\u2550\u255D \u255A\u2550\u255D \u255A\u2550\u255D \u255A\u2550\u2550\u2550\u2550\u2550\u255D \u255A\u2550\u2550\u2550\u2550\u2550\u2550\u255D \u255A\u2550\u255D \u255A\u2550\u2550\u2550\u255D \u255A\u2550\u255D \u2503
347460
347884
  \u2523\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u252B
347461
- \u2503 \u25B8 AUTONOMOUS CODING INTELLIGENCE v0.1.11 \u2503
347885
+ \u2503 \u25B8 CONTEXT-AWARE ORCHESTRATION v${CLI_VERSION2.padEnd(6)} \u2503
347462
347886
  \u2517\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u251B
347463
- `;
347464
- var tinyAsciiLogo = `
347887
+ `, "getShortAsciiLogo");
347888
+ var getTinyAsciiLogo = /* @__PURE__ */ __name(() => `
347465
347889
  \u250F\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2513
347466
347890
  \u2503 \u25C6 O S A G E N T \u25C6 \u2503
347467
347891
  \u2523\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u252B
347468
- \u2503 \u25B8 v0.1.11 \u25B8 READY \u2503
347892
+ \u2503 \u25B8 v${CLI_VERSION2.padEnd(6)} \u25B8 READY \u2503
347469
347893
  \u2517\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u251B
347470
- `;
347894
+ `, "getTinyAsciiLogo");
347895
+ var getMinimalLogo = /* @__PURE__ */ __name(() => getTinyAsciiLogo(), "getMinimalLogo");
347896
+ var longAsciiLogo = getLongAsciiLogo();
347897
+ var shortAsciiLogo = getShortAsciiLogo();
347898
+ var tinyAsciiLogo = getTinyAsciiLogo();
347899
+ var minimalLogo = getMinimalLogo();
347471
347900
 
347472
347901
  // packages/cli/src/ui/components/Header.tsx
347473
347902
  var import_jsx_runtime59 = __toESM(require_jsx_runtime(), 1);
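The banner strings are now built from CLI_VERSION2 via template literals, with padEnd(6) keeping the box borders aligned for version strings up to six characters. A quick check of that padding behavior (the version values here are examples):

    // padEnd keeps the banner column width stable across version lengths (illustrative values).
    console.log(`v${"0.1.27".padEnd(6)}|`); // "v0.1.27|" - already six characters, unchanged
    console.log(`v${"0.1.9".padEnd(6)}|`);  // "v0.1.9 |" - padded with one trailing space
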
@@ -347517,22 +347946,24 @@ var import_jsx_runtime60 = __toESM(require_jsx_runtime(), 1);
347517
347946
  var Tips = /* @__PURE__ */ __name(({ config }) => {
347518
347947
  const OSAMdFileCount = config.getOSAMdFileCount();
347519
347948
  return /* @__PURE__ */ (0, import_jsx_runtime60.jsxs)(Box_default, { flexDirection: "column", children: [
347520
- /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { color: theme.text.primary, children: t2("Tips for getting started:") }),
347521
- /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { color: theme.text.primary, children: t2("1. Ask questions, edit files, or run commands.") }),
347522
- /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { color: theme.text.primary, children: t2("2. Be specific for the best results.") }),
347949
+ /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { color: theme.text.primary, bold: true, children: t2("Autonomous Development System") }),
347950
+ /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { color: theme.text.secondary, dimColor: true, children: t2("Consultation, context tracking, and orchestration are always active.") }),
347951
+ /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { color: theme.text.primary, children: t2("1. Describe what you want to build - I'll analyze and ask clarifying questions.") }),
347952
+ /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { color: theme.text.primary, children: t2("2. Answer questions as they appear to improve results.") }),
347953
+ /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { color: theme.text.primary, children: t2("3. Your roadmap is tracked and saved for continuity.") }),
347523
347954
  OSAMdFileCount === 0 && /* @__PURE__ */ (0, import_jsx_runtime60.jsxs)(Text3, { color: theme.text.primary, children: [
347524
- "3. Create",
347955
+ "4. Create",
347525
347956
  " ",
347526
347957
  /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { bold: true, color: theme.text.accent, children: "OSAGENT.md" }),
347527
347958
  " ",
347528
- t2("files to customize your interactions with OSAgent.")
347959
+ t2("to provide project context.")
347529
347960
  ] }),
347530
347961
  /* @__PURE__ */ (0, import_jsx_runtime60.jsxs)(Text3, { color: theme.text.primary, children: [
347531
- OSAMdFileCount === 0 ? "4." : "3.",
347962
+ OSAMdFileCount === 0 ? "5." : "4.",
347532
347963
  " ",
347533
- /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { bold: true, color: theme.text.accent, children: "/help" }),
347964
+ /* @__PURE__ */ (0, import_jsx_runtime60.jsx)(Text3, { bold: true, color: theme.text.accent, children: "/consult status" }),
347534
347965
  " ",
347535
- t2("for more information.")
347966
+ t2("to view collected context.")
347536
347967
  ] })
347537
347968
  ] });
347538
347969
  }, "Tips");
@@ -351064,6 +351495,31 @@ var DialogManager = /* @__PURE__ */ __name(({
351064
351495
  }
351065
351496
  );
351066
351497
  }
351498
+ if (uiState.pendingAuthType === AuthType2.USE_GROQ) {
351499
+ return /* @__PURE__ */ (0, import_jsx_runtime86.jsx)(
351500
+ OpenAIKeyPrompt,
351501
+ {
351502
+ onSubmit: (apiKey, _baseUrl, model) => {
351503
+ process34.env["GROQ_API_KEY"] = apiKey;
351504
+ uiActions.handleAuthSelect(AuthType2.USE_GROQ, "User" /* User */, {
351505
+ apiKey,
351506
+ // Always use GROQ endpoint - ignore user-provided baseUrl
351507
+ baseUrl: "https://api.groq.com/openai/v1",
351508
+ model: model || "moonshotai/kimi-k2-instruct-0905"
351509
+ });
351510
+ },
351511
+ onCancel: () => {
351512
+ uiActions.cancelAuthentication();
351513
+ uiActions.setAuthState("updating" /* Updating */);
351514
+ },
351515
+ defaultApiKey: process34.env["GROQ_API_KEY"] || "",
351516
+ defaultBaseUrl: "https://api.groq.com/openai/v1",
351517
+ defaultModel: "moonshotai/kimi-k2-instruct-0905",
351518
+ providerName: "GROQ",
351519
+ apiKeyUrl: "https://console.groq.com/keys"
351520
+ }
351521
+ );
351522
+ }
351067
351523
  if (uiState.pendingAuthType === AuthType2.OSA_OAUTH) {
351068
351524
  return /* @__PURE__ */ (0, import_jsx_runtime86.jsx)(
351069
351525
  OSAOAuthProgress,
@@ -353990,11 +354446,11 @@ function useConsultation() {
353990
354446
  }
353991
354447
  __name(useConsultation, "useConsultation");
353992
354448
  function ConsultationProvider({ children }) {
353993
- const [isActive, setIsActive] = (0, import_react101.useState)(true);
354449
+ const isActive = true;
353994
354450
  const [currentQuestion, setCurrentQuestion] = (0, import_react101.useState)(null);
353995
354451
  const [questionQueue, setQuestionQueue] = (0, import_react101.useState)([]);
353996
354452
  const [isBlocking, setIsBlocking] = (0, import_react101.useState)(false);
353997
- const [consultationMode, setConsultationModeState] = (0, import_react101.useState)("active");
354453
+ const [consultationMode, setConsultationModeState] = (0, import_react101.useState)("blocking");
353998
354454
  const [collectedContext, setCollectedContext] = (0, import_react101.useState)({
353999
354455
  questions: [],
354000
354456
  answers: /* @__PURE__ */ new Map(),
@@ -354031,6 +354487,7 @@ function ConsultationProvider({ children }) {
354031
354487
  setCollectedContext((prev) => {
354032
354488
  const newAnswers = new Map(prev.answers);
354033
354489
  newAnswers.set(questionId, answer);
354490
+ const answeredQuestion = prev.questions.find((q) => q.id === questionId);
354034
354491
  const updatedQuestions = prev.questions.map(
354035
354492
  (q) => q.id === questionId ? { ...q, answered: true, answer } : q
354036
354493
  );
@@ -354038,8 +354495,11 @@ function ConsultationProvider({ children }) {
354038
354495
  ...prev,
354039
354496
  answers: newAnswers,
354040
354497
  questions: updatedQuestions,
354041
- projectContext: [...prev.projectContext, `Q: ${prev.questions.find((q) => q.id === questionId)?.question}
354042
- A: ${answer}`]
354498
+ projectContext: [
354499
+ ...prev.projectContext,
354500
+ `Q: ${answeredQuestion?.question}
354501
+ A: ${answer}`
354502
+ ]
354043
354503
  };
354044
354504
  });
354045
354505
  setQuestionQueue((prev) => prev.filter((q) => q.id !== questionId));
@@ -354048,10 +354508,14 @@ A: ${answer}`]
354048
354508
  setCurrentQuestion(next);
354049
354509
  if (!next) {
354050
354510
  setIsBlocking(false);
354511
+ } else if (consultationMode === "blocking" && next.priority === "high") {
354512
+ setIsBlocking(true);
354513
+ } else {
354514
+ setIsBlocking(false);
354051
354515
  }
354052
354516
  return prev.slice(1);
354053
354517
  });
354054
- }, []);
354518
+ }, [consultationMode]);
354055
354519
  const skipQuestion = (0, import_react101.useCallback)((questionId) => {
354056
354520
  setQuestionQueue((prev) => {
354057
354521
  const filtered = prev.filter((q) => q.id !== questionId);
@@ -354059,17 +354523,22 @@ A: ${answer}`]
354059
354523
  setCurrentQuestion(next);
354060
354524
  if (!next) {
354061
354525
  setIsBlocking(false);
354526
+ } else if (consultationMode === "blocking" && next.priority === "high") {
354527
+ setIsBlocking(true);
354528
+ } else {
354529
+ setIsBlocking(false);
354062
354530
  }
354063
354531
  return filtered.slice(1);
354064
354532
  });
354065
- }, []);
354533
+ }, [consultationMode]);
354066
354534
  const setConsultationMode = (0, import_react101.useCallback)((mode) => {
354067
354535
  setConsultationModeState(mode);
354068
- setIsActive(mode !== "passive");
354069
- if (mode !== "blocking") {
354536
+ if (mode === "blocking" && currentQuestion?.priority === "high") {
354537
+ setIsBlocking(true);
354538
+ } else if (mode === "active") {
354070
354539
  setIsBlocking(false);
354071
354540
  }
354072
- }, []);
354541
+ }, [currentQuestion]);
354073
354542
  const getContextForAgent = (0, import_react101.useCallback)(() => {
354074
354543
  const contextParts = [];
354075
354544
  if (collectedContext.projectContext.length > 0) {
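The consultation hook changes above remove the old passive (off) state and recompute blocking whenever a question is answered or skipped: the agent pauses only while the mode is "blocking" and the next question is high priority. A small sketch of that rule, with an assumed minimal question shape rather than the package's actual type:

    // Illustrative restatement of the new blocking rule; the question shape is assumed, not the package's type.
    type ConsultationMode = "active" | "blocking";

    interface ConsultationQuestion {
      id: string;
      question: string;
      priority: "low" | "medium" | "high";
    }

    // After answering or skipping, the agent pauses only for a high-priority question in blocking mode.
    function shouldBlock(mode: ConsultationMode, next: ConsultationQuestion | null): boolean {
      return next !== null && mode === "blocking" && next.priority === "high";
    }

    const q: ConsultationQuestion = { id: "q1", question: "Which database should this project use?", priority: "high" };
    console.log(shouldBlock("blocking", q)); // true  - agent waits for the answer
    console.log(shouldBlock("active", q));   // false - question is shown, agent keeps working
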
@@ -354082,6 +354551,13 @@ A: ${answer}`]
354082
354551
  contextParts.push(`- ${key}: ${value2}`);
354083
354552
  }
354084
354553
  }
354554
+ const unansweredQuestions = collectedContext.questions.filter((q) => !q.answered);
354555
+ if (unansweredQuestions.length > 0) {
354556
+ contextParts.push("\n## Pending Questions\n");
354557
+ for (const q of unansweredQuestions) {
354558
+ contextParts.push(`- [${q.priority}] ${q.question}`);
354559
+ }
354560
+ }
354085
354561
  return contextParts.join("\n");
354086
354562
  }, [collectedContext]);
354087
354563
  const clearContext = (0, import_react101.useCallback)(() => {
@@ -354098,6 +354574,23 @@ A: ${answer}`]
354098
354574
  const setBlocking2 = (0, import_react101.useCallback)((blocking) => {
354099
354575
  setIsBlocking(blocking);
354100
354576
  }, []);
354577
+ const addUserContext = (0, import_react101.useCallback)((context2, category) => {
354578
+ setCollectedContext((prev) => {
354579
+ if (category) {
354580
+ return {
354581
+ ...prev,
354582
+ userPreferences: {
354583
+ ...prev.userPreferences,
354584
+ [category]: context2
354585
+ }
354586
+ };
354587
+ }
354588
+ return {
354589
+ ...prev,
354590
+ projectContext: [...prev.projectContext, context2]
354591
+ };
354592
+ });
354593
+ }, []);
354101
354594
  const value = {
354102
354595
  isActive,
354103
354596
  currentQuestion,
@@ -354111,7 +354604,8 @@ A: ${answer}`]
354111
354604
  setConsultationMode,
354112
354605
  getContextForAgent,
354113
354606
  clearContext,
354114
- setBlocking: setBlocking2
354607
+ setBlocking: setBlocking2,
354608
+ addUserContext
354115
354609
  };
354116
354610
  return /* @__PURE__ */ (0, import_jsx_runtime106.jsx)(ConsultationContext.Provider, { value, children });
354117
354611
  }
@@ -354120,9 +354614,12 @@ __name(ConsultationProvider, "ConsultationProvider");
354120
354614
  // packages/cli/src/ui/components/TaskMasterPanel.tsx
354121
354615
  var import_jsx_runtime107 = __toESM(require_jsx_runtime(), 1);
354122
354616
  var STATUS_ICONS2 = {
354123
- pending: "\u25CB",
354617
+ pending: "\u2610",
354618
+ // Empty checkbox
354124
354619
  in_progress: "\u25D0",
354125
- completed: "\u25CF"
354620
+ // Half-filled
354621
+ completed: "\u2611"
354622
+ // Checked checkbox
354126
354623
  };
354127
354624
  function formatTokenCount(count) {
354128
354625
  if (count >= 1e6) {
@@ -354200,7 +354697,7 @@ var TaskMasterPanel = /* @__PURE__ */ __name(({
354200
354697
  const totalCount = todos.length;
354201
354698
  const progressPercent = totalCount > 0 ? Math.round(completedCount / totalCount * 100) : 0;
354202
354699
  const allQuestions = currentQuestion ? [currentQuestion, ...questionQueue.filter((q) => q.id !== currentQuestion.id)] : questionQueue;
354203
- if (!isResponding && todos.length === 0 && subagentComments.length === 0) {
354700
+ if (todos.length === 0 && !isResponding && subagentComments.length === 0) {
354204
354701
  return null;
354205
354702
  }
354206
354703
  return /* @__PURE__ */ (0, import_jsx_runtime107.jsxs)(Box_default, { flexDirection: "column", marginBottom: 1, children: [
@@ -354275,32 +354772,36 @@ var TaskMasterPanel = /* @__PURE__ */ __name(({
354275
354772
  ] })
354276
354773
  ] }),
354277
354774
  todos.length > 0 && /* @__PURE__ */ (0, import_jsx_runtime107.jsxs)(Box_default, { flexDirection: "column", marginTop: 1, paddingLeft: 1, children: [
354278
- todos.slice(0, compact ? 3 : 5).map((todo, idx) => {
354775
+ todos.slice(0, compact ? 5 : 10).map((todo, idx) => {
354279
354776
  const isCompleted = todo.status === "completed";
354280
354777
  const isInProgress = todo.status === "in_progress";
354281
- const isLast = idx === Math.min(todos.length, compact ? 3 : 5) - 1;
354282
- const color = isCompleted ? theme.text.secondary : isInProgress ? theme.status.success : theme.text.primary;
354778
+ const isLast = idx === Math.min(todos.length, compact ? 5 : 10) - 1;
354779
+ const iconColor = isInProgress ? Colors.AccentYellow : isCompleted ? theme.text.secondary : theme.text.primary;
354780
+ const textColor = isInProgress ? Colors.AccentYellow : isCompleted ? theme.text.secondary : theme.text.primary;
354283
354781
  return /* @__PURE__ */ (0, import_jsx_runtime107.jsxs)(Box_default, { children: [
354284
- /* @__PURE__ */ (0, import_jsx_runtime107.jsxs)(Text3, { color, children: [
354285
- isLast ? "\u2514\u2500" : "\u251C\u2500",
354286
- " ",
354287
- isInProgress ? "\u25D0" : STATUS_ICONS2[todo.status],
354782
+ /* @__PURE__ */ (0, import_jsx_runtime107.jsxs)(Text3, { color: theme.text.secondary, children: [
354783
+ isLast ? "\u2514" : "\u251C",
354784
+ " "
354785
+ ] }),
354786
+ /* @__PURE__ */ (0, import_jsx_runtime107.jsxs)(Text3, { color: iconColor, children: [
354787
+ STATUS_ICONS2[todo.status],
354288
354788
  " "
354289
354789
  ] }),
354290
354790
  /* @__PURE__ */ (0, import_jsx_runtime107.jsx)(
354291
354791
  Text3,
354292
354792
  {
354293
- color,
354793
+ color: textColor,
354294
354794
  dimColor: isCompleted,
354295
354795
  strikethrough: isCompleted,
354796
+ bold: isInProgress,
354296
354797
  children: isInProgress && todo.activeForm ? todo.activeForm : todo.content
354297
354798
  }
354298
354799
  )
354299
354800
  ] }, todo.id);
354300
354801
  }),
354301
- todos.length > (compact ? 3 : 5) && /* @__PURE__ */ (0, import_jsx_runtime107.jsxs)(Text3, { color: theme.text.secondary, children: [
354302
- "\u2514\u2500 ... and ",
354303
- todos.length - (compact ? 3 : 5),
354802
+ todos.length > (compact ? 5 : 10) && /* @__PURE__ */ (0, import_jsx_runtime107.jsxs)(Text3, { color: theme.text.secondary, children: [
354803
+ "\u2514 ... and ",
354804
+ todos.length - (compact ? 5 : 10),
354304
354805
  " more"
354305
354806
  ] })
354306
354807
  ] })
@@ -354721,6 +355222,18 @@ var useAuthCommand = /* @__PURE__ */ __name((settings, config, addItem) => {
354721
355222
  await performAuth(authType, scope, credentials);
354722
355223
  return;
354723
355224
  }
355225
+ if (authType === AuthType2.USE_GROQ) {
355226
+ if (credentials) {
355227
+ config.updateCredentials({
355228
+ apiKey: credentials.apiKey,
355229
+ // Always use GROQ endpoint - ignore user-provided baseUrl
355230
+ baseUrl: "https://api.groq.com/openai/v1",
355231
+ model: credentials.model || "moonshotai/kimi-k2-instruct-0905"
355232
+ });
355233
+ await performAuth(authType, scope, credentials);
355234
+ }
355235
+ return;
355236
+ }
354724
355237
  await performAuth(authType, scope);
354725
355238
  },
354726
355239
  [config, performAuth]
@@ -354728,6 +355241,12 @@ var useAuthCommand = /* @__PURE__ */ __name((settings, config, addItem) => {
354728
355241
  const openAuthDialog = (0, import_react106.useCallback)(() => {
354729
355242
  setIsAuthDialogOpen(true);
354730
355243
  }, []);
355244
+ const startAuthForProvider = (0, import_react106.useCallback)((authType) => {
355245
+ setPendingAuthType(authType);
355246
+ setAuthError(null);
355247
+ setIsAuthDialogOpen(false);
355248
+ setIsAuthenticating(true);
355249
+ }, []);
354731
355250
  const cancelAuthentication = (0, import_react106.useCallback)(() => {
354732
355251
  if (isAuthenticating && pendingAuthType === AuthType2.OSA_OAUTH) {
354733
355252
  cancelOSAAuth();
@@ -354771,6 +355290,7 @@ var useAuthCommand = /* @__PURE__ */ __name((settings, config, addItem) => {
354771
355290
  OSAAuthState,
354772
355291
  handleAuthSelect,
354773
355292
  openAuthDialog,
355293
+ startAuthForProvider,
354774
355294
  cancelAuthentication
354775
355295
  };
354776
355296
  }, "useAuthCommand");
@@ -355247,7 +355767,7 @@ var McpPromptLoader = class {
355247
355767
  };
355248
355768
 
355249
355769
  // packages/cli/src/ui/hooks/slashCommandProcessor.ts
355250
- var useSlashCommandProcessor = /* @__PURE__ */ __name((config, settings, addItem, clearItems, loadHistory, refreshStatic, toggleVimEnabled, setIsProcessing, setOSAMdFileCount, actions, extensionsUpdateState, isConfigInitialized) => {
355770
+ var useSlashCommandProcessor = /* @__PURE__ */ __name((config, settings, addItem, clearItems, loadHistory, refreshStatic, toggleVimEnabled, setIsProcessing, setOSAMdFileCount, actions, extensionsUpdateState, isConfigInitialized, consultation) => {
355251
355771
  const session = useSessionStats();
355252
355772
  const [commands, setCommands] = (0, import_react112.useState)([]);
355253
355773
  const [reloadTrigger, setReloadTrigger] = (0, import_react112.useState)(0);
@@ -355345,7 +355865,8 @@ var useSlashCommandProcessor = /* @__PURE__ */ __name((config, settings, addItem
355345
355865
  config,
355346
355866
  settings,
355347
355867
  git: gitService,
355348
- logger: logger6
355868
+ logger: logger6,
355869
+ consultation
355349
355870
  },
355350
355871
  ui: {
355351
355872
  addItem,
@@ -355377,6 +355898,7 @@ var useSlashCommandProcessor = /* @__PURE__ */ __name((config, settings, addItem
355377
355898
  settings,
355378
355899
  gitService,
355379
355900
  logger6,
355901
+ consultation,
355380
355902
  loadHistory,
355381
355903
  addItem,
355382
355904
  clearItems,
@@ -355655,6 +356177,10 @@ var useSlashCommandProcessor = /* @__PURE__ */ __name((config, settings, addItem
355655
356177
  true
355656
356178
  );
355657
356179
  }
356180
+ case "start_auth": {
356181
+ actions.startAuthForProvider(result.authType);
356182
+ return { type: "handled" };
356183
+ }
355658
356184
  default: {
355659
356185
  const unhandled = result;
355660
356186
  throw new Error(
@@ -361552,9 +362078,6 @@ function useConsultationGenerator({
361552
362078
  const lastPromptRef = (0, import_react127.useRef)(void 0);
361553
362079
  const questionsGeneratedRef = (0, import_react127.useRef)(false);
361554
362080
  (0, import_react127.useEffect)(() => {
361555
- if (!isActive || consultationMode === "passive") {
361556
- return;
361557
- }
361558
362081
  if (streamingState === "responding" /* Responding */ && currentPrompt && currentPrompt !== lastPromptRef.current && !questionsGeneratedRef.current) {
361559
362082
  lastPromptRef.current = currentPrompt;
361560
362083
  questionsGeneratedRef.current = true;
@@ -362271,6 +362794,7 @@ var AppContainer = /* @__PURE__ */ __name((props) => {
362271
362794
  const { stdout } = use_stdout_default();
362272
362795
  const { stats: sessionStats } = useSessionStats();
362273
362796
  const branchName = useGitBranchName(config.getTargetDir());
362797
+ const consultationContext = useConsultation();
362274
362798
  const mainControlsRef = (0, import_react139.useRef)(null);
362275
362799
  const originalTitleRef = (0, import_react139.useRef)(
362276
362800
  computeWindowTitle(basename14(config.getTargetDir()))
@@ -362389,6 +362913,7 @@ var AppContainer = /* @__PURE__ */ __name((props) => {
362389
362913
  OSAAuthState,
362390
362914
  handleAuthSelect,
362391
362915
  openAuthDialog,
362916
+ startAuthForProvider,
362392
362917
  cancelAuthentication
362393
362918
  } = useAuthCommand(settings, config, historyManager.addItem);
362394
362919
  const { proQuotaRequest, handleProQuotaChoice } = useQuotaAndFallback({
@@ -362478,7 +363003,8 @@ var AppContainer = /* @__PURE__ */ __name((props) => {
362478
363003
  addConfirmUpdateExtensionRequest,
362479
363004
  openSubagentCreateDialog,
362480
363005
  openAgentsManagerDialog,
362481
- _showQuitConfirmation: showQuitConfirmation
363006
+ _showQuitConfirmation: showQuitConfirmation,
363007
+ startAuthForProvider
362482
363008
  }),
362483
363009
  [
362484
363010
  openAuthDialog,
@@ -362495,7 +363021,8 @@ var AppContainer = /* @__PURE__ */ __name((props) => {
362495
363021
  addConfirmUpdateExtensionRequest,
362496
363022
  showQuitConfirmation,
362497
363023
  openSubagentCreateDialog,
362498
- openAgentsManagerDialog
363024
+ openAgentsManagerDialog,
363025
+ startAuthForProvider
362499
363026
  ]
362500
363027
  );
362501
363028
  const {
@@ -362518,7 +363045,17 @@ var AppContainer = /* @__PURE__ */ __name((props) => {
362518
363045
  setOSAMdFileCount,
362519
363046
  slashCommandActions,
362520
363047
  extensionsUpdateStateInternal,
362521
- isConfigInitialized
363048
+ isConfigInitialized,
363049
+ // Consultation service for command context - always active
363050
+ {
363051
+ isActive: consultationContext.isActive,
363052
+ consultationMode: consultationContext.consultationMode,
363053
+ setConsultationMode: consultationContext.setConsultationMode,
363054
+ addQuestion: consultationContext.addQuestion,
363055
+ getContextForAgent: consultationContext.getContextForAgent,
363056
+ clearContext: consultationContext.clearContext,
363057
+ questionQueue: consultationContext.questionQueue
363058
+ }
362522
363059
  );
362523
363060
  const handleVisionSwitchRequired = (0, import_react139.useCallback)(
362524
363061
  async (_query) => new Promise((resolve25, reject) => {
@@ -362757,17 +363294,25 @@ ${queuedText}` : queuedText;
362757
363294
  ]);
362758
363295
  const [idePromptAnswered, setIdePromptAnswered] = (0, import_react139.useState)(false);
362759
363296
  const [currentIDE, setCurrentIDE] = (0, import_react139.useState)(null);
363297
+ const idePromptShownRef = (0, import_react139.useRef)(false);
362760
363298
  (0, import_react139.useEffect)(() => {
362761
363299
  const getIde = /* @__PURE__ */ __name(async () => {
362762
363300
  const ideClient = await IdeClient.getInstance();
362763
363301
  const currentIde = ideClient.getCurrentIde();
362764
- setCurrentIDE(currentIde || null);
363302
+ if (!idePromptShownRef.current) {
363303
+ setCurrentIDE(currentIde || null);
363304
+ }
362765
363305
  }, "getIde");
362766
363306
  getIde();
362767
363307
  }, []);
362768
363308
  const shouldShowIdePrompt = Boolean(
362769
- currentIDE && !config.getIdeMode() && !settings.merged.ide?.hasSeenNudge && !idePromptAnswered
363309
+ !idePromptShownRef.current && currentIDE && !config.getIdeMode() && !settings.merged.ide?.hasSeenNudge && !idePromptAnswered
362770
363310
  );
363311
+ (0, import_react139.useEffect)(() => {
363312
+ if (shouldShowIdePrompt) {
363313
+ idePromptShownRef.current = true;
363314
+ }
363315
+ }, [shouldShowIdePrompt]);
362771
363316
  const [showErrorDetails, setShowErrorDetails] = (0, import_react139.useState)(false);
362772
363317
  const [showToolDescriptions, setShowToolDescriptions] = (0, import_react139.useState)(false);
362773
363318
  const [ctrlCPressedOnce, setCtrlCPressedOnce] = (0, import_react139.useState)(false);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "osagent",
3
- "version": "0.1.25",
3
+ "version": "0.1.27",
4
4
  "description": "OS Agent - AI-powered CLI for autonomous coding with Ollama Cloud and Qwen models",
5
5
  "author": "Roberto Luna",
6
6
  "license": "Apache-2.0",