@hopper-agent/cli 0.1.0 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/src/cli.js CHANGED
@@ -9,10 +9,10 @@ import { resolve, basename, isAbsolute, join, dirname } from 'node:path';
9
9
  import { fileURLToPath } from 'node:url';
10
10
  import { spawn } from 'node:child_process';
11
11
  const __filename = fileURLToPath(import.meta.url);
12
- import { ChatSurface, handleSlashCommand } from '@hopper-agent/tui';
12
+ import { ChatSurface, handleSlashCommand, COLD_START_BANNER } from '@hopper-agent/tui';
13
13
  import { getTheme } from '@hopper-agent/render';
14
14
  import { TurnLoop, SettingsManager, DEFAULT_SETTINGS, CronManager, TelegramGateway, ChannelRegistry, ChannelRouter, HeartbeatExecutor, HeartbeatDelivery, pickNextSchedule, scheduleLabel } from '@hopper-agent/core';
15
- import { AnthropicProvider, OpenAIProvider } from '@hopper-agent/providers';
15
+ import { AnthropicProvider, OpenAIProvider, OllamaProvider, GoogleGeminiProvider } from '@hopper-agent/providers';
16
16
  import { allTools } from '@hopper-agent/tools';
17
17
  import { createMcpTools } from '@hopper-agent/mcp';
18
18
  const VERSION = '0.5.1';
@@ -140,7 +140,8 @@ Usage:
140
140
  hopper-agent Interactive TUI
141
141
  hopper-agent "<prompt>" One-shot prompt
142
142
  hopper-agent -p "<prompt>" Headless mode, stdout answer
143
- hopper-agent resume [<id>] Resume session
143
+ hopper-agent resume Resume last session
144
+ hopper-agent resume <id> Resume specific session
144
145
  hopper-agent sessions list|show|rm Session management
145
146
  hopper-agent --version Show version
146
147
 
@@ -398,7 +399,15 @@ function formatDuration(ms) {
398
399
  const s = String(totalSeconds % 60).padStart(2, '0');
399
400
  return `${h}:${m}:${s}`;
400
401
  }
401
- function estimateCost(model, inputTokens, outputTokens) {
402
+ function estimateCost(model, inputTokens, outputTokens, customCost) {
403
+ if (customCost) {
404
+ const parts = customCost.split(';').map(Number);
405
+ const inC = parts[0];
406
+ const outC = parts[1];
407
+ if (inC !== undefined && outC !== undefined && !isNaN(inC) && !isNaN(outC)) {
408
+ return (inputTokens * inC + outputTokens * outC) / 1_000_000;
409
+ }
410
+ }
402
411
  // Rough per-million-token pricing in USD; fall back to Sonnet rates.
403
412
  const pricing = {
404
413
  'claude-opus': { in: 15, out: 75 },
@@ -409,10 +418,14 @@ function estimateCost(model, inputTokens, outputTokens) {
409
418
  const p = pricing[key];
410
419
  return (inputTokens * p.in + outputTokens * p.out) / 1_000_000;
411
420
  }
412
- function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeName, system, settings, projects, cronManager: externalCronManager, onReminderHolder, telegramChatIdRef, telegramSubmitRef, telegramGateway, channelRegistry, channelRouter, activeChannelRef, mcpToolsRef, mcpClientsRef, onMcpReconnect, contextWindowSize }) {
421
+ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeName, system, settings, projects, cronManager: externalCronManager, onReminderHolder, telegramChatIdRef, telegramSubmitRef, telegramGateway, channelRegistry, channelRouter, activeChannelRef, mcpToolsRef, mcpClientsRef, onMcpReconnect, contextWindowSize, resumeSession, resumeSessionId }) {
413
422
  const [messages, setMessages] = useState([]);
414
423
  // Multi-channel: per-channel message storage (keyed by channel ID)
415
- const [channelMessages, setChannelMessages] = useState({});
424
+ // 'main' channel starts with the cold-start banner as the first message so it
425
+ // scrolls naturally with the conversation rather than being pinned to a header.
426
+ const [channelMessages, setChannelMessages] = useState({
427
+ main: [{ role: 'assistant', content: COLD_START_BANNER }],
428
+ });
416
429
  const [activeChannelId, setActiveChannelId] = useState('main');
417
430
  // Per-channel TurnLoop instances
418
431
  const channelTurnLoopsRef = useRef(new Map());
@@ -431,6 +444,9 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
431
444
  const [status, setStatus] = useState('idle');
432
445
  const startedAt = useRef(Date.now());
433
446
  const loopRef = useRef(null);
447
+ // Persist TurnLoop messages across turns so slash commands can read them
448
+ // even when loopRef.current is cleared (mode change, provider change, etc.)
449
+ const sessionMessagesRef = useRef([]);
434
450
  const busyRef = useRef(false);
435
451
  const toolGroupIndexRef = useRef(null);
436
452
  const planFileRef = useRef(null);
@@ -516,7 +532,7 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
516
532
  return () => clearInterval(id);
517
533
  }, []);
518
534
  const theme = getTheme(currentTheme);
519
- const applySlashResult = useCallback((result, agentPrompt) => {
535
+ const applySlashResult = useCallback(async (result, agentPrompt) => {
520
536
  const push = (content) => {
521
537
  // Slash command results must go to the per-channel messages array
522
538
  // since that's what the ChatSurface renders.
@@ -548,6 +564,14 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
548
564
  settingsMgr.current.update({ model: result.model });
549
565
  push(result.message);
550
566
  break;
567
+ case 'update_model_cost':
568
+ settingsMgr.current.update({ MODEL_COST: result.modelCost });
569
+ push(result.message);
570
+ break;
571
+ case 'update_context_window':
572
+ settingsMgr.current.update({ MODEL_CONTEXT_WINDOW: result.contextWindow });
573
+ push(result.message);
574
+ break;
551
575
  case 'update_provider': {
552
576
  const newProvider = result.provider;
553
577
  settingsMgr.current.update({ MODEL_PROVIDER: newProvider });
@@ -803,9 +827,141 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
803
827
  push(result.message ?? 'Switched to Stats tab');
804
828
  break;
805
829
  }
830
+ case 'compact': {
831
+ const compact = result.compact;
832
+ const { messages, depth } = compact;
833
+ const originalSystem = currentSystem;
834
+ if (!streamProviderHolder.current?.complete) {
835
+ push('Compaction requires a provider that supports the complete() method.');
836
+ return;
837
+ }
838
+ // Build compaction prompt based on depth
839
+ const conversationText = messages.map(m => {
840
+ if (m.role === 'user')
841
+ return `USER: ${m.content}`;
842
+ if (m.role === 'assistant')
843
+ return `ASSISTANT: ${m.content.map((b) => {
844
+ if (b.type === 'text')
845
+ return b.text;
846
+ if (b.type === 'thinking')
847
+ return b.thinking;
848
+ if (b.type === 'tool-use')
849
+ return `[tool: ${b.name}(${JSON.stringify(b.input).slice(0, 100)})]`;
850
+ return '';
851
+ }).join('\n')}`;
852
+ if (m.role === 'tool')
853
+ return `TOOL RESULT (${m.toolUseId}): ${m.content.slice(0, 500)}`;
854
+ return '';
855
+ }).join('\n');
856
+ const compactionPrompt = depth === 'brief'
857
+ ? `You are compacting a conversation between a user and an AI coding agent.
858
+ Your job is to reduce the conversation to a single meaningful turn.
859
+
860
+ RULES:
861
+ - Reduce to approximately 2-4 messages (1 user request + 1-3 assistant/tool responses)
862
+ - Keep the FIRST user message (session intent)
863
+ - Keep the LAST user message (current intent)
864
+ - For assistant messages: keep only text that contains decisions, findings, or key facts
865
+ - For tool calls: keep only tool name + brief input — no need for full output
866
+ - For tool results: keep only if the output was critical to a decision (≤200 chars)
867
+
868
+ OUTPUT FORMAT:
869
+ Respond ONLY with a valid JSON array of Message objects. Use this exact format:
870
+
871
+ [{"role":"user","content":"User message text"},
872
+ {"role":"assistant","content":[{"type":"text","text":"Assistant response"}]},
873
+ {"role":"assistant","content":[{"type":"tool-use","id":"tu_1","name":"Read","input":{"path":"main.py"}}]},
874
+ {"role":"tool","toolUseId":"tu_1","content":"Tool output text"}]
875
+
876
+ CRITICAL: Output ONLY the JSON array. No markdown, no explanation, no code fences.`
877
+ : `You are compacting a conversation between a user and an AI coding agent.
878
+ Your job is to reduce the conversation size while preserving all important context.
879
+
880
+ RULES:
881
+ - Reduce to approximately 50% of the original message count
882
+ - Keep ALL user messages (they define the intent)
883
+ - For assistant messages with tool calls: keep the tool name and a brief description of what it did, but truncate long outputs
884
+ - For assistant text: keep full content unless very long (>200 chars), then summarize
885
+ - For tool results: if >500 chars, keep the first 300 chars + "[truncated, N chars total]"
886
+ - NEVER drop entire turns — always keep at least one response per user message
887
+
888
+ OUTPUT FORMAT:
889
+ Respond ONLY with a valid JSON array of Message objects. Use this exact format:
890
+
891
+ [{"role":"user","content":"User message text"},
892
+ {"role":"assistant","content":[{"type":"text","text":"Assistant response"}]},
893
+ {"role":"assistant","content":[{"type":"thinking","thinking":"thinking text","signature":"sig"}]},
894
+ {"role":"assistant","content":[{"type":"tool-use","id":"tu_1","name":"Read","input":{"path":"main.py"}}]},
895
+ {"role":"tool","toolUseId":"tu_1","content":"Tool output text"}]
896
+
897
+ Each message has: role, content (string for user/tool, array for assistant).
898
+ Assistant blocks: {type:"text",text:"..."} | {type:"thinking",thinking:"...",signature:"..."} | {type:"tool-use",id:"...",name:"...",input:{...}}
899
+
900
+ CRITICAL: Output ONLY the JSON array. No markdown, no explanation, no code fences.`;
901
+ try {
902
+ const provider = streamProviderHolder.current;
903
+ const completeResult = await provider.complete({
904
+ messages: messages,
905
+ system: compactionPrompt,
906
+ tools: [],
907
+ model: currentModel,
908
+ });
909
+ if (!completeResult.text) {
910
+ push('Compaction failed: no compacted messages generated.');
911
+ return;
912
+ }
913
+ // Try to extract JSON from the response (LLM may add explanation text)
914
+ let jsonStr = completeResult.text.trim();
915
+ const firstBracket = jsonStr.indexOf('[');
916
+ const lastBracket = jsonStr.lastIndexOf(']');
917
+ if (firstBracket >= 0 && lastBracket > firstBracket) {
918
+ jsonStr = jsonStr.slice(firstBracket, lastBracket + 1);
919
+ }
920
+ // Parse and validate the compacted messages
921
+ let compactedMessages;
922
+ try {
923
+ compactedMessages = JSON.parse(jsonStr);
924
+ if (!Array.isArray(compactedMessages) || compactedMessages.length === 0) {
925
+ push('Compaction failed: invalid response format.');
926
+ return;
927
+ }
928
+ }
929
+ catch {
930
+ push('Compaction failed: could not parse compacted messages. The model did not return valid JSON.');
931
+ return;
932
+ }
933
+ // Build summary for display: extract any non-JSON text or generate a brief summary
934
+ const summaryText = `Compacted conversation to ${compactedMessages.length} messages (${depth}).\nThe agent will continue with the condensed context.`;
935
+ // Build new system prompt: original system + compacted messages summary
936
+ const compactedText = compactedMessages.map((m) => {
937
+ if (m.role === 'user')
938
+ return `USER: ${m.content}`;
939
+ if (m.role === 'assistant')
940
+ return `ASSISTANT: ${m.content.map((b) => (b.type === 'text' ? b.text : '')).join('\n')}`;
941
+ if (m.role === 'tool')
942
+ return `TOOL RESULT (${m.toolUseId}): ${m.content}`;
943
+ return '';
944
+ }).join('\n');
945
+ const newSystem = originalSystem + '\n\n# Conversation Summary\n\n' + compactedText;
946
+ // Null out old loop so next turn creates fresh loop with compacted state
947
+ loopRef.current = null;
948
+ // Update state
949
+ setCurrentSystem(newSystem);
950
+ sessionMessagesRef.current = compactedMessages;
951
+ setChannelMessages((prev) => ({
952
+ ...prev,
953
+ [activeChannelId]: compactedMessages,
954
+ }));
955
+ push(summaryText);
956
+ }
957
+ catch (err) {
958
+ push(`Compaction failed: ${err instanceof Error ? err.message : String(err)}`);
959
+ }
960
+ break;
961
+ }
806
962
  }
807
963
  }, []);
808
- const onSlashCommand = useCallback((input) => {
964
+ const onSlashCommand = useCallback(async (input) => {
809
965
  // Echo the submitted slash command as a user message so the transcript
810
966
  // shows what the user typed alongside the assistant's reply.
811
967
  const echoed = input.args ? `/${input.command} ${input.args}` : `/${input.command}`;
@@ -823,13 +979,15 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
823
979
  cwd: currentCwd,
824
980
  inputTokens,
825
981
  outputTokens,
982
+ messages: loopRef.current?.getMessages() ?? sessionMessagesRef.current,
983
+ channelMessages: channelMessages[activeChannelId],
826
984
  cronManager: cronManagerRef.current ?? undefined,
827
985
  mcpClients: mcpClientsRef.current,
828
986
  contextWindowSize: contextWindowSize ?? 200_000,
829
987
  thinkingBudget: ({ low: 2000, medium: 6000, high: 16000, max: 32000, auto: 0 })[currentEffort ?? 'auto'] ?? 0,
830
988
  };
831
989
  const result = handleSlashCommand(input.command, input.args, ctx);
832
- applySlashResult(result, input.args);
990
+ await applySlashResult(result, input.args);
833
991
  // Spawn external agent subprocess after the slash command result is applied.
834
992
  if (result.type === 'start_agent' && result.agentId) {
835
993
  const agentId = result.agentId;
@@ -938,20 +1096,26 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
938
1096
  '2': { name: 'openrouter', key: 'OPENROUTER_API_KEY', url: 'OPENROUTER_URL' },
939
1097
  '3': { name: 'openai', key: 'OPENAI_API_KEY', url: 'OPENAI_URL' },
940
1098
  '4': { name: 'anthropic', key: 'ANTHROPIC_API_KEY', url: 'ANTHROPIC_URL' },
1099
+ '5': { name: 'google', key: 'GOOGLE_API_KEY', url: 'GOOGLE_URL' },
1100
+ '6': { name: 'ollama', key: 'MODEL_API_KEY', url: 'MODEL_URL' },
941
1101
  };
942
1102
  const entry = providerMap[choice];
943
1103
  if (!entry) {
944
- setChannelMessages((prev) => ({ ...prev, [targetChannel]: [...(prev[targetChannel] || []), { role: 'assistant', content: 'Invalid choice. Please enter 1-4.\n 1) local\n 2) openrouter\n 3) openai\n 4) anthropic' }] }));
945
- setMessages((prev) => [...prev, { role: 'assistant', content: 'Invalid choice. Please enter 1-4.\n 1) local\n 2) openrouter\n 3) openai\n 4) anthropic' }]);
1104
+ setChannelMessages((prev) => ({ ...prev, [targetChannel]: [...(prev[targetChannel] || []), { role: 'assistant', content: 'Invalid choice. Please enter 1-6.\n 1) local\n 2) openrouter\n 3) openai\n 4) anthropic\n 5) google\n 6) ollama' }] }));
1105
+ setMessages((prev) => [...prev, { role: 'assistant', content: 'Invalid choice. Please enter 1-6.\n 1) local\n 2) openrouter\n 3) openai\n 4) anthropic\n 5) google\n 6) ollama' }]);
946
1106
  return;
947
1107
  }
948
1108
  const updates = { MODEL_PROVIDER: entry.name };
949
1109
  updates[entry.key] = '';
950
- updates[entry.url] = entry.name === 'openrouter' ? 'https://openrouter.ai/api' : `https://api.${entry.name}.com/v1`;
1110
+ updates[entry.url] = entry.name === 'openrouter' ? 'https://openrouter.ai/api'
1111
+ : entry.name === 'anthropic' ? 'https://api.anthropic.com'
1112
+ : entry.name === 'google' ? 'https://generativelanguage.googleapis.com/v1beta'
1113
+ : `https://api.${entry.name}.com/v1`;
951
1114
  settingsMgr.current.update(updates);
952
- if (entry.name === 'local') {
953
- wizardRef.current = { phase: 'apiKey', provider: entry.name };
954
- pushToChannel(targetChannel, `${entry.name}: Enter your API key:`);
1115
+ if (entry.name === 'ollama') {
1116
+ // Ollama doesn't need an API key; skip straight to the URL prompt
1117
+ wizardRef.current = { phase: 'url', provider: entry.name };
1118
+ pushToChannel(targetChannel, `${entry.name}: What is your Ollama API URL?\n Default: http://localhost:11434/v1\n (Press Enter for default):`);
955
1119
  }
956
1120
  else {
957
1121
  wizardRef.current = { phase: 'apiKey', provider: entry.name };
@@ -960,15 +1124,25 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
960
1124
  return;
961
1125
  }
962
1126
  if (wizard.phase === 'apiKey') {
963
- const keyName = wizard.provider === 'local' ? 'MODEL_API_KEY'
1127
+ const keyName = wizard.provider === 'local' || wizard.provider === 'ollama' ? 'MODEL_API_KEY'
964
1128
  : wizard.provider === 'openrouter' ? 'OPENROUTER_API_KEY'
965
1129
  : wizard.provider === 'openai' ? 'OPENAI_API_KEY'
966
- : 'ANTHROPIC_API_KEY';
1130
+ : wizard.provider === 'google' ? 'GOOGLE_API_KEY'
1131
+ : 'ANTHROPIC_API_KEY';
967
1132
  settingsMgr.current.update({ [keyName]: input });
968
1133
  if (wizard.provider === 'local') {
969
1134
  wizardRef.current = { phase: 'url', provider: wizard.provider };
970
1135
  pushToChannel(targetChannel, `API key configured.\nWhat is your local API URL?\n Default: http://localhost:8080/v1\n (Press Enter for default):`);
971
1136
  }
1137
+ else if (wizard.provider === 'ollama') {
1138
+ settingsMgr.current.update({ MODEL_URL: 'http://localhost:11434/v1' });
1139
+ wizardRef.current = { phase: 'model', provider: wizard.provider };
1140
+ pushToChannel(targetChannel, `URL configured: http://localhost:11434/v1\nWhich model do you want to use?\n Examples: llama3.3, mistral, phi3, qwen2.5\n Enter model name:`);
1141
+ }
1142
+ else if (wizard.provider === 'google') {
1143
+ wizardRef.current = { phase: 'model', provider: wizard.provider };
1144
+ pushToChannel(targetChannel, `API key configured.\nWhich model do you want to use?\n Examples: gemini-2.5-flash, gemini-2.5-pro, gemini-2.0-flash\n Enter model name:`);
1145
+ }
972
1146
  else {
973
1147
  wizardRef.current = { phase: 'model', provider: wizard.provider };
974
1148
  pushToChannel(targetChannel, `API key configured.\nWhich model do you want to use?\n Examples: claude-sonnet-4-6, gpt-4o, o1, llama-3.1-405b\n Enter model name:`);
@@ -976,10 +1150,14 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
976
1150
  return;
977
1151
  }
978
1152
  if (wizard.phase === 'url') {
979
- const url = input || 'http://localhost:8080/v1';
1153
+ const defaultUrl = wizard.provider === 'ollama' ? 'http://localhost:11434/v1' : 'http://localhost:8080/v1';
1154
+ const url = input || defaultUrl;
980
1155
  settingsMgr.current.update({ MODEL_URL: url });
1156
+ const modelExamples = wizard.provider === 'ollama'
1157
+ ? 'llama3.3, mistral, phi3, qwen2.5'
1158
+ : 'claude-sonnet-4-6, gpt-4o, o1, llama-3.1-405b';
981
1159
  wizardRef.current = { phase: 'model', provider: wizard.provider };
982
- pushToChannel(targetChannel, `URL configured: ${url}\nWhich model do you want to use?\n Examples: claude-sonnet-4-6, gpt-4o, o1, llama-3.1-405b\n Enter model name:`);
1160
+ pushToChannel(targetChannel, `URL configured: ${url}\nWhich model do you want to use?\n Examples: ${modelExamples}\n Enter model name:`);
983
1161
  return;
984
1162
  }
985
1163
  if (wizard.phase === 'model') {
@@ -1054,6 +1232,8 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
1054
1232
  effort: currentEffort,
1055
1233
  system: currentSystem + buildModePreamble(currentMode, currentCwd),
1056
1234
  sessionId: channel?.sessionId,
1235
+ resume: resumeSession,
1236
+ resumeSessionId: resumeSessionId,
1057
1237
  onApprovalAsk: (req) => new Promise((resolve) => {
1058
1238
  setPendingApproval({
1059
1239
  toolName: req.name,
@@ -1080,6 +1260,7 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
1080
1260
  });
1081
1261
  channelTurnLoops.set(targetChannel, loop);
1082
1262
  }
1263
+ loopRef.current = loop;
1083
1264
  const channelMsgs = channelMessages[targetChannel] || [];
1084
1265
  setChannelMessages((prev) => ({
1085
1266
  ...prev,
@@ -1143,6 +1324,16 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
1143
1324
  globalThis.__hopperSessionId = e.id;
1144
1325
  }
1145
1326
  }),
1327
+ loop.eventBus.subscribe('session-resumed', (e) => {
1328
+ if (e.type === 'session-resumed' && e.turnsRecovered > 0) {
1329
+ const bannerText = `Context restored: ${e.turnsRecovered} turn(s) from session ${e.id}`;
1330
+ setMessages(prev => [...prev, { role: 'system', content: bannerText }]);
1331
+ setChannelMessages((prev) => ({
1332
+ ...prev,
1333
+ [targetChannel]: [...(prev[targetChannel] || []), { role: 'system', content: bannerText }],
1334
+ }));
1335
+ }
1336
+ }),
1146
1337
  loop.eventBus.subscribe('usage', (e) => {
1147
1338
  if (e.type === 'usage') {
1148
1339
  setInputTokens(e.inputTokens);
@@ -1232,9 +1423,12 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
1232
1423
  if (runResult) {
1233
1424
  channelRegistry.updateSession(targetChannel, runResult.sessionId);
1234
1425
  }
1426
+ // Persist full TurnLoop messages for slash commands (e.g. /context messages)
1427
+ sessionMessagesRef.current = loop.getMessages();
1235
1428
  unsubs.forEach(u => u());
1236
1429
  toolGroupIndexRef.current = null;
1237
1430
  busyRef.current = false;
1431
+ loopRef.current = null;
1238
1432
  setStatus('idle');
1239
1433
  // Route response back to Telegram if this is a Telegram channel
1240
1434
  const telegramChatId = channelRouter.getTelegramChatId(targetChannel);
@@ -1416,7 +1610,7 @@ function App({ provider, streamProviderHolder, model, approvalMode, cwd, themeNa
1416
1610
  setMessages(prev => [...prev, { role: 'system', content: `Active project switched to ${p.label} (${p.projectPath})` }]);
1417
1611
  }, []);
1418
1612
  const project = basename(currentCwd);
1419
- const cost = estimateCost(currentModel, inputTokens ?? 0, outputTokens ?? 0);
1613
+ const cost = estimateCost(currentModel, inputTokens ?? 0, outputTokens ?? 0, settingsMgr.current?.get().MODEL_COST);
1420
1614
  // Build channel entries for the Channels tab
1421
1615
  const channelList = channelRegistry.list();
1422
1616
  const channelTabEntries = channelList.map(ch => ({
@@ -1448,7 +1642,7 @@ function createProvider(settings) {
1448
1642
  process.env.OPENAI_API_KEY ||
1449
1643
  '';
1450
1644
  const openaiUrl = (typeof settings.OPENAI_URL === 'string' ? settings.OPENAI_URL.trim() : '') ||
1451
- process.env.OPENAI_BASE_URL ||
1645
+ process.env.OPENAI_URL ||
1452
1646
  '';
1453
1647
  if (!openaiKey) {
1454
1648
  console.error('hopper-agent: no OpenAI API key configured.\n' +
@@ -1473,31 +1667,47 @@ function createProvider(settings) {
1473
1667
  const p = new AnthropicProvider(orKey, orUrl);
1474
1668
  return { name: 'openrouter', stream: p.stream.bind(p), check: p.check.bind(p) };
1475
1669
  }
1476
- if (providerName === 'local') {
1477
- const localKey = (typeof settings.MODEL_API_KEY === 'string' ? settings.MODEL_API_KEY.trim() : '') ||
1670
+ if (providerName === 'ollama' || providerName === 'local') {
1671
+ const key = (typeof settings.MODEL_API_KEY === 'string' ? settings.MODEL_API_KEY.trim() : '') ||
1478
1672
  process.env.MODEL_API_KEY ||
1479
1673
  '';
1480
- const localUrl = (typeof settings.MODEL_URL === 'string' ? settings.MODEL_URL.trim() : '') ||
1674
+ const url = (typeof settings.MODEL_URL === 'string' ? settings.MODEL_URL.trim() : '') ||
1481
1675
  process.env.MODEL_URL ||
1482
1676
  '';
1483
- if (!localKey && !localUrl) {
1484
- console.error('hopper-agent: no local provider config.\n' +
1485
- ' Run `hopper-agent` and use `/init` to configure, or set "MODEL_API_KEY" and "MODEL_URL" in ~/.hopper-agent/settings.json.');
1677
+ if (!url) {
1678
+ console.error('hopper-agent: no local provider URL configured.\n' +
1679
+ ' Run `hopper-agent` and use `/init` to configure, or set "MODEL_URL" in ~/.hopper-agent/settings.json.');
1486
1680
  return null;
1487
1681
  }
1488
- const p = new AnthropicProvider(localKey || undefined, localUrl || undefined, { local: true });
1489
- return { name: 'local', stream: p.stream.bind(p), check: p.check.bind(p) };
1682
+ const p = new OllamaProvider(key || undefined, url || undefined);
1683
+ const displayName = providerName === 'ollama' ? 'ollama' : 'local';
1684
+ return { name: displayName, stream: p.stream.bind(p), check: p.check.bind(p) };
1685
+ }
1686
+ if (providerName === 'google') {
1687
+ const googleKey = (typeof settings.GOOGLE_API_KEY === 'string' ? settings.GOOGLE_API_KEY.trim() : '') ||
1688
+ process.env.GOOGLE_API_KEY ||
1689
+ '';
1690
+ const googleUrl = (typeof settings.GOOGLE_URL === 'string' ? settings.GOOGLE_URL.trim() : '') ||
1691
+ process.env.GOOGLE_URL ||
1692
+ '';
1693
+ if (!googleKey) {
1694
+ console.error('hopper-agent: no Google API key configured.\n' +
1695
+ ' Run `hopper-agent` and use `/init` to configure, or set "GOOGLE_API_KEY" in ~/.hopper-agent/settings.json.');
1696
+ return null;
1697
+ }
1698
+ const p = new GoogleGeminiProvider(googleKey, googleUrl || undefined);
1699
+ return { name: p.name, stream: p.stream.bind(p), check: p.check.bind(p) };
1490
1700
  }
1491
1701
  // anthropic (default)
1492
- const fromSettingsKey = typeof settings.MODEL_API_KEY === 'string' ? settings.MODEL_API_KEY.trim() : '';
1493
- const fromSettingsUrl = typeof settings.MODEL_URL === 'string' ? settings.MODEL_URL.trim() : '';
1702
+ const fromSettingsKey = typeof settings.ANTHROPIC_API_KEY === 'string' ? settings.ANTHROPIC_API_KEY.trim() : '';
1703
+ const fromSettingsUrl = typeof settings.ANTHROPIC_URL === 'string' ? settings.ANTHROPIC_URL.trim() : '';
1494
1704
  const apiKey = fromSettingsKey || process.env.ANTHROPIC_API_KEY || '';
1495
- const baseUrl = fromSettingsUrl || process.env.ANTHROPIC_BASE_URL || '';
1705
+ const baseUrl = fromSettingsUrl || process.env.ANTHROPIC_URL || '';
1496
1706
  if (!apiKey) {
1497
1707
  console.error('hopper-agent: no API key configured.\n' +
1498
- ` Looked at ~/.hopper-agent/settings.json (MODEL_API_KEY = ${fromSettingsKey ? '[set]' : '[missing/empty]'}),\n` +
1708
+ ` Looked at ~/.hopper-agent/settings.json (ANTHROPIC_API_KEY = ${fromSettingsKey ? '[set]' : '[missing/empty]'}),\n` +
1499
1709
  ` and ANTHROPIC_API_KEY env var (${process.env.ANTHROPIC_API_KEY ? '[set]' : '[missing]'}).\n` +
1500
- ' Run `hopper-agent` and use `/init` to configure, or set "MODEL_API_KEY" in ~/.hopper-agent/settings.json.');
1710
+ ' Run `hopper-agent` and use `/init` to configure, or set "ANTHROPIC_API_KEY" in ~/.hopper-agent/settings.json.');
1501
1711
  return null;
1502
1712
  }
1503
1713
  const p = new AnthropicProvider(apiKey, baseUrl || undefined);
@@ -1742,17 +1952,12 @@ async function main() {
1742
1952
  const result = await loop.run(args.prompt);
1743
1953
  process.exit(result.reason === 'error' ? 1 : 0);
1744
1954
  }
1745
- // Switch to the alternate screen buffer so the TUI owns the full window and
1746
- // the header stays pinned at the top. Restore on exit. We also override the
1747
- // terminal's default background via OSC 11 so the unpainted cells (padding,
1748
- // gaps between widgets) match the theme instead of showing whatever color
1749
- // the user's terminal profile happens to use.
1955
+ // Override the terminal's default background via OSC 11 so the unpainted
1956
+ // cells (padding, gaps between widgets) match the theme instead of showing
1957
+ // whatever color the user's terminal profile happens to use.
1750
1958
  const theme = getTheme(themeName);
1751
- const ENTER_ALT = '\x1b[?1049h\x1b[H';
1752
- const LEAVE_ALT = '\x1b[?1049l';
1753
- const SET_BG = `\x1b]11;${theme.background}\x07`;
1754
1959
  const RESET_BG = '\x1b]111\x07';
1755
- process.stdout.write(ENTER_ALT + SET_BG);
1960
+ process.stdout.write(`\x1b]11;${theme.background}\x07`);
1756
1961
  let restored = false;
1757
1962
  const restore = () => {
1758
1963
  if (restored)
@@ -1762,7 +1967,7 @@ async function main() {
1762
1967
  for (const client of mcpRawClientsRef.current) {
1763
1968
  client.disconnect().catch(() => { });
1764
1969
  }
1765
- process.stdout.write(RESET_BG + LEAVE_ALT);
1970
+ process.stdout.write(RESET_BG);
1766
1971
  const id = globalThis.__hopperSessionId;
1767
1972
  if (id) {
1768
1973
  process.stdout.write(`\nSession saved. Resume with: hopper-agent resume ${id}\n`);
@@ -1865,7 +2070,7 @@ async function main() {
1865
2070
  }
1866
2071
  });
1867
2072
  console.error(`[cron] Checker started. Pending: ${cronManager.pendingCount}`);
1868
- render(_jsx(App, { provider: streamProviderHolder.current, streamProviderHolder: streamProviderHolder, model: model, approvalMode: approvalMode, cwd: cwd, themeName: themeName, system: system, settings: settings, projects: claudeProjects, cronManager: cronManager, onReminderHolder: onReminderHolder, telegramChatIdRef: telegramChatIdRef, telegramSubmitRef: telegramSubmitRef, telegramGateway: telegramGateway, channelRegistry: channelRegistry, channelRouter: channelRouter, activeChannelRef: activeChannelRef, mcpToolsRef: mcpToolsRef, mcpClientsRef: mcpClientsRef, onMcpReconnect: reconnectMcp, contextWindowSize: 200_000 }), {
2073
+ render(_jsx(App, { provider: streamProviderHolder.current, streamProviderHolder: streamProviderHolder, model: model, approvalMode: approvalMode, cwd: cwd, themeName: themeName, system: system, settings: settings, projects: claudeProjects, cronManager: cronManager, onReminderHolder: onReminderHolder, telegramChatIdRef: telegramChatIdRef, telegramSubmitRef: telegramSubmitRef, telegramGateway: telegramGateway, channelRegistry: channelRegistry, channelRouter: channelRouter, activeChannelRef: activeChannelRef, mcpToolsRef: mcpToolsRef, mcpClientsRef: mcpClientsRef, onMcpReconnect: reconnectMcp, contextWindowSize: 200_000, resumeSession: args.resume, resumeSessionId: args.resume ? args.prompt : undefined }), {
1869
2074
  exitOnCtrlC: false,
1870
2075
  });
1871
2076
  }