@makefinks/daemon 0.9.1 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/README.md +60 -14
  2. package/package.json +4 -2
  3. package/src/ai/copilot-client.ts +775 -0
  4. package/src/ai/daemon-ai.ts +32 -234
  5. package/src/ai/model-config.ts +55 -14
  6. package/src/ai/providers/capabilities.ts +16 -0
  7. package/src/ai/providers/copilot-provider.ts +632 -0
  8. package/src/ai/providers/openrouter-provider.ts +217 -0
  9. package/src/ai/providers/registry.ts +14 -0
  10. package/src/ai/providers/types.ts +31 -0
  11. package/src/ai/system-prompt.ts +16 -0
  12. package/src/ai/tools/subagents.ts +1 -1
  13. package/src/ai/tools/tool-registry.ts +22 -1
  14. package/src/ai/tools/write-file.ts +51 -0
  15. package/src/app/components/AppOverlays.tsx +9 -1
  16. package/src/app/components/ConversationPane.tsx +8 -2
  17. package/src/components/ModelMenu.tsx +202 -140
  18. package/src/components/OnboardingOverlay.tsx +147 -1
  19. package/src/components/SettingsMenu.tsx +27 -1
  20. package/src/components/TokenUsageDisplay.tsx +5 -3
  21. package/src/components/tool-layouts/layouts/index.ts +1 -0
  22. package/src/components/tool-layouts/layouts/write-file.tsx +117 -0
  23. package/src/hooks/daemon-event-handlers.ts +61 -14
  24. package/src/hooks/keyboard-handlers.ts +109 -28
  25. package/src/hooks/use-app-callbacks.ts +141 -43
  26. package/src/hooks/use-app-context-builder.ts +5 -0
  27. package/src/hooks/use-app-controller.ts +31 -2
  28. package/src/hooks/use-app-copilot-models-loader.ts +45 -0
  29. package/src/hooks/use-app-display-state.ts +24 -2
  30. package/src/hooks/use-app-model.ts +103 -17
  31. package/src/hooks/use-app-preferences-bootstrap.ts +54 -10
  32. package/src/hooks/use-bootstrap-controller.ts +5 -0
  33. package/src/hooks/use-daemon-events.ts +8 -2
  34. package/src/hooks/use-daemon-keyboard.ts +19 -6
  35. package/src/hooks/use-daemon-runtime-controller.ts +4 -0
  36. package/src/hooks/use-menu-keyboard.ts +6 -1
  37. package/src/state/app-context.tsx +6 -0
  38. package/src/types/index.ts +24 -1
  39. package/src/utils/copilot-models.ts +77 -0
  40. package/src/utils/preferences.ts +3 -0
@@ -0,0 +1,217 @@
1
+ import { createOpenRouter } from "@openrouter/ai-sdk-provider";
2
+ import { type ModelMessage, ToolLoopAgent, generateText, stepCountIs } from "ai";
3
+ import { getDaemonManager } from "../../state/daemon-state";
4
+ import { getRuntimeContext } from "../../state/runtime-context";
5
+ import type { ReasoningEffort, ToolApprovalRequest } from "../../types";
6
+ import { debug, toolDebug } from "../../utils/debug-logger";
7
+ import { getOpenRouterReportedCost } from "../../utils/openrouter-reported-cost";
8
+ import { getWorkspacePath } from "../../utils/workspace-manager";
9
+ import { extractFinalAssistantText } from "../message-utils";
10
+ import { buildOpenRouterChatSettings, getResponseModel } from "../model-config";
11
+ import { sanitizeMessagesForInput } from "../sanitize-messages";
12
+ import { buildDaemonSystemPrompt } from "../system-prompt";
13
+ import { coordinateToolApprovals } from "../tool-approval-coordinator";
14
+ import { getCachedToolAvailability, getDaemonTools } from "../tools/index";
15
+ import { createToolAvailabilitySnapshot, resolveToolAvailability } from "../tools/tool-registry";
16
+ import { getProviderCapabilities } from "./capabilities";
17
+ import type { LlmProviderAdapter, ProviderStreamRequest, ProviderStreamResult } from "./types";
18
+
19
// Module-level OpenRouter client, created once and shared by every call below.
// NOTE(review): no options are passed — presumably credentials are picked up
// from the environment; confirm against @openrouter/ai-sdk-provider docs.
const openrouter = createOpenRouter();
// Upper bound on agent tool-loop steps per run (enforced via stopWhen below)
// so a runaway tool loop cannot spin forever.
const MAX_AGENT_STEPS = 100;
21
+
22
+ function normalizeStreamError(error: unknown): Error {
23
+ if (error instanceof Error) return error;
24
+ if (error && typeof error === "object" && "message" in error) {
25
+ const message = (error as { message?: unknown }).message;
26
+ if (typeof message === "string") return new Error(message);
27
+ }
28
+ return new Error(String(error));
29
+ }
30
+
31
+ async function createDaemonAgent(
32
+ interactionMode: ProviderStreamRequest["interactionMode"] = "text",
33
+ reasoningEffort?: ReasoningEffort,
34
+ memoryInjection?: string
35
+ ) {
36
+ const openRouterReasoningEffort = reasoningEffort === "xhigh" ? "high" : reasoningEffort;
37
+ const modelConfig = buildOpenRouterChatSettings(
38
+ openRouterReasoningEffort ? { reasoning: { effort: openRouterReasoningEffort } } : undefined
39
+ );
40
+
41
+ const { sessionId } = getRuntimeContext();
42
+ const tools = await getDaemonTools();
43
+ const toolAvailability =
44
+ getCachedToolAvailability() ?? (await resolveToolAvailability(getDaemonManager().toolToggles));
45
+
46
+ const workspacePath = sessionId ? getWorkspacePath(sessionId) : undefined;
47
+
48
+ return new ToolLoopAgent({
49
+ model: openrouter.chat(getResponseModel(), modelConfig),
50
+ instructions: buildDaemonSystemPrompt({
51
+ mode: interactionMode,
52
+ toolAvailability: createToolAvailabilitySnapshot(toolAvailability),
53
+ workspacePath,
54
+ memoryInjection,
55
+ }),
56
+ tools,
57
+ stopWhen: stepCountIs(MAX_AGENT_STEPS),
58
+ prepareStep: async ({ messages }) => ({
59
+ messages: sanitizeMessagesForInput(messages),
60
+ }),
61
+ });
62
+ }
63
+
64
/**
 * Stream one agent turn against OpenRouter, forwarding stream parts to the
 * caller's callbacks as they arrive.
 *
 * The outer while-loop exists for tool approvals: when the model requests
 * approvals, the stream ends, approvals are coordinated with the UI, and the
 * loop re-enters agent.stream() with the approval tool message appended.
 *
 * @returns the accumulated result, or null when aborted, on stream error
 *          (after invoking callbacks.onError), or on an empty response.
 */
async function streamOpenRouterResponse(
  request: ProviderStreamRequest
): Promise<ProviderStreamResult | null> {
  const {
    userMessage,
    callbacks,
    conversationHistory,
    interactionMode,
    abortSignal,
    reasoningEffort,
    memoryInjection,
  } = request;

  // Seed the message list with prior history plus the new user turn.
  const messages: ModelMessage[] = [...conversationHistory];
  messages.push({ role: "user" as const, content: userMessage });

  const agent = await createDaemonAgent(interactionMode, reasoningEffort, memoryInjection);

  let currentMessages = messages;
  let fullText = "";
  let streamError: Error | null = null;
  // Response messages accumulated across ALL stream rounds (approvals included).
  let allResponseMessages: ModelMessage[] = [];

  while (true) {
    const stream = await agent.stream({
      messages: currentMessages,
    });

    // Approval requests collected during this round; drained after the stream ends.
    const pendingApprovals: ToolApprovalRequest[] = [];

    for await (const part of stream.fullStream) {
      // Cooperative cancellation: bail out between parts when aborted.
      if (abortSignal?.aborted) {
        return null;
      }

      if (part.type === "error") {
        // Record the error but keep draining the stream; the loop exits via
        // the streamError check below once the stream is done.
        const err = normalizeStreamError(part.error);
        streamError = err;
        debug.error("agent-stream-error", {
          message: err.message,
          error: part.error,
        });
        callbacks.onError?.(err);
      } else if (part.type === "abort") {
        return null;
      } else if (part.type === "reasoning-delta") {
        callbacks.onReasoningToken?.(part.text);
      } else if (part.type === "text-delta") {
        fullText += part.text;
        callbacks.onToken?.(part.text);
      } else if (part.type === "tool-input-start") {
        callbacks.onToolCallStart?.(part.toolName, part.id);
      } else if (part.type === "tool-call") {
        callbacks.onToolCall?.(part.toolName, part.input, part.toolCallId);
      } else if (part.type === "tool-result") {
        callbacks.onToolResult?.(part.toolName, part.output, part.toolCallId);
      } else if (part.type === "tool-error") {
        // Tool failures are surfaced to the caller as a tool RESULT carrying
        // the error, so the conversation can continue rather than aborting.
        const errorMessage = part.error instanceof Error ? part.error.message : String(part.error);
        toolDebug.error("tool-error", {
          toolName: part.toolName,
          toolCallId: part.toolCallId,
          input: part.input,
          error: errorMessage,
        });
        callbacks.onToolResult?.(part.toolName, { error: errorMessage, input: part.input }, part.toolCallId);
      } else if (part.type === "tool-approval-request") {
        const approvalRequest: ToolApprovalRequest = {
          approvalId: part.approvalId,
          toolName: part.toolCall.toolName,
          toolCallId: part.toolCall.toolCallId,
          input: part.toolCall.input,
        };
        pendingApprovals.push(approvalRequest);
        callbacks.onToolApprovalRequest?.(approvalRequest);
      } else if (part.type === "finish-step") {
        // Per-step usage accounting, including the OpenRouter-reported cost
        // pulled from provider metadata.
        if (part.usage && callbacks.onStepUsage) {
          const reportedCost = getOpenRouterReportedCost(part.providerMetadata);

          callbacks.onStepUsage({
            promptTokens: part.usage.inputTokens ?? 0,
            completionTokens: part.usage.outputTokens ?? 0,
            totalTokens: part.usage.totalTokens ?? 0,
            reasoningTokens: part.usage.outputTokenDetails?.reasoningTokens ?? 0,
            cachedInputTokens: part.usage.inputTokenDetails?.cacheReadTokens ?? 0,
            cost: reportedCost,
          });
        }
      }
    }

    if (streamError) {
      return null;
    }

    // Append this round's (sanitized) response messages to both the running
    // total and the messages used for the next round.
    const rawResponseMessages = await stream.response.then((response) => response.messages);
    const responseMessages = sanitizeMessagesForInput(rawResponseMessages);
    allResponseMessages = [...allResponseMessages, ...responseMessages];
    currentMessages = [...currentMessages, ...responseMessages];

    if (pendingApprovals.length > 0 && callbacks.onAwaitingApprovals) {
      // Block until the user approves/denies, then feed the resulting tool
      // message back and re-enter the stream loop.
      const { toolMessage } = await coordinateToolApprovals({
        pendingApprovals,
        requestApprovals: callbacks.onAwaitingApprovals,
      });

      if (toolMessage) {
        currentMessages = [...currentMessages, toolMessage];
      }

      continue;
    }

    break;
  }

  if (streamError) {
    return null;
  }

  const finalText = extractFinalAssistantText(allResponseMessages);
  // No streamed text AND no response messages means the provider returned
  // nothing usable — most likely a credentials or model-availability problem.
  if (!fullText && allResponseMessages.length === 0) {
    callbacks.onError?.(new Error("Model returned empty response. Check API key and model availability."));
    return null;
  }

  return {
    fullText,
    responseMessages: allResponseMessages,
    finalText,
  };
}
195
+
196
+ async function generateOpenRouterSessionTitle(firstMessage: string): Promise<string> {
197
+ const result = await generateText({
198
+ model: openrouter.chat(getResponseModel(), buildOpenRouterChatSettings()),
199
+ system:
200
+ 'You are a title generator. Generate a very short, descriptive title (3-6 words) for a conversation based on the user\'s first message. The title should capture the main topic or intent. Do not use quotes, punctuation, or prefixes like "Title:". Just output the title text directly.',
201
+ messages: [
202
+ {
203
+ role: "user",
204
+ content: `Generate a short descriptive title for the following message <message>${firstMessage}</message>`,
205
+ },
206
+ ],
207
+ });
208
+
209
+ return result.text.trim() || "New Session";
210
+ }
211
+
212
/**
 * OpenRouter implementation of the LlmProviderAdapter contract, consumed by
 * the provider registry to route streaming and title generation.
 */
export const openRouterProviderAdapter: LlmProviderAdapter = {
  id: "openrouter",
  capabilities: getProviderCapabilities("openrouter"),
  streamResponse: streamOpenRouterResponse,
  generateSessionTitle: generateOpenRouterSessionTitle,
};
@@ -0,0 +1,14 @@
1
+ import type { LlmProvider } from "../../types";
2
+ import { getModelProvider } from "../model-config";
3
+ import { copilotProviderAdapter } from "./copilot-provider";
4
+ import { openRouterProviderAdapter } from "./openrouter-provider";
5
+ import type { LlmProviderAdapter } from "./types";
6
+
7
// Static lookup table mapping each supported provider id to its adapter.
// Record<LlmProvider, …> forces an entry for every provider in the union.
const PROVIDER_ADAPTERS: Record<LlmProvider, LlmProviderAdapter> = {
  openrouter: openRouterProviderAdapter,
  copilot: copilotProviderAdapter,
};

/**
 * Resolve the adapter for a provider. Defaults to the currently configured
 * model provider (via getModelProvider()) when no argument is given.
 */
export function getProviderAdapter(provider: LlmProvider = getModelProvider()): LlmProviderAdapter {
  return PROVIDER_ADAPTERS[provider];
}
@@ -0,0 +1,31 @@
1
+ import type { ModelMessage } from "ai";
2
+ import type { LlmProvider, ReasoningEffort, StreamCallbacks, TokenUsage } from "../../types";
3
+ import type { InteractionMode } from "../system-prompt";
4
+
5
/** Feature flags describing what a given LLM provider can do. */
export interface ProviderCapabilities {
  // Whether the provider supports spawning subagents as a tool.
  supportsSubagentTool: boolean;
}

/** Everything an adapter needs to run one streaming agent turn. */
export interface ProviderStreamRequest {
  // The new user message for this turn.
  userMessage: string;
  // Callbacks invoked as stream parts (tokens, tool calls, usage, …) arrive.
  callbacks: StreamCallbacks;
  // Prior turns, passed to the model alongside the new message.
  conversationHistory: ModelMessage[];
  interactionMode: InteractionMode;
  // When aborted, the adapter stops streaming and returns null.
  abortSignal?: AbortSignal;
  reasoningEffort?: ReasoningEffort;
  // Optional memory text to inject into the system prompt.
  memoryInjection?: string;
}

/** Outcome of a completed (non-aborted, non-errored) streaming turn. */
export interface ProviderStreamResult {
  // Concatenation of all streamed text deltas.
  fullText: string;
  // All response messages produced across the turn.
  responseMessages: ModelMessage[];
  usage?: TokenUsage;
  // Text of the final assistant message, when extractable.
  finalText?: string;
}

/** Contract every LLM provider adapter implements (see registry). */
export interface LlmProviderAdapter {
  id: LlmProvider;
  capabilities: ProviderCapabilities;
  // Returns null when the turn was aborted or failed.
  streamResponse: (request: ProviderStreamRequest) => Promise<ProviderStreamResult | null>;
  generateSessionTitle: (firstMessage: string) => Promise<string>;
}
@@ -9,6 +9,7 @@ export type InteractionMode = "text" | "voice";
9
9
 
10
10
  export interface ToolAvailability {
11
11
  readFile: boolean;
12
+ writeFile: boolean;
12
13
  runBash: boolean;
13
14
  webSearch: boolean;
14
15
  fetchUrls: boolean;
@@ -64,6 +65,7 @@ export function buildDaemonSystemPrompt(options: SystemPromptOptions = {}): stri
64
65
  function normalizeToolAvailability(toolAvailability?: Partial<ToolAvailability>): ToolAvailability {
65
66
  return {
66
67
  readFile: toolAvailability?.readFile ?? true,
68
+ writeFile: toolAvailability?.writeFile ?? true,
67
69
  runBash: toolAvailability?.runBash ?? true,
68
70
  webSearch: toolAvailability?.webSearch ?? true,
69
71
  fetchUrls: toolAvailability?.fetchUrls ?? true,
@@ -243,6 +245,19 @@ Fetch multiple URLs in one call:
243
245
  By default it reads up to 2000 lines from the start when no offset/limit are provided.
244
246
  For partial reads, you must provide both a 0-based line offset and a line limit.
245
247
  `,
248
+ writeFile: `
249
+ ### 'writeFile' (local file writer)
250
+ Use this to write content to files. Creates new files or overwrites existing ones.
251
+ Automatically creates parent directories if they don't exist.
252
+
253
+ **CRITICAL: Always report the correct file location to the user**
254
+ - When you write a file, explicitly tell the user the full path where it was saved
255
+ - If the file is in the workspace, say "I have saved it to my workspace at: [full path]"
256
+ - If the file is in the current working directory, say "I have saved it to: [path]"
257
+ - Do NOT give commands like "cat filename" or "open filename" unless the file is actually in the current working directory
258
+ - For files in the workspace, give the full path: "cat /full/path/to/file" or tell the user to navigate there first
259
+ `,
260
+
246
261
  subagent: `
247
262
  ### 'subagent'
248
263
  Call this tool to spawn subagents for specific tasks.
@@ -260,6 +275,7 @@ function buildToolDefinitions(availability: ToolAvailability): string {
260
275
  if (availability.groundingManager) blocks.push(TOOL_SECTIONS.groundingManager);
261
276
  if (availability.runBash) blocks.push(TOOL_SECTIONS.runBash);
262
277
  if (availability.readFile) blocks.push(TOOL_SECTIONS.readFile);
278
+ if (availability.writeFile) blocks.push(TOOL_SECTIONS.writeFile);
263
279
  if (availability.subagent) blocks.push(TOOL_SECTIONS.subagent);
264
280
 
265
281
  const webNote =
@@ -14,7 +14,6 @@ import { getOpenRouterReportedCost } from "../../utils/openrouter-reported-cost"
14
14
  import { getMcpManager } from "../mcp/mcp-manager";
15
15
  import { extractFinalAssistantText } from "../message-utils";
16
16
  import { buildOpenRouterChatSettings, getSubagentModel } from "../model-config";
17
- import { buildToolSet } from "./tool-registry";
18
17
 
19
18
  // OpenRouter client for subagents
20
19
  const openrouter = createOpenRouter();
@@ -33,6 +32,7 @@ async function getSubagentTools(): Promise<ToolSet> {
33
32
  if (!cachedSubagentBaseTools) {
34
33
  cachedSubagentBaseTools = (async () => {
35
34
  const toggles = getDaemonManager().toolToggles;
35
+ const { buildToolSet } = await import("./tool-registry");
36
36
  const { tools } = await buildToolSet(toggles, {
37
37
  omit: ["groundingManager", "subagent"],
38
38
  });
@@ -8,7 +8,9 @@ import { runBash } from "./run-bash";
8
8
  import { subagent } from "./subagents";
9
9
  import { todoManager } from "./todo-manager";
10
10
  import { webSearch } from "./web-search";
11
+ import { writeFile } from "./write-file";
11
12
 
13
+ import { getProviderCapabilities } from "../providers/capabilities";
12
14
  import type { ToolToggleId, ToolToggles } from "../../types";
13
15
  import { detectLocalPlaywrightChromium } from "../../utils/js-rendering";
14
16
 
@@ -40,13 +42,14 @@ type ToolGateResult = {
40
42
 
41
43
  const TOOL_REGISTRY: ToolEntry[] = [
42
44
  { id: "readFile", toggleKey: "readFile", tool: readFile },
45
+ { id: "writeFile", toggleKey: "writeFile", tool: writeFile },
43
46
  { id: "runBash", toggleKey: "runBash", tool: runBash },
44
47
  { id: "webSearch", toggleKey: "webSearch", tool: webSearch, gate: gateExa },
45
48
  { id: "fetchUrls", toggleKey: "fetchUrls", tool: fetchUrls, gate: gateExa },
46
49
  { id: "renderUrl", toggleKey: "renderUrl", tool: renderUrl, gate: gateRenderUrl },
47
50
  { id: "todoManager", toggleKey: "todoManager", tool: todoManager },
48
51
  { id: "groundingManager", toggleKey: "groundingManager", tool: groundingManager },
49
- { id: "subagent", toggleKey: "subagent", tool: subagent },
52
+ { id: "subagent", toggleKey: "subagent", tool: subagent, gate: gateSubagent },
50
53
  ];
51
54
 
52
55
  function gateExa(): Promise<ToolGateResult> {
@@ -65,9 +68,24 @@ async function gateRenderUrl(): Promise<ToolGateResult> {
65
68
  };
66
69
  }
67
70
 
71
+ function gateSubagent(): Promise<ToolGateResult> {
72
+ const capabilities = getProviderCapabilities();
73
+ if (!capabilities.supportsSubagentTool) {
74
+ return Promise.resolve({
75
+ envAvailable: false,
76
+ disabledReason: "Subagent tool is unavailable for the current model provider.",
77
+ });
78
+ }
79
+
80
+ return Promise.resolve({
81
+ envAvailable: true,
82
+ });
83
+ }
84
+
68
85
  function normalizeToggles(toggles?: ToolToggles): ToolToggles {
69
86
  return {
70
87
  readFile: toggles?.readFile ?? true,
88
+ writeFile: toggles?.writeFile ?? true,
71
89
  runBash: toggles?.runBash ?? true,
72
90
  webSearch: toggles?.webSearch ?? true,
73
91
  fetchUrls: toggles?.fetchUrls ?? true,
@@ -166,6 +184,7 @@ export async function buildToolSet(
166
184
  export function getToolLabels(): Record<ToolId, string> {
167
185
  return {
168
186
  readFile: "readFile",
187
+ writeFile: "writeFile",
169
188
  runBash: "runBash",
170
189
  webSearch: "webSearch",
171
190
  fetchUrls: "fetchUrls",
@@ -179,6 +198,7 @@ export function getToolLabels(): Record<ToolId, string> {
179
198
  export function getDefaultToolOrder(): ToolId[] {
180
199
  return [
181
200
  "readFile",
201
+ "writeFile",
182
202
  "runBash",
183
203
  "webSearch",
184
204
  "fetchUrls",
@@ -192,6 +212,7 @@ export function getDefaultToolOrder(): ToolId[] {
192
212
  export function createToolAvailabilitySnapshot(availability: ToolAvailabilityMap): Record<ToolId, boolean> {
193
213
  return {
194
214
  readFile: availability.readFile?.enabled ?? false,
215
+ writeFile: availability.writeFile?.enabled ?? false,
195
216
  runBash: availability.runBash?.enabled ?? false,
196
217
  webSearch: availability.webSearch?.enabled ?? false,
197
218
  fetchUrls: availability.fetchUrls?.enabled ?? false,
@@ -0,0 +1,51 @@
1
+ import fs from "node:fs";
2
+ import path from "node:path";
3
+ import { tool } from "ai";
4
+ import { z } from "zod";
5
+
6
+ export const writeFile = tool({
7
+ description:
8
+ "Write content to a file. Creates the file if it doesn't exist, or overwrites it if it does. Supports append mode to add content to existing files. Use this to create scripts, save outputs, write configuration files, or generate any text-based file.",
9
+ inputSchema: z.object({
10
+ path: z
11
+ .string()
12
+ .describe("Path to the file to write. Can be absolute or relative to the current working directory."),
13
+ content: z.string().describe("The content to write to the file."),
14
+ append: z
15
+ .boolean()
16
+ .optional()
17
+ .default(false)
18
+ .describe("If true, append to the file instead of overwriting. Creates the file if it doesn't exist."),
19
+ }),
20
+ execute: async ({ path: filePath, content, append }) => {
21
+ try {
22
+ const resolvedPath = path.resolve(filePath);
23
+ const dir = path.dirname(resolvedPath);
24
+
25
+ // Create parent directories if they don't exist
26
+ if (!fs.existsSync(dir)) {
27
+ fs.mkdirSync(dir, { recursive: true });
28
+ }
29
+
30
+ // Write or append to the file
31
+ if (append) {
32
+ fs.appendFileSync(resolvedPath, content, "utf8");
33
+ } else {
34
+ fs.writeFileSync(resolvedPath, content, "utf8");
35
+ }
36
+
37
+ return {
38
+ success: true,
39
+ path: resolvedPath,
40
+ bytesWritten: Buffer.byteLength(content, "utf8"),
41
+ };
42
+ } catch (error: unknown) {
43
+ const err = error instanceof Error ? error : new Error(String(error));
44
+ return {
45
+ success: false,
46
+ path: filePath,
47
+ error: err.message,
48
+ };
49
+ }
50
+ },
51
+ });
@@ -60,12 +60,16 @@ function AppOverlaysImpl({ conversationHistory, currentContentBlocks }: AppOverl
60
60
  reasoningEffort={settings.reasoningEffort}
61
61
  bashApprovalLevel={settings.bashApprovalLevel}
62
62
  supportsReasoning={settings.supportsReasoning}
63
+ supportsReasoningXHigh={settings.supportsReasoningXHigh}
64
+ modelProvider={model.currentModelProvider}
65
+ copilotAvailable={onboarding.copilotAuthenticated}
63
66
  canEnableVoiceOutput={settings.canEnableVoiceOutput}
64
67
  showFullReasoning={settings.showFullReasoning}
65
68
  showToolOutput={settings.showToolOutput}
66
69
  memoryEnabled={settings.memoryEnabled}
67
70
  onClose={() => menus.setShowSettingsMenu(false)}
68
71
  toggleInteractionMode={settingsCallbacks.onToggleInteractionMode}
72
+ cycleModelProvider={settingsCallbacks.onCycleModelProvider}
69
73
  setVoiceInteractionType={settingsCallbacks.onSetVoiceInteractionType}
70
74
  setSpeechSpeed={settingsCallbacks.onSetSpeechSpeed}
71
75
  setReasoningEffort={settingsCallbacks.onSetReasoningEffort}
@@ -81,6 +85,7 @@ function AppOverlaysImpl({ conversationHistory, currentContentBlocks }: AppOverl
81
85
  <ModelMenu
82
86
  curatedModels={model.curatedModels}
83
87
  allModels={model.openRouterModels}
88
+ modelProvider={model.currentModelProvider}
84
89
  allModelsLoading={model.openRouterModelsLoading}
85
90
  allModelsUpdatedAt={model.openRouterModelsUpdatedAt}
86
91
  currentModelId={model.currentModelId}
@@ -90,7 +95,7 @@ function AppOverlaysImpl({ conversationHistory, currentContentBlocks }: AppOverl
90
95
  />
91
96
  )}
92
97
 
93
- {menus.showProviderMenu && (
98
+ {menus.showProviderMenu && model.currentModelProvider === "openrouter" && (
94
99
  <ProviderMenu
95
100
  items={model.providerMenuItems}
96
101
  currentProviderTag={model.currentOpenRouterProviderTag}
@@ -141,10 +146,13 @@ function AppOverlaysImpl({ conversationHistory, currentContentBlocks }: AppOverl
141
146
  currentDevice={device.currentDevice}
142
147
  currentOutputDevice={device.currentOutputDevice}
143
148
  models={model.curatedModels}
149
+ currentModelProvider={model.currentModelProvider}
150
+ copilotAuthenticated={onboarding.copilotAuthenticated}
144
151
  currentModelId={model.currentModelId}
145
152
  deviceLoadTimedOut={device.deviceLoadTimedOut}
146
153
  soxAvailable={device.soxAvailable}
147
154
  soxInstallHint={device.soxInstallHint}
155
+ setCurrentModelProvider={model.setCurrentModelProvider}
148
156
  setCurrentDevice={device.setCurrentDevice}
149
157
  setCurrentOutputDevice={device.setCurrentOutputDevice}
150
158
  setCurrentModelId={model.setCurrentModelId}
@@ -12,7 +12,7 @@ import { InlineStatusIndicator } from "../../components/InlineStatusIndicator";
12
12
  import { StatusBar } from "../../components/StatusBar";
13
13
  import { TokenUsageDisplay } from "../../components/TokenUsageDisplay";
14
14
  import { TypingInputBar } from "../../components/TypingInputBar";
15
- import type { ContentBlock, ConversationMessage, TokenUsage } from "../../types";
15
+ import type { ContentBlock, ConversationMessage, LlmProvider, TokenUsage } from "../../types";
16
16
  import { DaemonState } from "../../types";
17
17
  import { COLORS, REASONING_MARKDOWN_STYLE } from "../../ui/constants";
18
18
  import { renderReasoningTicker } from "../../ui/reasoning-ticker";
@@ -68,6 +68,7 @@ export interface ConversationPaneProps {
68
68
  typing: TypingInputState;
69
69
  sessionUsage: TokenUsage;
70
70
  modelMetadata: ModelMetadata | null;
71
+ currentModelProvider: LlmProvider;
71
72
  hasInteracted: boolean;
72
73
  suppressStatusBar?: boolean;
73
74
  frostColor: string;
@@ -89,6 +90,7 @@ function ConversationPaneImpl(props: ConversationPaneProps) {
89
90
  typing,
90
91
  sessionUsage,
91
92
  modelMetadata,
93
+ currentModelProvider,
92
94
  hasInteracted,
93
95
  suppressStatusBar = false,
94
96
  frostColor,
@@ -246,7 +248,11 @@ function ConversationPaneImpl(props: ConversationPaneProps) {
246
248
  (sessionUsage.totalTokens > 0 ||
247
249
  (sessionUsage.subagentTotalTokens ?? 0) > 0 ||
248
250
  typeof sessionUsage.cost === "number") && (
249
- <TokenUsageDisplay usage={sessionUsage} modelMetadata={modelMetadata} />
251
+ <TokenUsageDisplay
252
+ usage={sessionUsage}
253
+ modelMetadata={modelMetadata}
254
+ hideCost={currentModelProvider === "copilot"}
255
+ />
250
256
  )}
251
257
 
252
258
  {hasInteracted && resetNotification && (