@townco/ui 0.1.19 → 0.1.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -50,6 +50,11 @@ export declare function useChatMessages(client: AcpClient | null): {
  totalTokens?: number | undefined;
  } | undefined;
  }[] | undefined;
+ tokenUsage?: {
+ inputTokens?: number | undefined;
+ outputTokens?: number | undefined;
+ totalTokens?: number | undefined;
+ } | undefined;
  }[];
  isStreaming: boolean;
  sendMessage: (content: string) => Promise<void>;
@@ -73,12 +73,16 @@ export function useChatMessages(client) {
  // Listen for streaming chunks
  let accumulatedContent = "";
  for await (const chunk of messageStream) {
+ if (chunk.tokenUsage) {
+ console.error("DEBUG use-chat-messages: chunk.tokenUsage:", JSON.stringify(chunk.tokenUsage));
+ }
  if (chunk.isComplete) {
  // Update final message
  updateMessage(assistantMessageId, {
  content: accumulatedContent,
  isStreaming: false,
  streamingStartTime: undefined, // Clear streaming start time
+ ...(chunk.tokenUsage ? { tokenUsage: chunk.tokenUsage } : {}),
  });
  setIsStreaming(false);
  setStreamingStartTime(null); // Clear global streaming start time
@@ -90,6 +94,7 @@ export function useChatMessages(client) {
  accumulatedContent += chunk.contentDelta.text;
  updateMessage(assistantMessageId, {
  content: accumulatedContent,
+ ...(chunk.tokenUsage ? { tokenUsage: chunk.tokenUsage } : {}),
  });
  // Small delay to allow Ink to render between chunks (~60fps)
  await new Promise((resolve) => setTimeout(resolve, 16));
@@ -10,6 +10,7 @@ export function useChatSession(client) {
  const setSessionId = useChatStore((state) => state.setSessionId);
  const setError = useChatStore((state) => state.setError);
  const clearMessages = useChatStore((state) => state.clearMessages);
+ const resetTokens = useChatStore((state) => state.resetTokens);
  /**
  * Connect to the agent
  */
@@ -43,12 +44,13 @@ export function useChatSession(client) {
  const id = await client.startSession();
  setSessionId(id);
  clearMessages();
+ resetTokens();
  }
  catch (error) {
  const message = error instanceof Error ? error.message : String(error);
  setError(message);
  }
- }, [client, setSessionId, setError, clearMessages]);
+ }, [client, setSessionId, setError, clearMessages, resetTokens]);
  /**
  * Disconnect from the agent
  */
@@ -72,6 +72,11 @@ export declare const DisplayMessage: z.ZodObject<{
  totalTokens: z.ZodOptional<z.ZodNumber>;
  }, z.core.$strip>>;
  }, z.core.$strip>>>;
+ tokenUsage: z.ZodOptional<z.ZodObject<{
+ inputTokens: z.ZodOptional<z.ZodNumber>;
+ outputTokens: z.ZodOptional<z.ZodNumber>;
+ totalTokens: z.ZodOptional<z.ZodNumber>;
+ }, z.core.$strip>>;
  }, z.core.$strip>;
  export type DisplayMessage = z.infer<typeof DisplayMessage>;
  /**
@@ -162,6 +167,11 @@ export declare const ChatSessionState: z.ZodObject<{
  totalTokens: z.ZodOptional<z.ZodNumber>;
  }, z.core.$strip>>;
  }, z.core.$strip>>>;
+ tokenUsage: z.ZodOptional<z.ZodObject<{
+ inputTokens: z.ZodOptional<z.ZodNumber>;
+ outputTokens: z.ZodOptional<z.ZodNumber>;
+ totalTokens: z.ZodOptional<z.ZodNumber>;
+ }, z.core.$strip>>;
  }, z.core.$strip>>;
  input: z.ZodObject<{
  value: z.ZodString;
@@ -1,5 +1,5 @@
  import { z } from "zod";
- import { ToolCallSchema } from "./tool-call.js";
+ import { TokenUsageSchema, ToolCallSchema } from "./tool-call.js";
  /**
  * Chat UI state schemas
  */
@@ -15,6 +15,7 @@ export const DisplayMessage = z.object({
  streamingStartTime: z.number().optional(), // Unix timestamp when streaming started
  metadata: z.record(z.string(), z.unknown()).optional(),
  toolCalls: z.array(ToolCallSchema).optional(),
+ tokenUsage: TokenUsageSchema.optional(), // Token usage for this message
  });
  /**
  * Input state schema
@@ -11,6 +11,18 @@ export interface ChatStore {
  isStreaming: boolean;
  streamingStartTime: number | null;
  toolCalls: Record<string, ToolCall[]>;
+ totalBilled: {
+ inputTokens: number;
+ outputTokens: number;
+ totalTokens: number;
+ };
+ currentContext: {
+ inputTokens: number;
+ outputTokens: number;
+ totalTokens: number;
+ };
+ currentModel: string | null;
+ tokenDisplayMode: "context" | "input" | "output";
  input: InputState;
  setConnectionStatus: (status: ConnectionStatus) => void;
  setSessionId: (id: string | null) => void;
@@ -29,6 +41,14 @@ export interface ChatStore {
  addFileAttachment: (file: InputState["attachedFiles"][number]) => void;
  removeFileAttachment: (index: number) => void;
  clearInput: () => void;
+ addTokenUsage: (tokenUsage: {
+ inputTokens?: number;
+ outputTokens?: number;
+ totalTokens?: number;
+ }) => void;
+ setCurrentModel: (model: string) => void;
+ resetTokens: () => void;
+ cycleTokenDisplayMode: () => void;
  }
  /**
  * Create chat store
@@ -12,6 +12,18 @@ export const useChatStore = create((set) => ({
  isStreaming: false,
  streamingStartTime: null,
  toolCalls: {},
+ totalBilled: {
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ },
+ currentContext: {
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ },
+ currentModel: "claude-sonnet-4-5-20250929", // Default model, TODO: get from server
+ tokenDisplayMode: "context", // Default to showing context (both billed and current)
  input: {
  value: "",
  isSubmitting: false,
@@ -24,9 +36,103 @@ export const useChatStore = create((set) => ({
  addMessage: (message) => set((state) => ({
  messages: [...state.messages, message],
  })),
- updateMessage: (id, updates) => set((state) => ({
- messages: state.messages.map((msg) => msg.id === id ? { ...msg, ...updates } : msg),
- })),
+ updateMessage: (id, updates) => set((state) => {
+ const existingMessage = state.messages.find((msg) => msg.id === id);
+ // If token usage was updated, we need to handle it carefully:
+ // 1. LangChain sends multiple updates for the SAME message (take max for that message)
+ // 2. We track TWO metrics: totalBilled (sum all) and currentContext (latest input + sum outputs)
+ let newTotalBilled = state.totalBilled;
+ let newCurrentContext = state.currentContext;
+ let finalUpdates = updates;
+ if (updates.tokenUsage) {
+ const existingTokenUsage = existingMessage?.tokenUsage;
+ console.error("DEBUG updateMessage: incoming tokenUsage:", JSON.stringify(updates.tokenUsage), "existing:", JSON.stringify(existingTokenUsage));
+ // LangChain sends multiple token updates:
+ // 1. Early chunk: inputTokens (context) + outputTokens (estimate) + totalTokens
+ // 2. Later chunk: inputTokens=0 + outputTokens (final) + totalTokens (just output)
+ // Strategy: Take max for input (keeps context), max for output (gets final count)
+ const messageMaxTokens = {
+ inputTokens: Math.max(updates.tokenUsage.inputTokens ?? 0, existingTokenUsage?.inputTokens ?? 0),
+ outputTokens: Math.max(updates.tokenUsage.outputTokens ?? 0, existingTokenUsage?.outputTokens ?? 0),
+ // Total should be input + output (not max, since chunks are partial)
+ totalTokens: Math.max(updates.tokenUsage.inputTokens ?? 0, existingTokenUsage?.inputTokens ?? 0) +
+ Math.max(updates.tokenUsage.outputTokens ?? 0, existingTokenUsage?.outputTokens ?? 0),
+ };
+ console.error("DEBUG updateMessage: merged tokenUsage:", JSON.stringify(messageMaxTokens));
+ // Replace the tokenUsage in updates with the max values
+ finalUpdates = {
+ ...updates,
+ tokenUsage: messageMaxTokens,
+ };
+ // Calculate the delta from what was previously counted for this message
+ const inputDelta = messageMaxTokens.inputTokens - (existingTokenUsage?.inputTokens ?? 0);
+ const outputDelta = messageMaxTokens.outputTokens -
+ (existingTokenUsage?.outputTokens ?? 0);
+ const totalDelta = messageMaxTokens.totalTokens - (existingTokenUsage?.totalTokens ?? 0);
+ // Update Total Billed (sum ALL tokens from every API call)
+ newTotalBilled = {
+ inputTokens: state.totalBilled.inputTokens + inputDelta,
+ outputTokens: state.totalBilled.outputTokens + outputDelta,
+ totalTokens: state.totalBilled.totalTokens + totalDelta,
+ };
+ // Update Current Context (latest input + sum of outputs)
+ // Note: We use the latest totalTokens from LangChain rather than calculating,
+ // because inputTokens + outputTokens doesn't always equal totalTokens
+ newCurrentContext = {
+ inputTokens: messageMaxTokens.inputTokens, // Latest context size
+ outputTokens: state.currentContext.outputTokens + outputDelta, // Sum all outputs
+ totalTokens: messageMaxTokens.totalTokens, // Use LangChain's reported total
+ };
+ }
+ // Apply the updates (using finalUpdates which has the max tokenUsage)
+ const messages = state.messages.map((msg) => msg.id === id ? { ...msg, ...finalUpdates } : msg);
+ // Verification logging (only if tokenUsage was updated)
+ if (updates.tokenUsage) {
+ const existingTokenUsage = existingMessage?.tokenUsage;
+ const messageMaxTokens = finalUpdates.tokenUsage;
+ const inputDelta = (messageMaxTokens.inputTokens ?? 0) -
+ (existingTokenUsage?.inputTokens ?? 0);
+ const outputDelta = (messageMaxTokens.outputTokens ?? 0) -
+ (existingTokenUsage?.outputTokens ?? 0);
+ const totalDelta = (messageMaxTokens.totalTokens ?? 0) -
+ (existingTokenUsage?.totalTokens ?? 0);
+ // Calculate actual sum from updated messages
+ const messageTokenBreakdown = messages
+ .filter((msg) => msg.tokenUsage)
+ .map((msg) => ({
+ id: msg.id,
+ role: msg.role,
+ tokens: msg.tokenUsage,
+ }));
+ const actualSum = messages.reduce((sum, msg) => ({
+ inputTokens: sum.inputTokens + (msg.tokenUsage?.inputTokens ?? 0),
+ outputTokens: sum.outputTokens + (msg.tokenUsage?.outputTokens ?? 0),
+ totalTokens: sum.totalTokens + (msg.tokenUsage?.totalTokens ?? 0),
+ }), { inputTokens: 0, outputTokens: 0, totalTokens: 0 });
+ const isBilledCorrect = actualSum.inputTokens === newTotalBilled.inputTokens &&
+ actualSum.outputTokens === newTotalBilled.outputTokens &&
+ actualSum.totalTokens === newTotalBilled.totalTokens;
+ console.error("DEBUG updateMessage: tokenUsage update", JSON.stringify({
+ messageId: id,
+ updates: updates.tokenUsage,
+ existing: existingTokenUsage,
+ messageMax: messageMaxTokens,
+ delta: { inputDelta, outputDelta, totalDelta },
+ totalBilled: newTotalBilled,
+ currentContext: newCurrentContext,
+ actualSum,
+ billedCorrect: isBilledCorrect ? "✅" : "❌",
+ messageCount: messages.length,
+ messagesWithTokens: messageTokenBreakdown.length,
+ breakdown: messageTokenBreakdown,
+ }));
+ }
+ return {
+ messages,
+ totalBilled: newTotalBilled,
+ currentContext: newCurrentContext,
+ };
+ }),
  clearMessages: () => set({ messages: [] }),
  setIsStreaming: (streaming) => set({ isStreaming: streaming }),
  setStreamingStartTime: (time) => set({ streamingStartTime: time }),
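
The per-message merge used in `updateMessage` above can be read in isolation. A minimal sketch, assuming the optional `TokenUsage` shape declared elsewhere in this diff; the helper name and sample numbers are illustrative only:

```ts
// Hypothetical standalone version of the per-message merge used above.
interface TokenUsage {
  inputTokens?: number;
  outputTokens?: number;
  totalTokens?: number;
}

// Keep the max of input and output seen so far for a message; recompute the total from those.
function mergeTokenUsage(existing: TokenUsage | undefined, incoming: TokenUsage): TokenUsage {
  const inputTokens = Math.max(incoming.inputTokens ?? 0, existing?.inputTokens ?? 0);
  const outputTokens = Math.max(incoming.outputTokens ?? 0, existing?.outputTokens ?? 0);
  return { inputTokens, outputTokens, totalTokens: inputTokens + outputTokens };
}

// Example: an early chunk reports the context size, a later chunk the final output count.
const early = mergeTokenUsage(undefined, { inputTokens: 1200, outputTokens: 5, totalTokens: 1205 });
const merged = mergeTokenUsage(early, { inputTokens: 0, outputTokens: 340, totalTokens: 340 });
// merged => { inputTokens: 1200, outputTokens: 340, totalTokens: 1540 }
```
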
@@ -126,4 +232,43 @@ export const useChatStore = create((set) => ({
  attachedFiles: [],
  },
  })),
+ addTokenUsage: (tokenUsage) => set((state) => ({
+ totalBilled: {
+ inputTokens: state.totalBilled.inputTokens + (tokenUsage.inputTokens ?? 0),
+ outputTokens: state.totalBilled.outputTokens + (tokenUsage.outputTokens ?? 0),
+ totalTokens: state.totalBilled.totalTokens + (tokenUsage.totalTokens ?? 0),
+ },
+ currentContext: {
+ inputTokens: tokenUsage.inputTokens ?? state.currentContext.inputTokens,
+ outputTokens: state.currentContext.outputTokens + (tokenUsage.outputTokens ?? 0),
+ totalTokens: (tokenUsage.inputTokens ?? state.currentContext.inputTokens) +
+ (state.currentContext.outputTokens + (tokenUsage.outputTokens ?? 0)),
+ },
+ })),
+ setCurrentModel: (model) => set({ currentModel: model }),
+ resetTokens: () => set({
+ totalBilled: {
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ },
+ currentContext: {
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ },
+ }),
+ cycleTokenDisplayMode: () => set((state) => {
+ const modes = [
+ "context",
+ "input",
+ "output",
+ ];
+ const currentIndex = modes.indexOf(state.tokenDisplayMode);
+ const nextIndex = (currentIndex + 1) % modes.length;
+ const nextMode = modes[nextIndex];
+ if (!nextMode)
+ return state; // Should never happen, but satisfies TypeScript
+ return { tokenDisplayMode: nextMode };
+ }),
  }));
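
For orientation, a sketch of how the new store actions behave when driven directly; zustand stores expose `getState()` outside React, and the import path and numbers here are assumptions, not part of the package:

```ts
import { useChatStore } from "@townco/ui"; // export path assumed

const { addTokenUsage, cycleTokenDisplayMode, resetTokens } = useChatStore.getState();

addTokenUsage({ inputTokens: 1200, outputTokens: 300, totalTokens: 1500 });
addTokenUsage({ outputTokens: 250 }); // later, output-only report

useChatStore.getState().totalBilled;    // sums every report
useChatStore.getState().currentContext; // keeps the latest input size, sums outputs

cycleTokenDisplayMode(); // "context" -> "input" -> "output" -> back to "context"
resetTokens();           // both counters back to zero, e.g. when a new session starts
```
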
@@ -0,0 +1,25 @@
+ /**
+ * Model context window sizes in tokens
+ */
+ export declare const MODEL_CONTEXT_WINDOWS: Record<string, number>;
+ /**
+ * Get the context window size for a given model
+ * @param model - The model name/ID
+ * @returns The context window size in tokens
+ */
+ export declare function getModelContextWindow(model: string | undefined): number;
+ /**
+ * Calculate token usage percentage
+ * @param tokens - Number of tokens used
+ * @param model - The model name/ID
+ * @returns Percentage of context window used (0-100)
+ */
+ export declare function calculateTokenPercentage(tokens: number, model: string | undefined): number;
+ /**
+ * Format token usage as percentage string
+ * @param tokens - Number of tokens used
+ * @param model - The model name/ID
+ * @param decimals - Number of decimal places (default: 1)
+ * @returns Formatted percentage string (e.g., "12.5%")
+ */
+ export declare function formatTokenPercentage(tokens: number, model: string | undefined, decimals?: number): string;
@@ -0,0 +1,82 @@
+ /**
+ * Default context window size for unknown models
+ */
+ const DEFAULT_CONTEXT_WINDOW = 128_000;
+ /**
+ * Model context window sizes in tokens
+ */
+ export const MODEL_CONTEXT_WINDOWS = {
+ // Claude models
+ "claude-sonnet-4-5-20250929": 200_000,
+ "claude-sonnet-4-20250514": 200_000,
+ "claude-3-5-sonnet-20241022": 200_000,
+ "claude-3-5-sonnet-20240620": 200_000,
+ "claude-3-opus-20240229": 200_000,
+ "claude-3-sonnet-20240229": 200_000,
+ "claude-3-haiku-20240307": 200_000,
+ // OpenAI GPT-4 models
+ "gpt-4-turbo": 128_000,
+ "gpt-4-turbo-preview": 128_000,
+ "gpt-4-0125-preview": 128_000,
+ "gpt-4-1106-preview": 128_000,
+ "gpt-4": 8_192,
+ "gpt-4-32k": 32_768,
+ // OpenAI GPT-3.5 models
+ "gpt-3.5-turbo": 16_385,
+ "gpt-3.5-turbo-16k": 16_385,
+ // Default fallback
+ default: DEFAULT_CONTEXT_WINDOW,
+ };
+ /**
+ * Get the context window size for a given model
+ * @param model - The model name/ID
+ * @returns The context window size in tokens
+ */
+ export function getModelContextWindow(model) {
+ if (!model) {
+ return DEFAULT_CONTEXT_WINDOW;
+ }
+ // Direct match
+ if (model in MODEL_CONTEXT_WINDOWS) {
+ return MODEL_CONTEXT_WINDOWS[model] ?? DEFAULT_CONTEXT_WINDOW;
+ }
+ // Partial match for model families
+ const normalizedModel = model.toLowerCase();
+ if (normalizedModel.includes("claude")) {
+ return 200_000;
+ }
+ if (normalizedModel.includes("gpt-4-turbo")) {
+ return 128_000;
+ }
+ if (normalizedModel.includes("gpt-4-32k")) {
+ return 32_768;
+ }
+ if (normalizedModel.includes("gpt-4")) {
+ return 8_192;
+ }
+ if (normalizedModel.includes("gpt-3.5")) {
+ return 16_385;
+ }
+ return DEFAULT_CONTEXT_WINDOW;
+ }
+ /**
+ * Calculate token usage percentage
+ * @param tokens - Number of tokens used
+ * @param model - The model name/ID
+ * @returns Percentage of context window used (0-100)
+ */
+ export function calculateTokenPercentage(tokens, model) {
+ const contextWindow = getModelContextWindow(model);
+ return (tokens / contextWindow) * 100;
+ }
+ /**
+ * Format token usage as percentage string
+ * @param tokens - Number of tokens used
+ * @param model - The model name/ID
+ * @param decimals - Number of decimal places (default: 1)
+ * @returns Formatted percentage string (e.g., "12.5%")
+ */
+ export function formatTokenPercentage(tokens, model, decimals = 1) {
+ const percentage = calculateTokenPercentage(tokens, model);
+ return `${percentage.toFixed(decimals)}%`;
+ }
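
A short usage sketch of the helpers added in this file; the export path is assumed, and the return values follow from the table and fallbacks above:

```ts
import {
  getModelContextWindow,
  calculateTokenPercentage,
  formatTokenPercentage,
} from "@townco/ui"; // export path assumed

getModelContextWindow("claude-sonnet-4-5-20250929"); // 200_000 (direct match)
getModelContextWindow("gpt-4o");                     // 8_192 (falls through to the "gpt-4" family match)
getModelContextWindow(undefined);                    // 128_000 (DEFAULT_CONTEXT_WINDOW)

calculateTokenPercentage(25_000, "claude-sonnet-4-5-20250929"); // 12.5
formatTokenPercentage(25_000, "claude-sonnet-4-5-20250929");    // "12.5%"
```
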
@@ -1,7 +1,7 @@
  import { type VariantProps } from "class-variance-authority";
  import * as React from "react";
  declare const buttonVariants: (props?: ({
- variant?: "default" | "link" | "destructive" | "outline" | "secondary" | "ghost" | null | undefined;
+ variant?: "default" | "destructive" | "outline" | "secondary" | "ghost" | "link" | null | undefined;
  size?: "default" | "sm" | "lg" | "icon" | null | undefined;
  } & import("class-variance-authority/types").ClassProp) | undefined) => string;
  export interface ButtonProps extends React.ButtonHTMLAttributes<HTMLButtonElement>, VariantProps<typeof buttonVariants> {
@@ -3,6 +3,7 @@ import { cva } from "class-variance-authority";
  import { Loader2Icon } from "lucide-react";
  import * as React from "react";
  import { useChatStore } from "../../core/store/chat-store.js";
+ import { formatTokenPercentage } from "../../core/utils/model-context.js";
  import { cn } from "../lib/utils.js";
  import { Reasoning } from "./Reasoning.js";
  import { Response } from "./Response.js";
@@ -82,8 +83,9 @@ const messageContentVariants = cva("w-full px-4 py-3 rounded-xl text-[var(--font
  },
  });
  export const MessageContent = React.forwardRef(({ role: roleProp, variant, isStreaming: isStreamingProp, message, thinkingDisplayStyle = "collapsible", className, children, ...props }, ref) => {
- // Get streaming start time from store (when using smart rendering)
+ // Get streaming start time and current model from store
  const streamingStartTime = useChatStore((state) => state.streamingStartTime);
+ const currentModel = useChatStore((state) => state.currentModel);
  // Use smart rendering if message is provided and no custom children
  const useSmartRendering = message && !children;
  // Derive props from message if using smart rendering
@@ -100,7 +102,7 @@ export const MessageContent = React.forwardRef(({ role: roleProp, variant, isStr
  const isWaiting = message.isStreaming && !message.content && message.role === "assistant";
  content = (_jsxs(_Fragment, { children: [message.role === "assistant" && hasThinking && (_jsx(Reasoning, { content: thinking, isStreaming: message.isStreaming, mode: thinkingDisplayStyle, autoCollapse: true })), isWaiting && streamingStartTime && (_jsxs("div", { className: "flex items-center gap-2 opacity-50", children: [_jsx(Loader2Icon, { className: "size-4 animate-spin text-muted-foreground" }), _jsx(WaitingElapsedTime, { startTime: streamingStartTime })] })), message.role === "assistant" &&
  message.toolCalls &&
- message.toolCalls.length > 0 && (_jsx("div", { className: "flex flex-col gap-2 mb-3", children: message.toolCalls.map((toolCall) => (_jsx(ToolCall, { toolCall: toolCall }, toolCall.id))) })), message.role === "user" ? (_jsx("div", { className: "whitespace-pre-wrap", children: message.content })) : (_jsx(Response, { content: message.content, isStreaming: message.isStreaming, showEmpty: false }))] }));
+ message.toolCalls.length > 0 && (_jsx("div", { className: "flex flex-col gap-2 mb-3", children: message.toolCalls.map((toolCall) => (_jsx(ToolCall, { toolCall: toolCall }, toolCall.id))) })), message.role === "user" ? (_jsx("div", { className: "whitespace-pre-wrap", children: message.content })) : (_jsx(Response, { content: message.content, isStreaming: message.isStreaming, showEmpty: false })), message.role === "assistant" && message.tokenUsage && (_jsx("div", { className: "mt-3 pt-2 border-t border-border/30 text-xs text-muted-foreground/60", children: _jsxs("span", { children: ["Context:", " ", formatTokenPercentage(message.tokenUsage.totalTokens ?? 0, currentModel ?? undefined), " ", "(", (message.tokenUsage.totalTokens ?? 0).toLocaleString(), " ", "tokens)"] }) }))] }));
  }
  return (_jsx("div", { ref: ref, className: cn(messageContentVariants({ role, variant }), isStreaming && "animate-pulse-subtle", className), ...props, children: content }));
  });
@@ -191,5 +191,10 @@ export declare const MessageChunk: z.ZodObject<{
  error: z.ZodOptional<z.ZodString>;
  }, z.core.$strip>], "type">;
  isComplete: z.ZodBoolean;
+ tokenUsage: z.ZodOptional<z.ZodObject<{
+ inputTokens: z.ZodOptional<z.ZodNumber>;
+ outputTokens: z.ZodOptional<z.ZodNumber>;
+ totalTokens: z.ZodOptional<z.ZodNumber>;
+ }, z.core.$strip>>;
  }, z.core.$strip>;
  export type MessageChunk = z.infer<typeof MessageChunk>;
@@ -92,4 +92,11 @@ export const MessageChunk = z.object({
  role: MessageRole,
  contentDelta: Content,
  isComplete: z.boolean(),
+ tokenUsage: z
+ .object({
+ inputTokens: z.number().optional(),
+ outputTokens: z.number().optional(),
+ totalTokens: z.number().optional(),
+ })
+ .optional(),
  });
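
The inferred `MessageChunk` type now carries the optional usage block; a minimal consumer sketch (type-only, export path assumed):

```ts
import type { MessageChunk } from "@townco/ui"; // export path assumed

// Summarize a chunk's usage if the agent reported any.
function describeUsage(chunk: MessageChunk): string {
  if (!chunk.tokenUsage) return "no usage reported";
  const { inputTokens = 0, outputTokens = 0, totalTokens = 0 } = chunk.tokenUsage;
  return `in=${inputTokens} out=${outputTokens} total=${totalTokens}`;
}
```
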
@@ -1,4 +1,6 @@
  import * as acp from "@agentclientprotocol/sdk";
+ import { createLogger } from "../../core/lib/logger.js";
+ const logger = createLogger("http-transport");
  /**
  * HTTP transport implementation using ACP over HTTP + SSE
  * Uses POST /rpc for client->agent messages and GET /events (SSE) for agent->client
@@ -39,7 +41,7 @@ export class HttpTransport {
  },
  };
  const initResponse = await this.sendRpcRequest("initialize", initRequest);
- console.log("ACP connection initialized:", initResponse);
+ logger.info("ACP connection initialized", { initResponse });
  // Step 2: Create a new session
  const sessionRequest = {
  cwd: "/",
@@ -47,7 +49,7 @@
  };
  const sessionResponse = await this.sendRpcRequest("session/new", sessionRequest);
  this.currentSessionId = sessionResponse.sessionId;
- console.log("Session created:", this.currentSessionId);
+ logger.info("Session created", { sessionId: this.currentSessionId });
  // Step 3: Open SSE connection for receiving messages
  await this.connectSSE();
  this.connected = true;
@@ -115,7 +117,7 @@
  };
  // Send the prompt - this will trigger streaming responses via SSE
  const promptResponse = await this.sendRpcRequest("session/prompt", promptRequest);
- console.log("Prompt sent:", promptResponse);
+ logger.debug("Prompt sent", { promptResponse });
  // Mark stream as complete after prompt finishes
  this.streamComplete = true;
  // Send completion chunk
@@ -274,7 +276,7 @@
  if (!response.body) {
  throw new Error("Response body is null");
  }
- console.log("SSE connection opened");
+ logger.debug("SSE connection opened");
  this.reconnectAttempts = 0;
  this.reconnectDelay = 1000;
  // Read the SSE stream
@@ -287,7 +289,7 @@
  while (true) {
  const { done, value } = await reader.read();
  if (done) {
- console.log("SSE stream closed by server");
+ logger.debug("SSE stream closed by server");
  if (this.connected) {
  await this.handleSSEDisconnect();
  }
@@ -319,10 +321,10 @@
  }
  catch (error) {
  if (error instanceof Error && error.name === "AbortError") {
- console.log("SSE stream aborted");
+ logger.debug("SSE stream aborted");
  return;
  }
- console.error("Error reading SSE stream:", error);
+ logger.error("Error reading SSE stream", { error });
  if (this.connected && !this.reconnecting) {
  await this.handleSSEDisconnect();
  }
@@ -330,7 +332,7 @@
  })();
  }
  catch (error) {
- console.error("SSE connection error:", error);
+ logger.error("SSE connection error", { error });
  throw error;
  }
  }
@@ -356,15 +358,19 @@
  }
  this.reconnectAttempts++;
  const delay = Math.min(this.reconnectDelay * 2 ** (this.reconnectAttempts - 1), 32000);
- console.log(`SSE reconnecting in ${delay}ms (attempt ${this.reconnectAttempts}/${this.maxReconnectAttempts})`);
+ logger.debug("SSE reconnecting", {
+ delay,
+ attempt: this.reconnectAttempts,
+ maxAttempts: this.maxReconnectAttempts,
+ });
  await new Promise((resolve) => setTimeout(resolve, delay));
  try {
  await this.connectSSE();
- console.log("SSE reconnected successfully");
+ logger.info("SSE reconnected successfully");
  this.reconnecting = false;
  }
  catch (error) {
- console.error("SSE reconnection failed:", error);
+ logger.error("SSE reconnection failed", { error });
  this.reconnecting = false;
  // Will try again on next error event
  }
@@ -375,27 +381,33 @@
  handleSSEMessage(data) {
  try {
  const message = JSON.parse(data);
- console.log("[HTTP Transport] Received SSE message:", message);
+ logger.debug("Received SSE message", { message });
  // Validate the message is an ACP agent outgoing message
  const parseResult = acp.agentOutgoingMessageSchema.safeParse(message);
  if (!parseResult.success) {
- console.error("Invalid ACP message from SSE:", parseResult.error.issues);
+ logger.error("Invalid ACP message from SSE", {
+ issues: parseResult.error.issues,
+ });
  return;
  }
  const acpMessage = parseResult.data;
- console.log("[HTTP Transport] Parsed ACP message, method:", "method" in acpMessage ? acpMessage.method : "(no method)");
+ logger.debug("Parsed ACP message", {
+ method: "method" in acpMessage ? acpMessage.method : "(no method)",
+ });
  // Check if this is a notification (has method but not a response)
  if ("method" in acpMessage && acpMessage.method === "session/update") {
- console.log("[HTTP Transport] This is a session/update notification");
+ logger.debug("Received session/update notification");
  // Type narrowing: we know it has method and params
  if ("params" in acpMessage && acpMessage.params) {
- console.log("[HTTP Transport] Calling handleSessionNotification with params:", acpMessage.params);
+ logger.debug("Calling handleSessionNotification", {
+ params: acpMessage.params,
+ });
  this.handleSessionNotification(acpMessage.params);
  }
  }
  }
  catch (error) {
- console.error("Error parsing SSE message:", error);
+ logger.error("Error parsing SSE message", { error });
  this.notifyError(error instanceof Error ? error : new Error(String(error)));
  }
  }
@@ -403,15 +415,19 @@
  * Handle a session notification from the agent
  */
  handleSessionNotification(params) {
- console.log("[HTTP Transport] handleSessionNotification called with:", params);
+ logger.debug("handleSessionNotification called", { params });
  // Extract content from the update
  const paramsExtended = params;
  const update = paramsExtended.update;
  const sessionId = this.currentSessionId || params.sessionId;
- console.log("[HTTP Transport] update.sessionUpdate type:", update?.sessionUpdate);
+ logger.debug("Update session type", {
+ sessionUpdate: update?.sessionUpdate,
+ });
  // Handle ACP tool call notifications
  if (update?.sessionUpdate === "tool_call") {
- console.log("[HTTP Transport] tool_call - tokenUsage:", update.tokenUsage);
+ logger.debug("Tool call notification", {
+ tokenUsage: update.tokenUsage,
+ });
  // Extract messageId from _meta
  const messageId = update._meta &&
  typeof update._meta === "object" &&
@@ -556,7 +572,9 @@
  toolCallUpdate: toolCallUpdate,
  messageId,
  };
- console.log("[HTTP Transport] Notifying tool_call_update session update:", sessionUpdate);
+ logger.debug("Notifying tool_call_update session update", {
+ sessionUpdate,
+ });
  this.notifySessionUpdate(sessionUpdate);
  }
  else if (update &&
@@ -629,7 +647,9 @@
  toolCallUpdate: toolOutput,
  messageId,
  };
- console.log("[HTTP Transport] Notifying tool_output as tool_call_update:", sessionUpdate);
+ logger.debug("Notifying tool_output as tool_call_update", {
+ sessionUpdate,
+ });
  this.notifySessionUpdate(sessionUpdate);
  }
  else if (update?.sessionUpdate === "agent_message_chunk") {
@@ -639,6 +659,15 @@
  sessionId,
  status: "active",
  };
+ // Extract token usage from _meta if present
+ const tokenUsage = update._meta &&
+ typeof update._meta === "object" &&
+ "tokenUsage" in update._meta
+ ? update._meta.tokenUsage
+ : undefined;
+ logger.debug("Agent message chunk", {
+ tokenUsage,
+ });
  // Queue message chunks if present
  // For agent_message_chunk, content is an object, not an array
  const content = update.content;
@@ -650,6 +679,7 @@
  id: params.sessionId,
  role: "assistant",
  contentDelta: { type: "text", text: contentObj.text },
+ tokenUsage,
  isComplete: false,
  };
  }
@@ -692,7 +722,7 @@
  callback(update);
  }
  catch (error) {
- console.error("Error in session update callback:", error);
+ logger.error("Error in session update callback", { error });
  }
  }
  }
@@ -705,7 +735,7 @@
  callback(error);
  }
  catch (err) {
- console.error("Error in error callback:", err);
+ logger.error("Error in error callback", { error: err });
  }
  }
  }
@@ -1,6 +1,8 @@
  import { spawn } from "node:child_process";
  import { Readable, Writable } from "node:stream";
  import { ClientSideConnection, ndJsonStream, PROTOCOL_VERSION, } from "@agentclientprotocol/sdk";
+ import { createLogger } from "../../core/lib/logger.js";
+ const logger = createLogger("stdio-transport");
  /**
  * Stdio transport implementation using Agent Client Protocol SDK
  * Uses JSON-RPC 2.0 over stdio to communicate with agents
@@ -47,7 +49,7 @@ export class StdioTransport {
  async requestPermission(params) {
  // For now, auto-approve all permissions
  // In a real implementation, this should prompt the user
- console.log(`Permission requested:`, params);
+ logger.debug("Permission requested", { params });
  // @ts-expect-error - Type mismatch with ACP SDK v0.5.1
  return { outcome: "approved" };
  },
@@ -209,6 +211,20 @@
  sessionId,
  status: "active",
  };
+ logger.debug("Processing agent_message_chunk", { update });
+ // Extract token usage from _meta if available
+ const tokenUsage = "_meta" in update &&
+ update._meta &&
+ typeof update._meta === "object" &&
+ "tokenUsage" in update._meta
+ ? update._meta.tokenUsage
+ : undefined;
+ if (tokenUsage) {
+ logger.debug("Extracted tokenUsage from _meta", { tokenUsage });
+ }
+ else {
+ logger.debug("No tokenUsage in _meta");
+ }
  // Queue message chunks if present
  // For agent_message_chunk, content is an object, not an array
  const content = update.content;
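
Both transports now pull token usage out of the update's `_meta` bag before building a chunk. A standalone sketch of that narrowing, assuming only the `_meta.tokenUsage` key shown in this diff; the helper name is illustrative:

```ts
interface TokenUsage {
  inputTokens?: number;
  outputTokens?: number;
  totalTokens?: number;
}

// Narrow an unknown _meta payload to TokenUsage, mirroring the inline checks above.
function tokenUsageFromMeta(meta: unknown): TokenUsage | undefined {
  if (meta && typeof meta === "object" && "tokenUsage" in meta) {
    return (meta as { tokenUsage?: TokenUsage }).tokenUsage;
  }
  return undefined;
}

tokenUsageFromMeta({ tokenUsage: { inputTokens: 900, outputTokens: 40 } }); // => the usage object
tokenUsageFromMeta({});        // => undefined
tokenUsageFromMeta(undefined); // => undefined
```
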
@@ -222,6 +238,7 @@
  role: "assistant",
  contentDelta: { type: "text", text: contentObj.text },
  isComplete: false,
+ ...(tokenUsage ? { tokenUsage } : {}),
  };
  }
  if (chunk) {
@@ -264,7 +281,7 @@
  this.connection = new ClientSideConnection(clientFactory, stream);
  // Handle stderr for debugging
  this.agentProcess.stderr.on("data", (data) => {
- console.error(`Agent stderr: ${data}`);
+ logger.debug("Agent stderr output", { output: data.toString() });
  });
  // Handle process exit
  this.agentProcess.on("exit", (code, signal) => {
@@ -286,7 +303,7 @@
  },
  },
  });
- console.log("ACP connection initialized:", initResponse);
+ logger.info("ACP connection initialized", { initResponse });
  this.connected = true;
  }
  catch (error) {
@@ -351,7 +368,7 @@
  },
  ],
  });
- console.log("Prompt response:", promptResponse);
+ logger.debug("Prompt response received", { promptResponse });
  // Mark stream as complete after prompt finishes
  this.streamComplete = true;
  // Resolve any waiting receive() calls with completion
@@ -432,7 +449,7 @@
  callback(update);
  }
  catch (error) {
- console.error("Error in session update callback:", error);
+ logger.error("Error in session update callback", { error });
  }
  }
  }
@@ -442,7 +459,7 @@
  callback(error);
  }
  catch (err) {
- console.error("Error in error callback:", err);
+ logger.error("Error in error callback", { error: err });
  }
  }
  }
@@ -7,6 +7,10 @@ import { StatusBar } from "./StatusBar.js";
  export function ChatView({ client }) {
  const setIsStreaming = useChatStore((state) => state.setIsStreaming);
  const streamingStartTime = useChatStore((state) => state.streamingStartTime);
+ const totalBilled = useChatStore((state) => state.totalBilled);
+ const currentContext = useChatStore((state) => state.currentContext);
+ const currentModel = useChatStore((state) => state.currentModel);
+ const tokenDisplayMode = useChatStore((state) => state.tokenDisplayMode);
  // Use headless hooks for business logic
  const { connectionStatus, sessionId } = useChatSession(client);
  const { messages, isStreaming } = useChatMessages(client);
@@ -21,5 +25,5 @@ export function ChatView({ client }) {
  setIsStreaming(false);
  }
  };
- return (_jsxs(Box, { flexDirection: "column", height: "100%", children: [_jsx(Box, { flexGrow: 1, flexDirection: "column", children: _jsx(MessageList, { messages: messages }) }), _jsx(InputBox, { value: value, isSubmitting: isSubmitting, attachedFiles: attachedFiles, onChange: onChange, onSubmit: onSubmit, onEscape: handleEscape }), _jsx(StatusBar, { connectionStatus: connectionStatus, sessionId: sessionId, isStreaming: isStreaming, streamingStartTime: streamingStartTime, hasStreamingContent: hasStreamingContent })] }));
+ return (_jsxs(Box, { flexDirection: "column", height: "100%", children: [_jsx(Box, { flexGrow: 1, flexDirection: "column", children: _jsx(MessageList, { messages: messages }) }), _jsx(InputBox, { value: value, isSubmitting: isSubmitting, attachedFiles: attachedFiles, onChange: onChange, onSubmit: onSubmit, onEscape: handleEscape }), _jsx(StatusBar, { connectionStatus: connectionStatus, sessionId: sessionId, isStreaming: isStreaming, streamingStartTime: streamingStartTime, hasStreamingContent: hasStreamingContent, totalBilled: totalBilled, currentContext: currentContext, currentModel: currentModel, tokenDisplayMode: tokenDisplayMode })] }));
  }
@@ -3,6 +3,7 @@ export interface ReadlineInputProps {
  onChange: (value: string) => void;
  onSubmit: () => void;
  onEscape?: () => void;
+ onCtrlT?: () => void;
  placeholder?: string;
  }
- export declare function ReadlineInput({ value, onChange, onSubmit, onEscape, placeholder, }: ReadlineInputProps): import("react/jsx-runtime").JSX.Element;
+ export declare function ReadlineInput({ value, onChange, onSubmit, onEscape, onCtrlT, placeholder, }: ReadlineInputProps): import("react/jsx-runtime").JSX.Element;
@@ -1,7 +1,7 @@
  import { jsx as _jsx, Fragment as _Fragment, jsxs as _jsxs } from "react/jsx-runtime";
  import { Text, useInput } from "ink";
  import React, { useState } from "react";
- export function ReadlineInput({ value, onChange, onSubmit, onEscape, placeholder = "", }) {
+ export function ReadlineInput({ value, onChange, onSubmit, onEscape, onCtrlT, placeholder = "", }) {
  const [cursorOffset, setCursorOffset] = useState(0);
  useInput((input, key) => {
  // Handle Esc: Call escape handler
@@ -107,6 +107,13 @@ export function ReadlineInput({ value, onChange, onSubmit, onEscape, placeholder
  }
  return;
  }
+ // Ctrl+T: Cycle token display mode
+ if (input === "t") {
+ if (onCtrlT) {
+ onCtrlT();
+ }
+ return;
+ }
  }
  // Handle backspace
  if (key.backspace || key.delete) {
@@ -5,5 +5,17 @@ export interface StatusBarProps {
  isStreaming: boolean;
  streamingStartTime: number | null;
  hasStreamingContent: boolean;
+ totalBilled?: {
+ inputTokens: number;
+ outputTokens: number;
+ totalTokens: number;
+ };
+ currentContext?: {
+ inputTokens: number;
+ outputTokens: number;
+ totalTokens: number;
+ };
+ currentModel?: string | null;
+ tokenDisplayMode: "context" | "input" | "output";
  }
- export declare function StatusBar({ connectionStatus, isStreaming, streamingStartTime, hasStreamingContent, }: StatusBarProps): import("react/jsx-runtime").JSX.Element;
+ export declare function StatusBar({ connectionStatus, isStreaming, streamingStartTime, hasStreamingContent, totalBilled: _totalBilled, currentContext, currentModel, tokenDisplayMode: _tokenDisplayMode, }: StatusBarProps): import("react/jsx-runtime").JSX.Element;
@@ -1,6 +1,7 @@
  import { jsxs as _jsxs, jsx as _jsx, Fragment as _Fragment } from "react/jsx-runtime";
  import { Box, Text } from "ink";
  import { useEffect, useState } from "react";
+ import { calculateTokenPercentage, formatTokenPercentage, } from "../../core/utils/model-context.js";
  // Synonyms of "thinking" in multiple languages
  const THINKING_WORDS = [
  "Thinking", // English
@@ -32,6 +33,32 @@ const DOT_ANIMATIONS = [
  ".·.", // Middle dot up
  "·..", // First dot up
  ];
+ /**
+ * Get color based on token usage percentage
+ * Green: 0-50%, Yellow: 50-75%, Red: 75%+
+ */
+ function getTokenColor(percentage) {
+ if (percentage < 50) {
+ return "green";
+ }
+ if (percentage < 75) {
+ return "yellow";
+ }
+ return "red";
+ }
+ /**
+ * Format token count with K/M suffixes
+ * Examples: 1200 -> 1.2K, 1000000 -> 1M, 500 -> 500
+ */
+ function formatTokenCount(count) {
+ if (count >= 1_000_000) {
+ return `${(count / 1_000_000).toFixed(1)}M`;
+ }
+ if (count >= 1_000) {
+ return `${(count / 1_000).toFixed(1)}K`;
+ }
+ return count.toString();
+ }
  /**
  * Component that displays elapsed time for waiting/streaming
  */
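
The thresholds and suffixes above work out as follows; the helpers are module-private, so this sketch uses illustrative local copies rather than the real functions:

```ts
// Illustrative copies of the StatusBar helpers (not exported by the package).
const tokenColor = (pct: number) => (pct < 50 ? "green" : pct < 75 ? "yellow" : "red");
const tokenCount = (n: number) =>
  n >= 1_000_000 ? `${(n / 1_000_000).toFixed(1)}M`
  : n >= 1_000 ? `${(n / 1_000).toFixed(1)}K`
  : n.toString();

tokenColor(12.5);      // "green"
tokenColor(60);        // "yellow"
tokenColor(80);        // "red"
tokenCount(500);       // "500"
tokenCount(1200);      // "1.2K"
tokenCount(1_000_000); // "1.0M"
```
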
@@ -68,7 +95,7 @@ function WaitingElapsedTime({ startTime }) {
  const animatedDots = DOT_ANIMATIONS[dotIndex];
  return (_jsxs(Text, { color: "red", children: [thinkingWord, animatedDots, " ", seconds, "s"] }));
  }
- export function StatusBar({ connectionStatus, isStreaming, streamingStartTime, hasStreamingContent, }) {
+ export function StatusBar({ connectionStatus, isStreaming, streamingStartTime, hasStreamingContent, totalBilled: _totalBilled, currentContext, currentModel, tokenDisplayMode: _tokenDisplayMode, }) {
  const statusColor = connectionStatus === "connected"
  ? "green"
  : connectionStatus === "connecting"
@@ -78,5 +105,15 @@ export function StatusBar({ connectionStatus, isStreaming, streamingStartTime, h
  : "gray";
  // Only show waiting indicator when streaming but haven't received content yet
  const showWaiting = isStreaming && streamingStartTime && !hasStreamingContent;
- return (_jsxs(Box, { flexDirection: "row", gap: 2, paddingY: 1, children: [_jsxs(Text, { children: ["Status: ", _jsx(Text, { color: statusColor, children: connectionStatus })] }), showWaiting && (_jsxs(_Fragment, { children: [_jsx(Text, { children: " " }), _jsx(WaitingElapsedTime, { startTime: streamingStartTime })] }))] }));
+ // Render token display: Context percentage, input count, output count
+ const renderTokenDisplay = () => {
+ const inputTokens = currentContext?.inputTokens ?? 0;
+ const outputTokens = currentContext?.outputTokens ?? 0;
+ const contextTokens = currentContext?.totalTokens ?? 0;
+ const contextPercentage = calculateTokenPercentage(contextTokens, currentModel ?? undefined);
+ const contextPercentageStr = formatTokenPercentage(contextTokens, currentModel ?? undefined);
+ const contextColor = getTokenColor(contextPercentage);
+ return (_jsxs(Box, { flexDirection: "row", children: [_jsx(Text, { dimColor: true, children: "Context: " }), _jsx(Text, { color: contextColor, children: contextPercentageStr }), _jsx(Text, { dimColor: true, children: " | Input: " }), _jsx(Text, { children: formatTokenCount(inputTokens) }), _jsx(Text, { dimColor: true, children: " | Output: " }), _jsx(Text, { children: formatTokenCount(outputTokens) })] }));
+ };
+ return (_jsxs(Box, { flexDirection: "row", justifyContent: "space-between", paddingY: 1, children: [_jsxs(Box, { flexDirection: "row", gap: 2, children: [_jsx(Text, { dimColor: true, children: "Status: " }), _jsx(Text, { color: statusColor, children: connectionStatus }), showWaiting && (_jsxs(_Fragment, { children: [_jsx(Text, { children: " " }), _jsx(WaitingElapsedTime, { startTime: streamingStartTime })] }))] }), renderTokenDisplay()] }));
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@townco/ui",
- "version": "0.1.19",
+ "version": "0.1.20",
  "type": "module",
  "main": "./dist/index.js",
  "types": "./dist/index.d.ts",
@@ -61,7 +61,7 @@
  },
  "devDependencies": {
  "@tailwindcss/postcss": "^4.1.17",
- "@townco/tsconfig": "0.1.16",
+ "@townco/tsconfig": "0.1.17",
  "@types/node": "^24.10.0",
  "@types/react": "^19.2.2",
  "ink": "^6.4.0",