@shuyhere/bb-agent 0.0.13 → 0.0.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -5,6 +5,27 @@ All notable changes to BB-Agent will be documented in this file.
5
5
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
6
6
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7
7
 
8
+ ## [0.0.14] - 2026-04-12
9
+
10
+ ### Added
11
+
12
+ - fullscreen now supports extension-driven workflows and structured slash-command outcomes, including menus, hidden dispatches, and richer command result handling
13
+ - `/settings` in fullscreen now exposes compaction controls for `Auto-compact`, `Reserve tokens`, and `Keep recent tokens`
14
+ - skills can now be listed, disabled, and re-enabled from the CLI without deleting their installed files
15
+ - startup model selection now prefers configured provider/model defaults more consistently, with better OpenAI startup fallback behavior
16
+ - added a parity test script against installed pi compaction logic to keep BB token accounting aligned with upstream behavior
17
+
18
+ ### Fixed
19
+
20
+ - session resume now restores the prior model and thinking level instead of starting with mismatched runtime defaults
21
+ - fullscreen/TUI terminal rendering now sanitizes terminal control text more reliably and avoids ANSI leakage into the UI
22
+ - auto-compaction token estimation now matches pi more closely by using the last successful assistant usage plus trailing estimates, using ceil-based token heuristics, computing `tokens_before` from rebuilt context instead of raw payload size, and ignoring assistant usage from before the latest compaction boundary
23
+ - fullscreen compaction behavior and status reporting are more consistent after auto-compaction and manual compaction events, and local fullscreen actions now show an animated elapsed-time status while they run
24
+
25
+ ### Changed
26
+
27
+ - fullscreen extension workflows and session compaction support are now merged into the main interaction path on `master`
28
+
8
29
  ## [0.0.13] - 2026-04-09
9
30
 
10
31
  ### Added
@@ -107,6 +128,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
107
128
 
108
129
  - latest published package includes the post-0.0.7 startup, auth, model-default, and update-notice improvements
109
130
 
131
+ [0.0.14]: https://github.com/shuyhere/bb-agent/releases/tag/v0.0.14
110
132
  [0.0.13]: https://github.com/shuyhere/bb-agent/releases/tag/v0.0.13
111
133
  [0.0.12]: https://github.com/shuyhere/bb-agent/releases/tag/v0.0.12
112
134
  [0.0.11]: https://github.com/shuyhere/bb-agent/releases/tag/v0.0.11
package/README.md CHANGED
@@ -246,7 +246,6 @@ Or set this in `~/.bb-agent/settings.json`:
246
246
  - [Development Guide](docs/development.md) — build from source, dev workflow, project structure, debugging
247
247
  - [Contributing](CONTRIBUTING.md) — code style, PR process
248
248
  - [Changelog](CHANGELOG.md) — release history
249
- - [Security](SECURITY.md) — vulnerability reporting, security model
250
249
 
251
250
  ## Development
252
251
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@shuyhere/bb-agent",
3
- "version": "0.0.13",
3
+ "version": "0.0.14",
4
4
  "description": "BB-Agent — a Rust-native AI coding agent for the terminal",
5
5
  "license": "MIT",
6
6
  "repository": {
@@ -0,0 +1,141 @@
1
+ import test from 'node:test';
2
+ import assert from 'node:assert/strict';
3
+ import * as pi from '/home/shuyhere/projects/tako/node_modules/@mariozechner/pi-coding-agent/dist/core/compaction/compaction.js';
4
+
5
// Compaction settings shared by both estimators under test.
const settings = { enabled: true, reserveTokens: 16384, keepRecentTokens: 20000 };

/**
 * JS port of BB's context-token estimator, kept behaviorally aligned with pi.
 *
 * Strategy: find the newest assistant message that completed successfully and
 * reported non-zero usage; its provider-reported token count anchors the total,
 * and every later message is charged via a chars/4 heuristic. If no such
 * anchor exists, the whole transcript is estimated heuristically.
 *
 * @param {Array<object>} messages - transcript in BB's snake_case usage shape
 *   (`total_tokens`, `cache_read`, `cache_write`; `stopReason` stays camelCase).
 * @returns {{tokens: number, usageTokens: number, trailingTokens: number, lastUsageIndex: number|null}}
 */
function bbEstimateContextTokens(messages) {
  // Provider-reported total, falling back to the sum of component counters.
  const usageTotal = (usage) =>
    usage.total_tokens > 0
      ? usage.total_tokens
      : usage.input + usage.output + usage.cache_read + usage.cache_write;

  // Ceil-based ~4-chars-per-token heuristic (matches pi's rounding).
  const textTokens = (text) => Math.ceil(text.length / 4);

  // Heuristic cost of a single message, by role.
  const messageTokens = (message) => {
    switch (message.role) {
      // These three roles share the same shape: text blocks are measured,
      // any non-text block (e.g. an image) is charged a flat 1200 tokens.
      case 'user':
      case 'toolResult':
      case 'custom': {
        let total = 0;
        for (const block of message.content) {
          total += block.type === 'text' ? textTokens(block.text) : 1200;
        }
        return total;
      }
      case 'assistant': {
        let total = 0;
        for (const block of message.content) {
          if (block.type === 'text') {
            total += textTokens(block.text);
          } else if (block.type === 'thinking') {
            total += textTokens(block.thinking);
          } else if (block.type === 'toolCall') {
            // Tool calls cost their name plus the serialized argument payload.
            total += textTokens(block.name) + textTokens(JSON.stringify(block.arguments ?? {}));
          }
        }
        return total;
      }
      case 'bashExecution':
        return textTokens(message.command) + textTokens(message.output);
      case 'branchSummary':
      case 'compactionSummary':
        return textTokens(message.summary);
      default:
        return 0;
    }
  };

  // Walk backwards to the most recent assistant turn that finished cleanly
  // (not aborted/errored) and carries real usage numbers.
  let lastUsageIndex = null;
  for (let i = messages.length - 1; i >= 0; i -= 1) {
    const candidate = messages[i];
    if (
      candidate.role === 'assistant' &&
      candidate.stopReason !== 'aborted' &&
      candidate.stopReason !== 'error' &&
      usageTotal(candidate.usage) > 0
    ) {
      lastUsageIndex = i;
      break;
    }
  }

  // No usable anchor: estimate the entire transcript heuristically.
  if (lastUsageIndex === null) {
    let trailingTokens = 0;
    for (const message of messages) trailingTokens += messageTokens(message);
    return { tokens: trailingTokens, usageTokens: 0, trailingTokens, lastUsageIndex: null };
  }

  // Anchor on reported usage, then add estimates for everything after it.
  const usageTokens = usageTotal(messages[lastUsageIndex].usage);
  let trailingTokens = 0;
  for (const message of messages.slice(lastUsageIndex + 1)) {
    trailingTokens += messageTokens(message);
  }
  return { tokens: usageTokens + trailingTokens, usageTokens, trailingTokens, lastUsageIndex };
}
56
+
57
// Parity test: BB's token estimator must agree with the installed pi
// implementation on shared fixtures, both on the raw estimate breakdown and
// on the resulting compaction decision.
test('bb estimator matches installed pi on shared fixtures', () => {
  // Fixtures cover: (1) usage reported via component fields with trailing user
  // text, (2) an aborted assistant turn whose usage must be ignored, and
  // (3) mixed assistant content (text/thinking/toolCall) followed by a tool
  // result and a bash execution.
  const fixtures = [
    [
      {
        role: 'assistant',
        content: [{ type: 'text', text: 'done' }],
        provider: 'test',
        model: 'test',
        usage: { input: 100, output: 20, cacheRead: 10, cacheWrite: 5, totalTokens: 0 },
        stopReason: 'stop',
        timestamp: Date.now(),
      },
      { role: 'user', content: [{ type: 'text', text: '12345678' }], timestamp: Date.now() },
    ],
    [
      {
        role: 'assistant',
        content: [{ type: 'text', text: 'aborted' }],
        provider: 'test',
        model: 'test',
        usage: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, totalTokens: 500 },
        stopReason: 'aborted',
        timestamp: Date.now(),
      },
      { role: 'user', content: [{ type: 'text', text: '12345678' }], timestamp: Date.now() },
    ],
    [
      {
        role: 'assistant',
        content: [
          { type: 'text', text: 'hello' },
          { type: 'thinking', thinking: 'reasoning text' },
          { type: 'toolCall', id: '1', name: 'read', arguments: { path: 'src/main.rs' } },
        ],
        provider: 'test',
        model: 'test',
        usage: { input: 300, output: 40, cacheRead: 50, cacheWrite: 0, totalTokens: 0 },
        stopReason: 'stop',
        timestamp: Date.now(),
      },
      {
        role: 'toolResult',
        toolCallId: '1',
        toolName: 'read',
        content: [{ type: 'text', text: 'fn main() {}' }],
        isError: false,
        timestamp: Date.now(),
      },
      {
        role: 'bashExecution',
        command: 'ls -la',
        output: 'file1\nfile2\n',
        cancelled: false,
        truncated: false,
        timestamp: Date.now(),
      },
    ],
  ];

  for (const messages of fixtures) {
    const piEstimate = pi.estimateContextTokens(messages);
    // BB consumes snake_case usage fields, so rename them on a deep copy to
    // feed the same pi-shaped fixtures to the BB estimator. `stopReason` is
    // deliberately NOT renamed: bbEstimateContextTokens reads m.stopReason in
    // camelCase. NOTE: the global string replace is safe only while no message
    // text contains these key names — keep fixtures free of them.
    const bbEstimate = bbEstimateContextTokens(
      JSON.parse(
        JSON.stringify(messages)
          .replace(/cacheRead/g, 'cache_read')
          .replace(/cacheWrite/g, 'cache_write')
          .replace(/totalTokens/g, 'total_tokens')
      )
    );

    // The full breakdown must match, not just the grand total.
    assert.deepEqual(bbEstimate, {
      tokens: piEstimate.tokens,
      usageTokens: piEstimate.usageTokens,
      trailingTokens: piEstimate.trailingTokens,
      lastUsageIndex: piEstimate.lastUsageIndex,
    });

    // Both sides must reach the same compaction decision for this transcript.
    const contextWindow = 128000;
    assert.equal(
      bbEstimate.tokens > contextWindow - settings.reserveTokens,
      pi.shouldCompact(piEstimate.tokens, contextWindow, settings)
    );
  }
});