@rubytech/taskmaster 1.0.39 → 1.0.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,8 +6,8 @@
6
6
  <title>Taskmaster Control</title>
7
7
  <meta name="color-scheme" content="dark light" />
8
8
  <link rel="icon" type="image/png" href="./favicon.png" />
9
- <script type="module" crossorigin src="./assets/index-gQeHDI6a.js"></script>
10
- <link rel="stylesheet" crossorigin href="./assets/index-Ceb3FTmS.css">
9
+ <script type="module" crossorigin src="./assets/index-RlAacvDz.js"></script>
10
+ <link rel="stylesheet" crossorigin href="./assets/index-BfV0Mtl7.css">
11
11
  </head>
12
12
  <body>
13
13
  <taskmaster-app></taskmaster-app>
@@ -74,6 +74,42 @@ export const memoryHandlers = {
74
74
  respond(false, undefined, errorShape(ErrorCodes.UNAVAILABLE, String(err)));
75
75
  }
76
76
  },
77
+ "memory.search": async ({ params, respond }) => {
78
+ const query = typeof params.query === "string" ? params.query.trim() : "";
79
+ if (!query) {
80
+ respond(false, undefined, errorShape(ErrorCodes.INVALID_REQUEST, "query is required"));
81
+ return;
82
+ }
83
+ const cfg = loadConfig();
84
+ const agentId = typeof params.agentId === "string" && params.agentId.trim()
85
+ ? params.agentId.trim()
86
+ : resolveDefaultAgentId(cfg);
87
+ const { manager, error } = await getMemorySearchManager({ cfg, agentId });
88
+ if (!manager) {
89
+ respond(false, undefined, errorShape(ErrorCodes.UNAVAILABLE, error ?? "memory index unavailable"));
90
+ return;
91
+ }
92
+ try {
93
+ const maxResults = typeof params.maxResults === "number" ? params.maxResults : 10;
94
+ // minScore: 0 — show all results for diagnostic purposes
95
+ const results = await manager.search(query, { maxResults, minScore: 0 });
96
+ respond(true, {
97
+ ok: true,
98
+ agentId,
99
+ results: results.map((r) => ({
100
+ path: r.path,
101
+ startLine: r.startLine,
102
+ endLine: r.endLine,
103
+ score: r.score,
104
+ snippet: r.snippet,
105
+ source: r.source,
106
+ })),
107
+ });
108
+ }
109
+ catch (err) {
110
+ respond(false, undefined, errorShape(ErrorCodes.UNAVAILABLE, String(err)));
111
+ }
112
+ },
77
113
  "memory.auditClear": async ({ params, respond }) => {
78
114
  try {
79
115
  const cfg = loadConfig();
@@ -12,6 +12,32 @@ export function bm25RankToScore(rank) {
12
12
  const normalized = Number.isFinite(rank) ? Math.max(0, rank) : 999;
13
13
  return 1 / (1 + normalized);
14
14
  }
15
/**
 * Path-based boost factors applied during hybrid merge.
 * Curated knowledge (public/, shared/, root memory files) is boosted over
 * raw logs (conversations/, session transcripts) so authoritative content
 * outranks casual mentions at similar raw scores.
 *
 * Patterns are checked in order — first match wins.
 */
const PATH_BOOST_RULES = [
    // Conversation archives — demote (high volume, low signal-to-noise)
    { pattern: /\/conversations\//, boost: 0.6 },
    // Session source transcripts — demote
    { pattern: /^sessions\//, boost: 0.6 },
    // Curated public/shared knowledge — boost
    { pattern: /^memory\/public\//, boost: 1.4 },
    { pattern: /^memory\/shared\//, boost: 1.3 },
    // Root memory files (MEMORY.md etc.) — slight boost
    { pattern: /^(?:MEMORY|memory)\.md$/, boost: 1.2 },
];
/**
 * Look up the score multiplier for a result path.
 *
 * @param {string} filePath - Workspace-relative path of a search result.
 * @returns {number} Boost factor from the first matching rule, or 1.0 when
 *   no rule matches (neutral — raw score is kept as-is).
 */
function pathBoost(filePath) {
    const matched = PATH_BOOST_RULES.find((rule) => rule.pattern.test(filePath));
    return matched === undefined ? 1.0 : matched.boost;
}
15
41
  export function mergeHybridResults(params) {
16
42
  const byId = new Map();
17
43
  for (const r of params.vector) {
@@ -47,7 +73,8 @@ export function mergeHybridResults(params) {
47
73
  }
48
74
  }
49
75
  const merged = Array.from(byId.values()).map((entry) => {
50
- const score = params.vectorWeight * entry.vectorScore + params.textWeight * entry.textScore;
76
+ const raw = params.vectorWeight * entry.vectorScore + params.textWeight * entry.textScore;
77
+ const score = raw * pathBoost(entry.path);
51
78
  return {
52
79
  path: entry.path,
53
80
  startLine: entry.startLine,
@@ -89,77 +89,166 @@ export async function buildFileEntry(absPath, workspaceDir) {
89
89
  hash,
90
90
  };
91
91
  }
92
- export function chunkMarkdown(content, chunking) {
93
- const lines = content.split("\n");
94
- if (lines.length === 0)
92
/**
 * Parse the ATX heading depth of a markdown line.
 *
 * @param {string} line - A single line of markdown text.
 * @returns {number} Depth 1-6 when the line starts with "#"–"######"
 *   followed by whitespace, otherwise 0 (not a heading).
 */
function headingLevel(line) {
    const hit = /^(#{1,6})\s/.exec(line);
    if (hit === null) {
        return 0;
    }
    return hit[1].length;
}
99
/**
 * Join a heading stack into a breadcrumb prefix for chunk text.
 * E.g., ["# User Guide", "## Updating Taskmaster"] → "# User Guide > ## Updating Taskmaster\n"
 *
 * @param {(string|undefined)[]} stack - Heading lines; falsy slots (holes from
 *   skipped heading levels) are dropped.
 * @returns {string} Breadcrumb ending in "\n", or "" when no headings remain.
 */
function headingPrefix(stack) {
    const headings = [];
    for (const heading of stack) {
        if (heading) {
            headings.push(heading);
        }
    }
    if (headings.length === 0) {
        return "";
    }
    return `${headings.join(" > ")}\n`;
}
107
/**
 * Split lines into fixed-size chunks (the original algorithm).
 * Used as a fallback when a single section exceeds maxChars.
 *
 * @param {{line: string, lineNo: number}[]} entries - Lines with 1-based numbers.
 * @param {number} maxChars - Target maximum characters per chunk, prefix included.
 * @param {string} prefix - Heading breadcrumb prepended to every chunk's text.
 * @returns {{startLine: number, endLine: number, text: string, hash: string}[]}
 */
function chunkLinesFixed(entries, maxChars, prefix) {
    if (entries.length === 0) {
        return [];
    }
    // Character budget for body text after reserving room for the prefix
    // (never shrinks below 32 so pathological prefixes still make progress).
    const effectiveMax = Math.max(32, maxChars - prefix.length);
    const chunks = [];
    let pending = [];
    let pendingChars = 0;
    // Flush the pending lines as one chunk; no-op when nothing is buffered.
    const emit = () => {
        if (pending.length === 0) {
            return;
        }
        const text = prefix + pending.map((item) => item.line).join("\n");
        chunks.push({
            startLine: pending[0].lineNo,
            endLine: pending[pending.length - 1].lineNo,
            text,
            hash: hashText(text),
        });
    };
    for (const entry of entries) {
        // Lines longer than the budget are sliced into fitting segments that
        // all keep the original line number; empty lines pass through as-is.
        const segments = [];
        if (entry.line.length === 0) {
            segments.push(entry);
        }
        else {
            for (let offset = 0; offset < entry.line.length; offset += effectiveMax) {
                segments.push({
                    line: entry.line.slice(offset, offset + effectiveMax),
                    lineNo: entry.lineNo,
                });
            }
        }
        for (const segment of segments) {
            const cost = segment.line.length + 1; // +1 for the joining newline
            if (pending.length > 0 && pendingChars + cost > effectiveMax) {
                emit();
                pending = [];
                pendingChars = 0;
            }
            pending.push(segment);
            pendingChars += cost;
        }
    }
    emit();
    return chunks;
}
161
/**
 * Semantic markdown chunker.
 *
 * Splits content at markdown headings so each chunk corresponds to a logical section.
 * Each chunk is prefixed with the heading breadcrumb (ancestor headings) so the embedding
 * model has structural context — e.g., "# User Guide > ## Updating Taskmaster\n...content...".
 *
 * If a section exceeds maxChars, it falls back to fixed-size splitting within that section,
 * but each sub-chunk still receives the heading prefix.
 *
 * Files with no headings are chunked using fixed-size splitting (original behavior).
 *
 * NOTE(review): chunking.overlap is not applied on this path — semantic sections
 * replace the old sliding-window overlap; confirm this is intended.
 *
 * @param {string} content - Raw markdown text.
 * @param {{tokens: number, overlap: number}} chunking - Sizing config; tokens is
 *   converted to a character budget at ~4 chars/token (floor of 32).
 * @returns {{startLine: number, endLine: number, text: string, hash: string}[]}
 */
export function chunkMarkdown(content, chunking) {
    if (!content.trim())
        return [];
    const lines = content.split("\n");
    const maxChars = Math.max(32, chunking.tokens * 4);
    // Parse all lines to detect if there are any headings
    const parsedLines = [];
    let hasHeadings = false;
    for (let i = 0; i < lines.length; i++) {
        const line = lines[i] ?? "";
        const level = headingLevel(line);
        if (level > 0)
            hasHeadings = true;
        parsedLines.push({ line, lineNo: i + 1, level });
    }
    // No headings at all — fall back to fixed-size chunking (no prefix)
    if (!hasHeadings) {
        return chunkLinesFixed(parsedLines.map((p) => ({ line: p.line, lineNo: p.lineNo })), maxChars, "");
    }
    const sections = [];
    // headingStack tracks the current heading hierarchy: index = level-1
    const headingStack = [];
    let currentSection = { headingStack: [], lines: [] };
    for (const parsed of parsedLines) {
        if (parsed.level > 0) {
            // Flush the previous section if it has content
            if (currentSection.lines.length > 0) {
                sections.push(currentSection);
            }
            // Trim any heading at this level or deeper, then record this heading.
            // Skipped levels (e.g. "#" followed directly by "###") leave sparse
            // holes at the skipped indices; headingPrefix filters falsy slots,
            // so the breadcrumb only shows headings that actually appeared.
            if (headingStack.length >= parsed.level) {
                headingStack.length = parsed.level - 1;
            }
            headingStack[parsed.level - 1] = parsed.line;
            // Start a new section with the current heading stack as context
            currentSection = {
                headingStack: [...headingStack],
                lines: [{ line: parsed.line, lineNo: parsed.lineNo }],
            };
        }
        else {
            currentSection.lines.push({ line: parsed.line, lineNo: parsed.lineNo });
        }
    }
    // Flush final section
    if (currentSection.lines.length > 0) {
        sections.push(currentSection);
    }
    // Convert sections to chunks
    const chunks = [];
    for (const section of sections) {
        // Build the prefix from ancestor headings (all except the current heading,
        // which is already the first line of the section body)
        const ancestors = section.headingStack.slice(0, -1);
        const prefix = headingPrefix(ancestors);
        const bodyText = section.lines.map((e) => e.line).join("\n");
        const totalLen = prefix.length + bodyText.length;
        if (totalLen <= maxChars) {
            // Section fits in one chunk
            const first = section.lines[0];
            const last = section.lines[section.lines.length - 1];
            const text = prefix + bodyText;
            chunks.push({
                startLine: first.lineNo,
                endLine: last.lineNo,
                text,
                hash: hashText(text),
            });
        }
        else {
            // Section too large — split with fixed-size chunking, each sub-chunk gets prefix
            const subChunks = chunkLinesFixed(section.lines, maxChars, prefix);
            chunks.push(...subChunks);
        }
    }
    return chunks;
}
163
252
  export function parseEmbedding(raw) {
164
253
  try {
165
254
  const parsed = JSON.parse(raw);
@@ -1367,8 +1367,12 @@ export class MemoryIndexManager {
1367
1367
  const shouldSyncMemory = this.sources.has("memory") && (params?.force || needsFullReindex || this.dirty);
1368
1368
  const shouldSyncSessions = this.shouldSyncSessions(params, needsFullReindex);
1369
1369
  if (shouldSyncMemory) {
1370
- await this.syncMemoryFiles({ needsFullReindex, progress: progress ?? undefined });
1371
- this.dirty = false;
1370
+ try {
1371
+ await this.syncMemoryFiles({ needsFullReindex, progress: progress ?? undefined });
1372
+ }
1373
+ finally {
1374
+ this.dirty = false;
1375
+ }
1372
1376
  }
1373
1377
  if (shouldSyncSessions) {
1374
1378
  await this.syncSessionFiles({ needsFullReindex, progress: progress ?? undefined });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@rubytech/taskmaster",
3
- "version": "1.0.39",
3
+ "version": "1.0.40",
4
4
  "description": "AI-powered business assistant for small businesses",
5
5
  "publishConfig": {
6
6
  "access": "public"
@@ -495,6 +495,18 @@ When you add or change a file, your assistant picks it up automatically — no r
495
495
 
496
496
  When your assistant writes to **public/** or **shared/**, a shield icon appears in the navigation bar so you can review what was written (see [Data Safety Alert](#data-safety-alert) above).
497
497
 
498
+ ### Searching Memory
499
+
500
+ The Files page includes a **memory search bar** that lets you test what your assistant finds when it searches its knowledge base.
501
+
502
+ 1. Type a query in the **Search memory** box and press Enter (or click **Search**)
503
+ 2. Results show the file path, relevance score (as a percentage), matching line numbers, and a snippet of the matched text
504
+ 3. Click any result to open the file preview
505
+ 4. Use the **agent selector** dropdown next to the search bar to switch between agents (e.g., public vs admin) — this lets you verify what each assistant can see
506
+ 5. Click **Clear** to return to the normal file tree view
507
+
508
+ This is useful for diagnosing search issues — if your assistant can't find something in conversation, test the same query here to see what comes back. All results are shown regardless of score threshold, so you can see everything the search engine found.
509
+
498
510
  ---
499
511
 
500
512
  ## Status Dashboard
@@ -952,19 +964,50 @@ Only the business owner can change the activation mode.
952
964
 
953
965
  ## Updating Taskmaster
954
966
 
955
- When a new version of Taskmaster is available, the **Software** row on the Status Dashboard turns yellow and shows the new version number (e.g., "v1.2.3 → v1.3.0").
967
+ ### Checking for Updates
968
+
969
+ Taskmaster checks for updates automatically when you open the Setup page. The **Software** row in the Status Dashboard shows your current version and whether an update is available:
956
970
 
957
- To update:
971
+ - **Green** — You're running the latest version
972
+ - **Yellow** — An update is available (shows the new version number, e.g., "v1.0.38 → v1.0.39")
973
+ - **Grey** — Not yet checked
974
+
975
+ If the row shows "Unknown", tap the **refresh** button (circular arrow) to check manually. Tap the **(i)** button to see version details (current version, latest version, and status).
976
+
977
+ ### Installing an Update
958
978
 
959
979
  1. Open the **Setup** page
960
- 2. Look for the **Software** row in the dashboard
961
- 3. If an update is available, tap the **download** button (down-arrow icon)
962
- 4. Wait for the update to complete the page will reload automatically
963
- 5. After reload, the Software row should show "Up to date" in green
980
+ 2. Look for the **Software** row — if it's yellow, an update is available
981
+ 3. Tap the **download** button (down-arrow icon)
982
+ 4. A progress overlay appears showing each step of the update (fetching, building, running checks, etc.)
983
+ 5. When the update completes, the gateway restarts automatically
984
+ 6. The page reconnects on its own — you'll see a result banner showing the version change (e.g., "Updated: v1.0.38 → v1.0.39")
985
+ 7. The Software row should now show green
986
+
987
+ You don't need to refresh the page — the overlay stays visible during the update and the page reconnects automatically after the gateway restarts.
988
+
989
+ ### Alternative: Re-run the Installer
990
+
991
+ You can also update by re-running the install command in Terminal:
992
+
993
+ ```bash
994
+ curl -fsSL https://taskmaster.bot/install.sh | bash
995
+ ```
996
+
997
+ This detects your existing installation and upgrades it in place.
998
+
999
+ ### If an Update Fails
1000
+
1001
+ If something goes wrong during the update:
1002
+
1003
+ - The progress overlay shows which step failed (marked with an X)
1004
+ - A result banner appears with the failure reason
1005
+ - Your previous version remains running — a failed update does not leave your system broken
1006
+ - Tap **Dismiss** to close the result banner
964
1007
 
965
- If the Software row shows "Unknown", tap the **refresh** button (circular arrow) to check for updates. Tap the **(i)** button to see version details.
1008
+ If the page loses connection during the update and doesn't reconnect within two minutes, refresh the page manually. If the gateway doesn't come back, try power-cycling your device (unplug and replug).
966
1009
 
967
- > **Note:** Updates require an internet connection. The update process takes about 30 seconds. Your assistant will be briefly unavailable during the restart.
1010
+ > **Note:** Updates require an internet connection. The update process typically takes 30–60 seconds. Your assistant will be briefly unavailable during the restart.
968
1011
 
969
1012
  ---
970
1013