@rubytech/taskmaster 1.0.5 → 1.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/skills/refresh.js +3 -1
- package/dist/build-info.json +3 -3
- package/dist/cli/provision-cli.js +8 -1
- package/dist/filler/config.js +69 -0
- package/dist/filler/generator.js +237 -0
- package/dist/filler/index.js +8 -0
- package/dist/filler/trigger.js +163 -0
- package/dist/filler/types.js +7 -0
- package/dist/infra/taskmaster-root.js +1 -1
- package/dist/license/device-id.js +61 -0
- package/dist/license/keys.js +61 -0
- package/dist/license/revalidation.js +52 -0
- package/dist/license/state.js +12 -0
- package/dist/license/validate.js +59 -0
- package/dist/records/records-manager.js +92 -0
- package/package.json +5 -2
- package/scripts/install.sh +68 -2
- package/skills/event-management/SKILL.md +5 -0
- package/skills/taskmaster/SKILL.md +5 -11
|
@@ -7,7 +7,9 @@ const log = createSubsystemLogger("gateway/skills");
|
|
|
7
7
|
const listeners = new Set();
|
|
8
8
|
const workspaceVersions = new Map();
|
|
9
9
|
const watchers = new Map();
|
|
10
|
-
|
|
10
|
+
// Start at 1 so that any cached session snapshot with version 0 is treated as
|
|
11
|
+
// stale on the first message after a gateway restart.
|
|
12
|
+
let globalVersion = 1;
|
|
11
13
|
export const DEFAULT_SKILLS_WATCH_IGNORED = [
|
|
12
14
|
/(^|[\\/])\.git([\\/]|$)/,
|
|
13
15
|
/(^|[\\/])node_modules([\\/]|$)/,
|
package/dist/build-info.json
CHANGED
|
@@ -22,6 +22,7 @@ export function registerProvisionCli(program) {
|
|
|
22
22
|
.option("--port <port>", `Gateway port (default: ${DEFAULT_GATEWAY_PORT})`)
|
|
23
23
|
.option("--workspace <path>", `Workspace path (default: ${DEFAULT_WORKSPACE})`)
|
|
24
24
|
.option("--force", "Overwrite existing config and workspace")
|
|
25
|
+
.option("--skip-platform", "Skip platform setup (avahi, hostname, mDNS) — used by install.sh")
|
|
25
26
|
.action(async (opts) => {
|
|
26
27
|
await runProvision(opts);
|
|
27
28
|
});
|
|
@@ -34,6 +35,7 @@ async function runProvision(opts) {
|
|
|
34
35
|
}
|
|
35
36
|
const workspace = opts.workspace ? path.resolve(opts.workspace) : DEFAULT_WORKSPACE;
|
|
36
37
|
const force = Boolean(opts.force);
|
|
38
|
+
const skipPlatform = Boolean(opts.skipPlatform);
|
|
37
39
|
const isLinux = process.platform === "linux";
|
|
38
40
|
console.log("");
|
|
39
41
|
console.log("Taskmaster Provision");
|
|
@@ -46,7 +48,12 @@ async function runProvision(opts) {
|
|
|
46
48
|
// Step 3: Write agent list to config
|
|
47
49
|
await writeAgentList(workspace);
|
|
48
50
|
// Step 4-6: Linux-only mDNS setup
|
|
49
|
-
if (
|
|
51
|
+
if (skipPlatform) {
|
|
52
|
+
console.log("[4/7] Avahi: handled by install script");
|
|
53
|
+
console.log("[5/7] Hostname: handled by install script");
|
|
54
|
+
console.log("[6/7] mDNS: handled by install script");
|
|
55
|
+
}
|
|
56
|
+
else if (isLinux) {
|
|
50
57
|
await installAvahi();
|
|
51
58
|
await setHostname();
|
|
52
59
|
await registerMdnsService(port);
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Filler configuration resolution.
|
|
3
|
+
*/
|
|
4
|
+
/** Default filler configuration values. */
export const FILLER_DEFAULTS = {
    enabled: false,
    channels: [
        "whatsapp",
        "telegram",
        "discord",
        "slack",
        "signal",
        "googlechat",
        "imessage",
        "webchat",
    ],
    maxWaitMs: 3000,
    model: "claude-3-haiku-20240307",
    maxWords: 15,
};
/**
 * Merge the user-supplied `filler` section of the config over the defaults.
 *
 * @param cfg - full config object (may be null/undefined)
 * @returns a complete filler config; every FILLER_DEFAULTS key is present
 */
export function resolveFillerConfig(cfg) {
    const overrides = cfg?.filler ?? {};
    const resolved = { ...FILLER_DEFAULTS, ...overrides };
    // An explicit `channels: undefined` in the override would otherwise
    // clobber the default list via the spread, so re-apply it here.
    resolved.channels = overrides.channels ?? FILLER_DEFAULTS.channels;
    return resolved;
}
|
|
30
|
+
/**
 * Resolve whether filler is enabled, checking session and agent-level config.
 *
 * Priority:
 * 1. Session override (from /filler command)
 * 2. Per-agent config (agents.list[].fillerEnabled)
 * 3. Agent defaults (agents.defaults.fillerEnabled)
 * 4. Global filler config (filler.enabled)
 * 5. Default (false)
 *
 * @param params.cfg - full config object (may be null/undefined)
 * @param params.agentId - agent to look up in agents.list (optional)
 * @param params.sessionFillerEnabled - session-level override (optional)
 * @returns {boolean} effective filler-enabled flag
 */
export function resolveFillerEnabled(params) {
    const { cfg, agentId, sessionFillerEnabled } = params;
    // 1. Session override (set via the /filler command) wins outright.
    if (sessionFillerEnabled !== undefined) {
        return sessionFillerEnabled;
    }
    // 2. Per-agent setting, when this agent appears in agents.list.
    const perAgent = agentId
        ? cfg?.agents?.list?.find((entry) => entry.id === agentId)?.fillerEnabled
        : undefined;
    if (perAgent !== undefined) {
        return perAgent;
    }
    // 3. Agent-wide default.
    const agentDefault = cfg?.agents?.defaults?.fillerEnabled;
    if (agentDefault !== undefined) {
        return agentDefault;
    }
    // 4./5. Global filler config, else the hard-coded default (false).
    return cfg?.filler?.enabled ?? FILLER_DEFAULTS.enabled;
}
|
|
60
|
+
/**
 * Check if filler is enabled for a specific channel.
 *
 * True only when filler is enabled overall (per resolveFillerEnabled) AND
 * the channel appears in the resolved channels list. A missing channel is
 * always treated as disabled.
 */
export function isFillerEnabledForChannel(params) {
    const { cfg, channel } = params;
    // Both checks are pure config reads, so ordering is free to vary.
    if (!channel || !resolveFillerEnabled(params)) {
        return false;
    }
    return resolveFillerConfig(cfg).channels.includes(channel);
}
|
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Filler message generation using a lightweight model (Haiku).
|
|
3
|
+
*
|
|
4
|
+
* Uses pi-ai complete() for OAuth-compatible inference.
|
|
5
|
+
*/
|
|
6
|
+
import path from "node:path";
|
|
7
|
+
import { complete } from "@mariozechner/pi-ai";
|
|
8
|
+
import { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent";
|
|
9
|
+
import { getApiKeyForModel, requireApiKey } from "../agents/model-auth.js";
|
|
10
|
+
import { ensureTaskmasterModelsJson } from "../agents/models-config.js";
|
|
11
|
+
const FILLER_SYSTEM_PROMPT = `You generate a brief, first-person filler while processing a request. Output ONLY the filler text.
|
|
12
|
+
|
|
13
|
+
Rules:
|
|
14
|
+
1. For trivial messages (greetings, "yes", "ok", "thanks", single-word replies), output: SKIP
|
|
15
|
+
2. Always use first person ("I'll...", "I need to...", "I'm...")
|
|
16
|
+
3. Match the filler to what you're doing (shown in thinking)
|
|
17
|
+
4. Keep it short (2-6 words) and casual
|
|
18
|
+
5. Vary your responses every time
|
|
19
|
+
|
|
20
|
+
Quick acknowledgments (no lookup needed):
|
|
21
|
+
- "one moment please", "just a moment", "one sec", "OK", "sure thing", "got it"
|
|
22
|
+
|
|
23
|
+
When looking something up / searching:
|
|
24
|
+
- "I need to look that up", "I'll check on that", "I need to search for that", "I'm looking into it"
|
|
25
|
+
|
|
26
|
+
When checking / verifying:
|
|
27
|
+
- "I'll check", "I'm checking now", "I need to verify that"
|
|
28
|
+
|
|
29
|
+
When thinking / working out:
|
|
30
|
+
- "hmm", "I'm thinking", "I need to work this out"
|
|
31
|
+
|
|
32
|
+
IMPORTANT: Vary every response. Never repeat the same phrase twice in a row.`;
|
|
33
|
+
const TOOL_PROGRESS_SYSTEM_PROMPT = `You generate a brief, first-person progress update while a tool is running. Output ONLY the progress text.
|
|
34
|
+
|
|
35
|
+
Rules:
|
|
36
|
+
1. Always use first person ("I'm...", "Just...", "Let me...")
|
|
37
|
+
2. Be specific about what you're doing based on the tool and its arguments
|
|
38
|
+
3. Keep it short (3-8 words) and natural
|
|
39
|
+
4. Sound like a human assistant giving a quick status update
|
|
40
|
+
|
|
41
|
+
Examples:
|
|
42
|
+
- exec with "git status" → "Just checking the git status"
|
|
43
|
+
- exec with "npm install" → "Installing the dependencies"
|
|
44
|
+
- memory_search with query "invoice" → "Searching my notes for invoices"
|
|
45
|
+
- read with path "/config.json" → "Reading the config file"
|
|
46
|
+
- web_search with query "weather" → "Looking up the weather"
|
|
47
|
+
|
|
48
|
+
Output ONLY the progress message, nothing else.`;
|
|
49
|
+
/** Token returned when filler should not be sent. */
export const FILLER_SKIP_TOKEN = "SKIP";
/**
 * Generate a filler message using pi-ai complete().
 *
 * Works with both API keys and OAuth tokens.
 *
 * @param params.userMessage - the user's incoming message text
 * @param params.thinking - model "thinking" accumulated so far (optional)
 * @param params.model - Anthropic model id to use for generation
 * @param params.maxWords - word budget passed into the prompt
 * @param params.timeoutMs - generation deadline (default 3000ms)
 * @param params.abortSignal - cancels the timeout race (optional)
 * @param params.cfg / params.agentDir - used to resolve models/auth files
 * @returns one of: { ok: true, text }, { ok: false, skip: true },
 *          or { ok: false, error } — never throws.
 */
export async function generateFiller(params) {
    const { userMessage, thinking, model: modelId, maxWords, timeoutMs = 3000, abortSignal, cfg, agentDir, } = params;
    // Check if already aborted before starting
    if (abortSignal?.aborted) {
        return { ok: false, error: new Error("Filler generation aborted") };
    }
    try {
        // Set up model discovery with auth
        await ensureTaskmasterModelsJson(cfg, agentDir);
        // Per-agent auth/model registries live under agentDir when provided;
        // otherwise the library's default locations are used.
        const authStorage = agentDir
            ? new AuthStorage(path.join(agentDir, "auth.json"))
            : new AuthStorage();
        const modelRegistry = agentDir
            ? new ModelRegistry(authStorage, path.join(agentDir, "models.json"))
            : new ModelRegistry(authStorage);
        // Find Haiku model
        const model = modelRegistry.find("anthropic", modelId);
        if (!model) {
            return { ok: false, error: new Error(`Filler model not found: anthropic/${modelId}`) };
        }
        // Resolve and set API key
        const apiKeyInfo = await getApiKeyForModel({
            model,
            cfg,
            agentDir,
        });
        const apiKey = requireApiKey(apiKeyInfo, model.provider);
        authStorage.setRuntimeApiKey(model.provider, apiKey);
        // Truncate thinking if too long (keep last 400 chars for context)
        const truncatedThinking = thinking
            ? thinking.length > 400
                ? `...${thinking.slice(-400)}`
                : thinking
            : "";
        // Build the prompt with both user message and thinking
        let prompt = `User's message: "${userMessage}"`;
        if (truncatedThinking) {
            prompt += `\n\nAssistant's thinking so far:\n---\n${truncatedThinking}\n---`;
        }
        prompt += `\n\nGenerate a brief filler (max ${maxWords} words) that fits what the assistant is doing:`;
        // Build context with system prompt and user message
        const context = {
            systemPrompt: FILLER_SYSTEM_PROMPT,
            messages: [
                {
                    role: "user",
                    content: prompt,
                    timestamp: Date.now(),
                },
            ],
        };
        // Create timeout promise. The timer is cleared on abort; on a
        // successful completion the pending rejection is absorbed by the
        // already-settled Promise.race below.
        const timeoutPromise = new Promise((_, reject) => {
            const timer = setTimeout(() => reject(new Error("Filler generation timeout")), timeoutMs);
            abortSignal?.addEventListener("abort", () => {
                clearTimeout(timer);
                reject(new Error("Filler generation aborted"));
            });
        });
        // Race completion against timeout
        const message = (await Promise.race([
            complete(model, context, {
                apiKey,
                maxTokens: 50,
                temperature: 0.9, // Higher temperature for variety
            }),
            timeoutPromise,
        ]));
        // Extract text from response — only the first content part is used;
        // anything other than a non-empty text part means "nothing to send".
        const content = message.content?.[0];
        if (!content || content.type !== "text" || !content.text) {
            return { ok: false, skip: true };
        }
        const text = content.text.trim();
        // Check for SKIP token (the system prompt instructs the model to emit
        // it for trivial messages); comparison is case-insensitive.
        if (text.toUpperCase() === FILLER_SKIP_TOKEN) {
            return { ok: false, skip: true };
        }
        // Don't truncate - mid-sentence cuts look bad. The prompt already
        // instructs Haiku to keep it brief (2-6 words).
        return { ok: true, text };
    }
    catch (err) {
        // Timeouts, aborts and provider errors all land here; the caller
        // decides whether to log — filler is best-effort.
        const error = err instanceof Error ? err : new Error(String(err));
        return { ok: false, error };
    }
}
|
|
151
|
+
/**
 * Generate a tool progress message using Haiku.
 *
 * Same auth/model plumbing as generateFiller, but prompts with the running
 * tool's name and (scalar) arguments instead of the user message.
 *
 * @param params.toolName - name of the tool currently executing
 * @param params.args - tool arguments; only string/number/boolean values are
 *        forwarded, strings truncated to 100 chars
 * @param params.model - Anthropic model id
 * @param params.timeoutMs - generation deadline (default 2000ms)
 * @param params.cfg / params.agentDir - used to resolve models/auth files
 * @returns { ok: true, text } | { ok: false, skip: true } | { ok: false, error }
 */
export async function generateToolProgress(params) {
    const { toolName, args, model: modelId, timeoutMs = 2000, cfg, agentDir } = params;
    try {
        // Set up model discovery with auth
        await ensureTaskmasterModelsJson(cfg, agentDir);
        const authStorage = agentDir
            ? new AuthStorage(path.join(agentDir, "auth.json"))
            : new AuthStorage();
        const modelRegistry = agentDir
            ? new ModelRegistry(authStorage, path.join(agentDir, "models.json"))
            : new ModelRegistry(authStorage);
        // Find Haiku model
        const model = modelRegistry.find("anthropic", modelId);
        if (!model) {
            return { ok: false, error: new Error(`Filler model not found: anthropic/${modelId}`) };
        }
        // Resolve and set API key
        const apiKeyInfo = await getApiKeyForModel({
            model,
            cfg,
            agentDir,
        });
        const apiKey = requireApiKey(apiKeyInfo, model.provider);
        authStorage.setRuntimeApiKey(model.provider, apiKey);
        // Build prompt with tool info
        let prompt = `Tool: ${toolName}`;
        if (args && Object.keys(args).length > 0) {
            // Include relevant args, truncating long values. Objects/arrays
            // are dropped entirely — only scalars are forwarded to the model.
            const relevantArgs = {};
            for (const [key, value] of Object.entries(args)) {
                if (typeof value === "string" && value.length > 100) {
                    relevantArgs[key] = value.slice(0, 100) + "...";
                }
                else if (typeof value === "string" ||
                    typeof value === "number" ||
                    typeof value === "boolean") {
                    relevantArgs[key] = value;
                }
            }
            if (Object.keys(relevantArgs).length > 0) {
                prompt += `\nArguments: ${JSON.stringify(relevantArgs)}`;
            }
        }
        prompt += "\n\nGenerate a brief progress update:";
        const context = {
            systemPrompt: TOOL_PROGRESS_SYSTEM_PROMPT,
            messages: [
                {
                    role: "user",
                    content: prompt,
                    timestamp: Date.now(),
                },
            ],
        };
        // Create timeout promise.
        // NOTE(review): unlike generateFiller, this timer is never cleared, so
        // it can keep the event loop alive up to timeoutMs — confirm intended.
        const timeoutPromise = new Promise((_, reject) => {
            setTimeout(() => reject(new Error("Tool progress generation timeout")), timeoutMs);
        });
        // Race completion against timeout
        const message = (await Promise.race([
            complete(model, context, {
                apiKey,
                maxTokens: 30,
                temperature: 0.7,
            }),
            timeoutPromise,
        ]));
        // Extract text from response
        const content = message.content?.[0];
        if (!content || content.type !== "text" || !content.text) {
            return { ok: false, skip: true };
        }
        const text = content.text.trim();
        return { ok: true, text };
    }
    catch (err) {
        const error = err instanceof Error ? err : new Error(String(err));
        console.warn(`[filler] Tool progress generation failed: ${error.message}`);
        return { ok: false, error };
    }
}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Filler message system - reduces perceived latency by emitting brief
|
|
3
|
+
* acknowledgments while the main AI response generates.
|
|
4
|
+
*/
|
|
5
|
+
export * from "./types.js";
|
|
6
|
+
export { FILLER_DEFAULTS, resolveFillerConfig, resolveFillerEnabled, isFillerEnabledForChannel, } from "./config.js";
|
|
7
|
+
export { createFillerTrigger } from "./trigger.js";
|
|
8
|
+
export { generateFiller, FILLER_SKIP_TOKEN } from "./generator.js";
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Filler trigger: generates and delivers a brief acknowledgment message
|
|
3
|
+
* while the main AI response is being generated.
|
|
4
|
+
*
|
|
5
|
+
* Waits briefly for initial thinking to accumulate, then generates a
|
|
6
|
+
* context-aware filler using both the user message and model's thinking.
|
|
7
|
+
*
|
|
8
|
+
* Also sends progress updates during tool calls (throttled).
|
|
9
|
+
*/
|
|
10
|
+
import { isFillerEnabledForChannel, resolveFillerConfig } from "./config.js";
|
|
11
|
+
import { generateFiller, generateToolProgress } from "./generator.js";
|
|
12
|
+
/** How long to wait for thinking before generating filler (ms) */
|
|
13
|
+
const THINKING_WAIT_MS = 350;
|
|
14
|
+
/** Minimum time between tool progress updates (ms) */
|
|
15
|
+
const TOOL_PROGRESS_THROTTLE_MS = 8000;
|
|
16
|
+
/**
 * Tool names (lowercased) that should never produce a progress update,
 * because invoking them IS the final user-visible output.
 */
const MESSAGING_TOOL_NAMES = new Set([
    "message",
    "send",
    "send_message",
    "sessions_send",
]);
/**
 * Check if a tool should trigger a progress update.
 * Messaging tools are skipped since they're the final output.
 *
 * @param {string} toolName - raw tool name; matched case-insensitively,
 *        ignoring surrounding whitespace
 * @returns {boolean} true when progress updates should be suppressed
 */
function shouldSkipToolProgress(toolName) {
    // Set membership replaces the previous chain of === comparisons: one
    // place to extend, O(1) lookup.
    return MESSAGING_TOOL_NAMES.has(toolName.toLowerCase().trim());
}
|
|
27
|
+
/**
 * Create a filler trigger for an agent run.
 *
 * Returns null if filler is disabled or not enabled for this channel.
 * Waits briefly for thinking to accumulate before generating filler.
 *
 * Returned handle:
 * - feedThinking(text): accumulate thinking text (ignored once a filler
 *   was sent or the trigger was cancelled)
 * - feedToolCall(toolName, args): fire-and-forget throttled progress update
 * - cancel(): abort any pending/in-flight generation
 * - flush(): force generation now (if not already started) and await it
 */
export function createFillerTrigger(params) {
    const config = resolveFillerConfig(params.cfg);
    // Check if filler is enabled for this channel (respects session and per-agent config)
    if (!isFillerEnabledForChannel({
        cfg: params.cfg,
        agentId: params.agentId,
        channel: params.channel,
        sessionFillerEnabled: params.sessionFillerEnabled,
    })) {
        return null;
    }
    // Closure state shared by all returned callbacks:
    let cancelled = false;          // cancel() was called — drop everything
    let fillerSent = false;         // a filler was delivered; stop collecting
    const abortController = new AbortController();
    const thinkingChunks = [];      // raw thinking text, joined at generation time
    let generatePromise = null;     // in-flight generation, awaited by flush()
    let lastProgressAt = 0;         // epoch ms of last tool-progress update
    // Collect thinking chunks
    const feedThinking = (text) => {
        if (cancelled || fillerSent || !text)
            return;
        thinkingChunks.push(text);
    };
    // Send progress update for tool calls (throttled, uses Haiku to generate)
    const feedToolCall = (toolName, args) => {
        if (cancelled)
            return;
        // Skip messaging tools (they're the final output)
        if (shouldSkipToolProgress(toolName)) {
            return;
        }
        // Throttle progress updates
        const now = Date.now();
        if (now - lastProgressAt < TOOL_PROGRESS_THROTTLE_MS) {
            return;
        }
        lastProgressAt = now;
        // Generate and send progress update via Haiku.
        // Deliberately fire-and-forget (void): tool progress must never block
        // the agent run; errors are logged in the .catch below.
        void generateToolProgress({
            toolName,
            args,
            model: config.model,
            timeoutMs: 2000,
            cfg: params.cfg,
            agentDir: params.agentDir,
        })
            .then((result) => {
            if (cancelled) {
                // Run finished/cancelled while generating — discard.
                return;
            }
            if (result.ok) {
                return params.onFiller(result.text);
            }
        })
            .catch((err) => {
            console.warn(`[filler] tool progress failed: ${err instanceof Error ? err.message : String(err)}`);
        });
    };
    // Start generation after brief delay to collect thinking
    const thinkingTimer = setTimeout(() => {
        if (cancelled)
            return;
        generatePromise = startGeneration();
    }, THINKING_WAIT_MS);
    // Generate one filler from the accumulated thinking and deliver it.
    // Runs at most the work of a single generateFiller call; never throws.
    async function startGeneration() {
        if (cancelled)
            return;
        const thinking = thinkingChunks.join("").trim();
        try {
            const result = await generateFiller({
                userMessage: params.userMessage || "",
                thinking: thinking || undefined,
                model: config.model,
                maxWords: config.maxWords,
                timeoutMs: config.maxWaitMs,
                abortSignal: abortController.signal,
                cfg: params.cfg,
                agentDir: params.agentDir,
            });
            if (cancelled) {
                // Cancelled during generation — discard the result.
                return;
            }
            if (result.ok) {
                fillerSent = true;
                await params.onFiller(result.text);
            }
            else if ("skip" in result && result.skip) {
                // Model chose to skip (trivial message) — nothing to send.
            }
            else if ("error" in result) {
                console.warn(`[filler] generation error: ${result.error.message}`);
            }
        }
        catch (err) {
            // Fail silently - filler is non-critical
            console.warn(`[filler] error: ${err instanceof Error ? err.message : String(err)}`);
        }
    }
    // Stop everything: timer, in-flight generation, future feeds. Idempotent.
    const cancel = () => {
        if (cancelled)
            return;
        cancelled = true;
        clearTimeout(thinkingTimer);
        abortController.abort();
        console.warn(`[filler] cancelled for run ${params.runId}`);
    };
    // Force generation now (if the timer hasn't fired yet) and await whatever
    // generation is in flight, so callers can sequence after the filler.
    const flush = async () => {
        clearTimeout(thinkingTimer);
        if (!generatePromise && !cancelled && !fillerSent) {
            generatePromise = startGeneration();
        }
        if (generatePromise) {
            await generatePromise;
        }
    };
    return {
        feedThinking,
        feedToolCall,
        cancel,
        flush,
    };
}
|
|
@@ -15,7 +15,7 @@ async function findPackageRoot(startDir, maxDepth = 12) {
|
|
|
15
15
|
let current = path.resolve(startDir);
|
|
16
16
|
for (let i = 0; i < maxDepth; i += 1) {
|
|
17
17
|
const name = await readPackageName(current);
|
|
18
|
-
if (name === "taskmaster")
|
|
18
|
+
if (name === "taskmaster" || name === "@rubytech/taskmaster")
|
|
19
19
|
return current;
|
|
20
20
|
const parent = path.dirname(current);
|
|
21
21
|
if (parent === current)
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
import crypto from "node:crypto";
|
|
2
|
+
import { execFileSync } from "node:child_process";
|
|
3
|
+
import fs from "node:fs";
|
|
4
|
+
import os from "node:os";
|
|
5
|
+
// Salt folded into every hash so device IDs are not bare serial hashes.
const SALT = "taskmaster-device-v1";
const PREFIX = "tm_dev_";
// Memoized result — the fingerprint cannot change while the process runs.
let cachedDeviceId = null;
/** SHA-256 of "<salt>:<input>", returned as lowercase hex. */
function hashWithSalt(input) {
    const digest = crypto.createHash("sha256");
    digest.update(`${SALT}:${input}`);
    return digest.digest("hex");
}
/** macOS: read IOPlatformSerialNumber via ioreg; null when unavailable. */
function getMacSerial() {
    let output;
    try {
        output = execFileSync("ioreg", ["-rd1", "-c", "IOPlatformExpertDevice"], {
            encoding: "utf8",
            timeout: 5000,
        });
    }
    catch {
        return null;
    }
    const match = /"IOPlatformSerialNumber"\s*=\s*"([^"]+)"/.exec(output);
    return match ? match[1] : null;
}
/** Linux (incl. Raspberry Pi): read the `Serial` field from /proc/cpuinfo. */
function getLinuxSerial() {
    try {
        const match = /^Serial\s*:\s*(\S+)/m.exec(fs.readFileSync("/proc/cpuinfo", "utf8"));
        return match ? match[1] : null;
    }
    catch {
        return null;
    }
}
/** Last resort: combine hostname, platform, arch and CPU model. */
function getFallbackId() {
    const cpuModel = os.cpus()[0]?.model ?? "unknown";
    return [os.hostname(), os.platform(), os.arch(), cpuModel].join(":");
}
/**
 * Generate a stable hardware fingerprint for this device.
 * - macOS: SHA-256 of IOPlatformSerialNumber
 * - Linux (Pi): SHA-256 of /proc/cpuinfo Serial
 * - Fallback: SHA-256 of hostname + platform + arch + cpu model
 *
 * Result is cached in memory (won't change during runtime).
 */
export function getDeviceId() {
    if (cachedDeviceId) {
        return cachedDeviceId;
    }
    const platform = os.platform();
    let raw = null;
    if (platform === "darwin") {
        raw = getMacSerial();
    }
    else if (platform === "linux") {
        raw = getLinuxSerial();
    }
    if (!raw) {
        raw = getFallbackId();
    }
    cachedDeviceId = PREFIX + hashWithSalt(raw);
    return cachedDeviceId;
}
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
import crypto from "node:crypto";
|
|
2
|
+
/**
 * Ed25519 public key for verifying license tokens.
 *
 * The corresponding private key is kept secret and used to sign
 * tokens when a license is issued (via our Taskmaster WhatsApp agent
 * or manually).
 *
 * Token format: TM1-<base64url(payload)>.<base64url(signature)>
 * Payload JSON: { did, tier, exp, cid, iat }
 * Note: `tier` is always "standard" — no tier differentiation exists.
 */
const PUBLIC_KEY_PEM = `-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEA/t/C4A4I0rDlj5rEqv6Hy6VdHJr7WiJHWUxgwGz9HcM=
-----END PUBLIC KEY-----`;
const publicKey = crypto.createPublicKey(PUBLIC_KEY_PEM);
/** Token version prefix. */
const TOKEN_PREFIX = "TM1-";
/**
 * Verify a license token's cryptographic signature and decode its payload.
 * Does NOT check device binding or expiry — caller handles that.
 *
 * @param {string} token - raw license key as entered by the user
 * @returns {{valid: true, payload: object} | {valid: false, message: string}}
 */
export function verifyLicenseToken(token) {
    if (!token.startsWith(TOKEN_PREFIX)) {
        return { valid: false, message: "Invalid token format" };
    }
    // Split "<payload>.<signature>" on the FIRST dot only.
    const body = token.slice(TOKEN_PREFIX.length);
    const dot = body.indexOf(".");
    if (dot === -1) {
        return { valid: false, message: "Invalid token format" };
    }
    const payloadB64 = body.slice(0, dot);
    const signatureB64 = body.slice(dot + 1);
    if (payloadB64.length === 0 || signatureB64.length === 0) {
        return { valid: false, message: "Invalid token format" };
    }
    // Verify signature. The signature covers the base64url-encoded payload
    // text itself, not the decoded JSON.
    let signatureValid;
    try {
        const signature = Buffer.from(signatureB64, "base64url");
        signatureValid = crypto.verify(null, Buffer.from(payloadB64), publicKey, signature);
    }
    catch {
        return { valid: false, message: "Signature verification failed" };
    }
    if (!signatureValid) {
        return { valid: false, message: "Invalid license key" };
    }
    // Decode payload
    try {
        const decoded = Buffer.from(payloadB64, "base64url").toString("utf8");
        const payload = JSON.parse(decoded);
        const complete = payload.did && payload.tier && payload.exp && payload.cid && payload.iat;
        if (!complete) {
            return { valid: false, message: "Incomplete license data" };
        }
        return { valid: true, payload };
    }
    catch {
        return { valid: false, message: "Invalid license data" };
    }
}
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
import { loadConfig, writeConfigFile } from "../config/config.js";
|
|
2
|
+
import { getDeviceId } from "./device-id.js";
|
|
3
|
+
import { validateLicenseKey } from "./validate.js";
|
|
4
|
+
/** Check every 24 hours during gateway uptime. */
const CHECK_INTERVAL_MS = 24 * 60 * 60 * 1000;
let revalidationTimer = null;
/**
 * Re-check the stored license token locally. On failure, persist a config
 * with the validation fields cleared (forcing re-activation) and notify the
 * caller via onChange(false).
 *
 * @param log - subsystem logger with .info/.warn
 * @param onChange - callback invoked with the new licensed state (false only)
 */
function revalidate(log, onChange) {
    const config = loadConfig();
    const lic = config.license;
    // Nothing to revalidate if there's no stored key
    if (!lic?.key || !lic.validatedAt)
        return;
    // Re-verify the stored token locally (signature + device + expiry)
    const deviceId = getDeviceId();
    const result = validateLicenseKey(lic.key, deviceId);
    if (result.valid) {
        log.info("license still valid");
        return;
    }
    // Token is no longer valid (expired, wrong device, bad signature)
    log.warn(`license revoked: ${result.message}`);
    try {
        // BUGFIX: writeConfigFile was previously fire-and-forget (`void ...`)
        // inside this try/catch, so an async rejection escaped the catch as
        // an unhandled rejection and the warning below never fired. Route
        // both sync throws and async rejections into the same warning path.
        Promise.resolve(writeConfigFile({
            ...config,
            license: {
                key: lic.key,
                deviceId: lic.deviceId,
                // Clear validation fields to force re-activation
            },
        })).catch((err) => {
            log.warn(`failed to clear revoked license: ${String(err)}`);
        });
    }
    catch (err) {
        log.warn(`failed to clear revoked license: ${String(err)}`);
    }
    onChange(false);
}
/**
 * Start the periodic (24h) license re-check. Replaces any previous timer.
 * The timer is unref'd so it never keeps the process alive.
 */
export function startLicenseRevalidation(log, onChange) {
    stopLicenseRevalidation();
    revalidationTimer = setInterval(() => {
        revalidate(log, onChange);
    }, CHECK_INTERVAL_MS);
    // Don't prevent process exit
    if (revalidationTimer && typeof revalidationTimer === "object" && "unref" in revalidationTimer) {
        revalidationTimer.unref();
    }
}
/** Stop the periodic re-check; safe to call when not started. */
export function stopLicenseRevalidation() {
    if (revalidationTimer) {
        clearInterval(revalidationTimer);
        revalidationTimer = null;
    }
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
/**
 * In-memory license state flag.
 * Updated at startup and when the license is activated or revoked.
 * Defaults to true in test environments so tests don't need to mock licensing.
 */
let licensed = process.env.VITEST === "true";
/** @returns {boolean} whether the gateway currently considers itself licensed. */
export function isLicensed() {
    return licensed;
}
/**
 * Overwrite the in-memory licensed flag.
 * Called by activation/revocation paths; the value is not persisted here.
 * @param {boolean} value - new licensed state
 */
export function setLicensed(value) {
    licensed = value;
}
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import { verifyLicenseToken } from "./keys.js";
|
|
2
|
+
/** Max age for a stored validation before we require re-checking (30 days). */
const MAX_VALIDATION_AGE_MS = 30 * 24 * 60 * 60 * 1000;
/**
 * Validate a license token locally using Ed25519 signature verification.
 * Checks, in order: signature is valid, token is bound to this device,
 * and the embedded expiry has not passed.
 *
 * @param {string} key - Signed license token.
 * @param {string} deviceId - Identifier of the current device.
 * @returns {{valid: boolean, message: string, expiresAt?: string, tier?: string, customerId?: string}}
 */
export function validateLicenseKey(key, deviceId) {
    const verification = verifyLicenseToken(key);
    if (!verification.valid) {
        return { valid: false, message: verification.message };
    }
    const { payload } = verification;
    // Device binding: a wildcard "*" device id (dev/master keys) matches any device.
    const boundElsewhere = payload.did !== "*" && payload.did !== deviceId;
    if (boundElsewhere) {
        return {
            valid: false,
            message: "This license key is bound to a different device",
        };
    }
    // Expiry: a missing or unparsable `exp` is rejected here, unlike the
    // perpetual-license handling in isLicenseValid below.
    // NOTE(review): confirm tokens always carry `exp` — otherwise this
    // asymmetry is a latent inconsistency.
    const expiryMs = new Date(payload.exp).getTime();
    if (Number.isNaN(expiryMs) || Date.now() > expiryMs) {
        return { valid: false, message: "License has expired" };
    }
    return {
        valid: true,
        message: "License activated",
        expiresAt: payload.exp,
        tier: payload.tier,
        customerId: payload.cid,
    };
}
/**
 * Check whether the stored license in `config` is currently valid.
 * Only inspects local state (presence, validation age, expiry); it does
 * NOT re-verify the token signature.
 *
 * @param {{license?: {key?: string, validatedAt?: string, expiresAt?: string}}} config
 * @returns {boolean}
 */
export function isLicenseValid(config) {
    const lic = config.license;
    if (!lic?.key || !lic.validatedAt) {
        return false;
    }
    // Unparsable or stale validation timestamps force a re-check.
    const validatedMs = new Date(lic.validatedAt).getTime();
    if (Number.isNaN(validatedMs) || Date.now() - validatedMs > MAX_VALIDATION_AGE_MS) {
        return false;
    }
    // An absent expiresAt means the license is perpetual.
    if (lic.expiresAt) {
        const expiryMs = new Date(lic.expiresAt).getTime();
        if (Number.isNaN(expiryMs) || Date.now() > expiryMs) {
            return false;
        }
    }
    return true;
}
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
import fs from "node:fs";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
import os from "node:os";
|
|
4
|
+
// On-disk location of the records store: ~/.taskmaster/records.json
const RECORDS_DIR = path.join(os.homedir(), ".taskmaster");
const RECORDS_PATH = path.join(RECORDS_DIR, "records.json");
/**
 * Load the records store from disk.
 * Returns an empty version-1 store when the file is missing, unreadable,
 * or does not match the expected { version: 1, records: {...} } shape.
 */
function readFile() {
    try {
        const parsed = JSON.parse(fs.readFileSync(RECORDS_PATH, "utf-8"));
        const hasRecords = typeof parsed.records === "object" && parsed.records !== null;
        if (parsed.version === 1 && hasRecords) {
            return parsed;
        }
    }
    catch {
        // Missing or corrupt file — fall through to the empty store.
    }
    return { version: 1, records: {} };
}
/**
 * Atomically persist the records store: write to a sibling .tmp file,
 * then rename over the real path so readers never observe a partial file.
 */
function writeFile(data) {
    fs.mkdirSync(RECORDS_DIR, { recursive: true });
    const tmpPath = `${RECORDS_PATH}.tmp`;
    fs.writeFileSync(tmpPath, JSON.stringify(data, null, 2), "utf-8");
    fs.renameSync(tmpPath, RECORDS_PATH);
}
|
|
25
|
+
/**
 * List all records, optionally restricted to one workspace, sorted by name.
 * Records without a workspace tag belong to the default "taskmaster" workspace.
 *
 * @param {string} [workspace] - Workspace filter; omit to list everything.
 * @returns {object[]} Records sorted by name.
 */
export function listRecords(workspace) {
    const all = Object.values(readFile().records);
    const matching = workspace
        ? all.filter((record) => (record.workspace ?? "taskmaster") === workspace)
        : all;
    return matching.sort((a, b) => a.name.localeCompare(b.name));
}
|
|
35
|
+
/**
 * Look up a single record by id.
 * @param {string} id
 * @returns {object|null} The record, or null when it does not exist.
 */
export function getRecord(id) {
    const { records } = readFile();
    return records[id] ?? null;
}
|
|
39
|
+
/**
 * Search records by id substring or case-insensitive name substring,
 * optionally limited to one workspace (untagged records count as the
 * default "taskmaster" workspace). Note: ids are matched against the
 * lowercased query without lowercasing the id itself.
 *
 * @param {string} query
 * @param {string} [workspace]
 * @returns {object[]} Matching records, in store order.
 */
export function searchRecords(query, workspace) {
    const needle = query.toLowerCase();
    let candidates = Object.values(readFile().records);
    if (workspace) {
        candidates = candidates.filter((record) => (record.workspace ?? "taskmaster") === workspace);
    }
    return candidates.filter((record) => record.id.includes(needle) || record.name.toLowerCase().includes(needle));
}
|
|
49
|
+
/**
 * Create or replace the record with the given id.
 * Preserves createdAt (and workspace, unless overridden) from any existing
 * record with the same id; `fields` is replaced wholesale.
 *
 * Fix: default `fields` to {} when the caller omits it. Previously an
 * undefined `fields` was stored, which made setRecordField/deleteRecordField
 * throw (`record.fields[key]` on undefined) and was silently dropped by
 * JSON.stringify when persisted.
 *
 * @param {string} id - Record identifier.
 * @param {{name: string, workspace?: string, fields?: object}} input
 * @returns {object} The record as stored.
 */
export function setRecord(id, input) {
    const data = readFile();
    const now = new Date().toISOString();
    const existing = data.records[id];
    const record = {
        id,
        name: input.name,
        workspace: input.workspace ?? existing?.workspace,
        createdAt: existing?.createdAt ?? now,
        updatedAt: now,
        // Defensive default: never persist a record without a fields object.
        fields: input.fields ?? {},
    };
    data.records[id] = record;
    writeFile(data);
    return record;
}
|
|
65
|
+
/**
 * Remove a record by id.
 * @param {string} id
 * @returns {boolean} true when a record was deleted, false when absent.
 */
export function deleteRecord(id) {
    const store = readFile();
    const exists = Boolean(store.records[id]);
    if (exists) {
        delete store.records[id];
        writeFile(store);
    }
    return exists;
}
|
|
73
|
+
/**
 * Set one field on an existing record and bump its updatedAt timestamp.
 * @param {string} id
 * @param {string} key
 * @param {*} value
 * @returns {object|null} The updated record, or null when the record is missing.
 */
export function setRecordField(id, key, value) {
    const store = readFile();
    const target = store.records[id];
    if (!target) {
        return null;
    }
    target.fields[key] = value;
    target.updatedAt = new Date().toISOString();
    writeFile(store);
    return target;
}
|
|
83
|
+
/**
 * Remove one field from an existing record and bump its updatedAt timestamp.
 * @param {string} id
 * @param {string} key
 * @returns {object|null} The updated record, or null when the record is missing.
 */
export function deleteRecordField(id, key) {
    const store = readFile();
    const target = store.records[id];
    if (!target) {
        return null;
    }
    delete target.fields[key];
    target.updatedAt = new Date().toISOString();
    writeFile(store);
    return target;
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@rubytech/taskmaster",
|
|
3
|
-
"version": "1.0.5",
|
|
3
|
+
"version": "1.0.7",
|
|
4
4
|
"description": "AI-powered business assistant for small businesses",
|
|
5
5
|
"publishConfig": {
|
|
6
6
|
"access": "public"
|
|
@@ -72,7 +72,10 @@
|
|
|
72
72
|
"dist/markdown/**",
|
|
73
73
|
"dist/node-host/**",
|
|
74
74
|
"dist/pairing/**",
|
|
75
|
-
"dist/whatsapp/**"
|
|
75
|
+
"dist/whatsapp/**",
|
|
76
|
+
"dist/records/**",
|
|
77
|
+
"dist/filler/**",
|
|
78
|
+
"dist/license/**"
|
|
76
79
|
],
|
|
77
80
|
"scripts": {
|
|
78
81
|
"dev": "node scripts/run-node.mjs",
|
package/scripts/install.sh
CHANGED
|
@@ -118,8 +118,74 @@ if [ -n "$PORT" ]; then
|
|
|
118
118
|
PROVISION_ARGS="--port $PORT"
|
|
119
119
|
fi
|
|
120
120
|
|
|
121
|
-
|
|
122
|
-
|
|
121
|
+
# Determine the invoking (non-root) user and the port advertised over mDNS.
REAL_USER="${SUDO_USER:-$(whoami)}"
MDNS_PORT="${PORT:-18789}"

if [ "$(id -u)" = "0" ] && [ "$REAL_USER" != "root" ]; then
    # ── Running as root via sudo ──
    # Platform setup needs root. Provision (config, workspace, daemon) needs
    # to run as the real user so paths resolve to their home and systemctl
    # --user has a D-Bus session.

    if [ "$PLATFORM" = "linux" ]; then
        echo "Platform setup..."

        # Avahi / mDNS
        # NOTE(review): apt-get is Debian/Ubuntu-specific — other distros fall
        # through to the "failed (continuing)" branch. Confirm this is intended.
        apt-get install -y avahi-daemon avahi-utils >/dev/null 2>&1 \
            && echo " avahi-daemon installed" \
            || echo " avahi-daemon install failed (continuing)"

        # Hostname
        # NOTE(review): this renames the whole machine to "taskmaster" — a
        # global, user-visible side effect of the installer. Confirm intended.
        hostnamectl set-hostname taskmaster 2>/dev/null \
            && echo " hostname set to 'taskmaster'" \
            || echo " hostname set failed (continuing)"

        # mDNS service file: advertise the gateway as _http._tcp on MDNS_PORT.
        mkdir -p /etc/avahi/services
        cat > /etc/avahi/services/taskmaster.service << XMLEOF
<?xml version="1.0" standalone='no'?>
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<service-group>
<name replace-wildcards="yes">Taskmaster on %h</name>
<service>
<type>_http._tcp</type>
<port>${MDNS_PORT}</port>
<txt-record>path=/</txt-record>
</service>
</service-group>
XMLEOF
        systemctl restart avahi-daemon 2>/dev/null || true
        echo " mDNS service registered on port $MDNS_PORT"

        # Enable user services so systemctl --user works after logout
        # (lingering keeps the per-user systemd instance alive without a login).
        REAL_UID=$(id -u "$REAL_USER")
        loginctl enable-linger "$REAL_USER" 2>/dev/null || true
        systemctl start "user@${REAL_UID}.service" 2>/dev/null || true

        # Wait for user D-Bus session bus (up to ~5s) so the systemctl --user
        # calls made by provision do not race the user manager's startup.
        for _i in $(seq 1 10); do
            [ -S "/run/user/$REAL_UID/bus" ] && break
            sleep 0.5
        done
    fi

    # Resolve real user's home. getent covers non-/home layouts; fall back to
    # the conventional path when the passwd lookup yields nothing.
    REAL_HOME=$(getent passwd "$REAL_USER" 2>/dev/null | cut -d: -f6)
    [ -z "$REAL_HOME" ] && REAL_HOME="/home/$REAL_USER"
    REAL_UID=$(id -u "$REAL_USER")

    # Run provision as the real user, with HOME/XDG/D-Bus pointed at their
    # session so config lands in their home and systemctl --user succeeds.
    # shellcheck disable=SC2086
    sudo -u "$REAL_USER" \
        HOME="$REAL_HOME" \
        XDG_RUNTIME_DIR="/run/user/$REAL_UID" \
        DBUS_SESSION_BUS_ADDRESS="unix:path=/run/user/$REAL_UID/bus" \
        taskmaster provision --skip-platform $PROVISION_ARGS
else
    # Not root, or running as actual root — run provision normally
    # shellcheck disable=SC2086
    taskmaster provision $PROVISION_ARGS
fi
|
|
123
189
|
|
|
124
190
|
echo ""
|
|
125
191
|
echo "Installation complete."
|
|
@@ -1,3 +1,8 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: event-management
|
|
3
|
+
description: "Manage anything time-bound: appointments, meetings, reminders, follow-ups, callbacks, deadlines — any scheduled action between one or more parties."
|
|
4
|
+
---
|
|
5
|
+
|
|
1
6
|
# Event Management
|
|
2
7
|
|
|
3
8
|
Applies when handling anything time-bound: appointments, meetings, reminders, follow-ups, callbacks, deadlines — any commitment or scheduled action between one or more parties.
|
|
@@ -1,15 +1,9 @@
|
|
|
1
|
-
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
</description>
|
|
1
|
+
---
|
|
2
|
+
name: taskmaster
|
|
3
|
+
description: "Handle enquiries about Taskmaster — the AI business assistant for small businesses. Product info, pricing, signup, licensing, and support."
|
|
4
|
+
---
|
|
6
5
|
|
|
7
|
-
|
|
8
|
-
- Questions about Taskmaster, the product, pricing, or signup
|
|
9
|
-
- "What is Taskmaster?", "How does it work?", "How much does it cost?"
|
|
10
|
-
- Interest in AI assistant for trades business
|
|
11
|
-
- Requests for demo, trial, or signup
|
|
12
|
-
</triggers>
|
|
6
|
+
# Taskmaster Product Skill
|
|
13
7
|
|
|
14
8
|
---
|
|
15
9
|
|