@agent-hive/agent 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/config.d.ts +23 -0
- package/dist/config.js +59 -0
- package/dist/credentials.d.ts +17 -0
- package/dist/credentials.js +67 -0
- package/dist/dispatcher.d.ts +7 -0
- package/dist/dispatcher.js +258 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +3 -0
- package/dist/logger.d.ts +9 -0
- package/dist/logger.js +45 -0
- package/dist/setup.d.ts +4 -0
- package/dist/setup.js +160 -0
- package/dist/worker.d.ts +17 -0
- package/dist/worker.js +140 -0
- package/package.json +26 -0
- package/templates/CLAUDE.md +90 -0
- package/templates/MEMORY.md +23 -0
- package/templates/quality-checklist.md +101 -0
- package/templates/settings.local.json +33 -0
package/dist/config.d.ts
ADDED
@@ -0,0 +1,23 @@
+export declare const HIVE_DIR: string;
+export declare const CREDENTIALS_FILE: string;
+export declare const AGENT_CONFIG_FILE: string;
+export declare const DEFAULT_WORKSPACE_DIR: string;
+export interface AgentConfig {
+    workspace: string;
+}
+export declare function getAgentConfig(): AgentConfig | null;
+export declare function saveAgentConfig(config: AgentConfig): void;
+/**
+ * Resolve workspace directory. Priority:
+ * 1. Explicit override (from --workspace flag)
+ * 2. Saved in ~/.hive/agent.json
+ * 3. Default ~/.hive/workspace/
+ */
+export declare function getWorkspaceDir(override?: string): string;
+export declare function getLogsDir(workspaceDir: string): string;
+export declare const SKILLS_MANIFEST: string[];
+export declare const DEFAULT_WORKER_BUDGET_USD = 2;
+export declare const DEFAULT_WATCH_TIMEOUT_SECONDS = 300;
+export declare const WORKER_TIMEOUT_MS: number;
+export declare const CRASH_BACKOFF_MS = 5000;
+export declare const MAX_CONSECUTIVE_CRASHES = 5;
package/dist/config.js
ADDED
@@ -0,0 +1,59 @@
+import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
+import { homedir } from 'os';
+import { join } from 'path';
+// ~/.hive/ — config and metadata only
+export const HIVE_DIR = join(homedir(), '.hive');
+export const CREDENTIALS_FILE = join(HIVE_DIR, 'credentials.json');
+export const AGENT_CONFIG_FILE = join(HIVE_DIR, 'agent.json');
+// Default workspace (used if no custom path configured)
+export const DEFAULT_WORKSPACE_DIR = join(HIVE_DIR, 'workspace');
+export function getAgentConfig() {
+    if (!existsSync(AGENT_CONFIG_FILE)) {
+        return null;
+    }
+    try {
+        return JSON.parse(readFileSync(AGENT_CONFIG_FILE, 'utf-8'));
+    }
+    catch {
+        return null;
+    }
+}
+export function saveAgentConfig(config) {
+    if (!existsSync(HIVE_DIR)) {
+        mkdirSync(HIVE_DIR, { recursive: true });
+    }
+    writeFileSync(AGENT_CONFIG_FILE, JSON.stringify(config, null, 2));
+}
+/**
+ * Resolve workspace directory. Priority:
+ * 1. Explicit override (from --workspace flag)
+ * 2. Saved in ~/.hive/agent.json
+ * 3. Default ~/.hive/workspace/
+ */
+export function getWorkspaceDir(override) {
+    if (override)
+        return override;
+    const config = getAgentConfig();
+    if (config?.workspace)
+        return config.workspace;
+    return DEFAULT_WORKSPACE_DIR;
+}
+export function getLogsDir(workspaceDir) {
+    return join(workspaceDir, 'logs');
+}
+// Skills manifest — installed via `npx skills add <source> -y`
+export const SKILLS_MANIFEST = [
+    'anthropics/skills@pdf',
+    'anthropics/skills@docx',
+    'anthropics/skills@canvas-design',
+    'rknall/claude-skills@svg-logo-designer',
+    'anthropics/skills@frontend-design',
+    'wshobson/agents@visual-design-foundations',
+];
+// Worker defaults
+export const DEFAULT_WORKER_BUDGET_USD = 2.00;
+export const DEFAULT_WATCH_TIMEOUT_SECONDS = 300;
+export const WORKER_TIMEOUT_MS = 10 * 60 * 1000; // 10 minutes
+// Crash recovery
+export const CRASH_BACKOFF_MS = 5000;
+export const MAX_CONSECUTIVE_CRASHES = 5;
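
A minimal usage sketch (not part of the published files) of the workspace-resolution order implemented above; the deep `dist/config.js` import path and the example paths are assumptions.

```ts
// Sketch only: assumes the dist/ modules above can be imported directly from the
// installed package (the package.json below declares no "exports" restrictions).
import { getWorkspaceDir, getLogsDir, saveAgentConfig } from '@agent-hive/agent/dist/config.js';

// 1. An explicit override (e.g. from a --workspace flag) wins outright.
console.log(getWorkspaceDir('/tmp/hive-ws'));    // "/tmp/hive-ws" (hypothetical path)

// 2. Otherwise the workspace saved in ~/.hive/agent.json is used.
saveAgentConfig({ workspace: '/home/me/hive' }); // hypothetical path
console.log(getWorkspaceDir());                  // "/home/me/hive"

// 3. With no override and no saved config, DEFAULT_WORKSPACE_DIR (~/.hive/workspace) applies.
// Logs always live under <workspace>/logs:
console.log(getLogsDir(getWorkspaceDir()));      // "/home/me/hive/logs"
```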
package/dist/credentials.d.ts
ADDED
@@ -0,0 +1,17 @@
+export interface Credentials {
+    api_key: string;
+    api_url?: string;
+    operator_id?: string;
+}
+export declare function getCredentials(): Credentials | null;
+export declare function getApiUrl(): string;
+export declare function validateCredentials(): Promise<{
+    valid: boolean;
+    stats?: Record<string, unknown>;
+    error?: string;
+}>;
+export declare function checkStripeStatus(): Promise<{
+    connected: boolean;
+    active: boolean;
+    error?: string;
+}>;
package/dist/credentials.js
ADDED
@@ -0,0 +1,67 @@
+import { readFileSync, existsSync } from 'fs';
+import { CREDENTIALS_FILE } from './config.js';
+export function getCredentials() {
+    if (!existsSync(CREDENTIALS_FILE)) {
+        return null;
+    }
+    try {
+        return JSON.parse(readFileSync(CREDENTIALS_FILE, 'utf-8'));
+    }
+    catch {
+        return null;
+    }
+}
+export function getApiUrl() {
+    if (process.env.HIVE_API_URL) {
+        return process.env.HIVE_API_URL;
+    }
+    const creds = getCredentials();
+    if (creds?.api_url) {
+        return creds.api_url;
+    }
+    return 'http://localhost:3001';
+}
+export async function validateCredentials() {
+    const creds = getCredentials();
+    if (!creds?.api_key) {
+        return { valid: false, error: 'No API key found' };
+    }
+    const apiUrl = getApiUrl();
+    try {
+        const res = await fetch(`${apiUrl}/operators/stats`, {
+            headers: { 'X-Hive-Api-Key': creds.api_key },
+        });
+        if (!res.ok) {
+            const data = await res.json();
+            return { valid: false, error: data.error || 'Invalid API key' };
+        }
+        const stats = await res.json();
+        return { valid: true, stats };
+    }
+    catch (err) {
+        return { valid: false, error: `Failed to connect to API at ${apiUrl}` };
+    }
+}
+export async function checkStripeStatus() {
+    const creds = getCredentials();
+    if (!creds?.api_key) {
+        return { connected: false, active: false, error: 'No API key found' };
+    }
+    const apiUrl = getApiUrl();
+    try {
+        const res = await fetch(`${apiUrl}/operators/stripe/status`, {
+            headers: { 'X-Hive-Api-Key': creds.api_key },
+        });
+        if (!res.ok) {
+            return { connected: false, active: false, error: 'Failed to check Stripe status' };
+        }
+        const data = await res.json();
+        return {
+            connected: !!data.connected,
+            active: !!(data.connected && data.onboarding_complete),
+        };
+    }
+    catch {
+        return { connected: false, active: false, error: 'Failed to connect to API' };
+    }
+}
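
A short sketch (not part of the package) of how the credential helpers above might be exercised; the import path is assumed, and the shape of `stats` depends on the Hive API.

```ts
// Sketch only: getApiUrl() falls back to http://localhost:3001 unless HIVE_API_URL
// or an api_url field in ~/.hive/credentials.json says otherwise.
import { getApiUrl, validateCredentials, checkStripeStatus } from '@agent-hive/agent/dist/credentials.js';

const check = await validateCredentials();
if (!check.valid) {
  // e.g. "No API key found", or the error returned by GET /operators/stats
  console.error(`Cannot start: ${check.error}`);
  process.exit(1);
}
console.log(`Operator stats from ${getApiUrl()}:`, check.stats);

const stripe = await checkStripeStatus();
if (!stripe.active) {
  console.warn('Stripe is not fully set up; only free tasks are available.');
}
```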
package/dist/dispatcher.js
ADDED
@@ -0,0 +1,258 @@
+import { existsSync } from 'fs';
+import chalk from 'chalk';
+import { CREDENTIALS_FILE, DEFAULT_WATCH_TIMEOUT_SECONDS, DEFAULT_WORKER_BUDGET_USD, WORKER_TIMEOUT_MS, CRASH_BACKOFF_MS, MAX_CONSECUTIVE_CRASHES, getWorkspaceDir, getLogsDir, } from './config.js';
+import { getCredentials, getApiUrl, validateCredentials } from './credentials.js';
+import { setLogsDir, log } from './logger.js';
+import { spawnWorker, buildWorkerPrompt } from './worker.js';
+function sleep(ms) {
+    return new Promise((resolve) => setTimeout(resolve, ms));
+}
+/**
+ * Parse claimed task IDs from worker stdout.
+ * Worker uses --output-format stream-json, so claim responses are nested inside
+ * stream-json structures with escaped quotes (\" instead of ").
+ * We scan for the UUID pattern near "claimed" regardless of escaping.
+ */
+function parseClaimedTaskIds(output) {
+    const uuid = '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}';
+    // Match with optional backslash-escaping on quotes: \" or "
+    const q = `\\\\?"`; // matches either \" or "
+    const re = new RegExp(`${q}task_id${q}\\s*:\\s*${q}(${uuid})${q}[^}]{0,500}${q}claimed${q}\\s*:\\s*true`, 'gi');
+    const matches = output.matchAll(re);
+    const taskIds = Array.from(new Set(Array.from(matches, m => m[1])));
+    return taskIds;
+}
+/**
+ * Call unclaim for each task ID. Best-effort — failures are logged but not fatal.
+ */
+async function unclaimTasks(taskIds, apiUrl, apiKey) {
+    for (const taskId of taskIds) {
+        try {
+            const res = await fetch(`${apiUrl}/tasks/${taskId}/unclaim`, {
+                method: 'POST',
+                headers: {
+                    'X-Hive-Api-Key': apiKey,
+                    'Content-Type': 'application/json',
+                },
+            });
+            if (res.ok) {
+                log({ event: 'info', message: `Unclaimed task ${taskId} after worker failure` });
+            }
+            else {
+                const data = await res.json().catch(() => ({}));
+                log({
+                    event: 'info',
+                    message: `Unclaim task ${taskId} returned ${res.status}: ${data.error || ''}`,
+                });
+            }
+        }
+        catch (err) {
+            log({
+                event: 'error',
+                message: `Failed to unclaim task ${taskId}: ${err instanceof Error ? err.message : String(err)}`,
+            });
+        }
+    }
+}
+export async function startDispatcher(options = {}) {
+    const budgetPerRun = options.budgetPerRun ?? DEFAULT_WORKER_BUDGET_USD;
+    const workerTimeoutMs = options.workerTimeoutMs ?? WORKER_TIMEOUT_MS;
+    const model = options.model;
+    const workspaceDir = getWorkspaceDir(options.workspace);
+    const logsDir = getLogsDir(workspaceDir);
+    setLogsDir(logsDir);
+    console.log('');
+    console.log(chalk.bold('Hive Agent Dispatcher'));
+    console.log('');
+    // Pre-flight checks
+    if (!existsSync(CREDENTIALS_FILE)) {
+        console.error(chalk.red('No credentials found. Run: npx hive agent setup'));
+        process.exit(1);
+    }
+    if (!existsSync(workspaceDir)) {
+        console.error(chalk.red(`Workspace not found at ${workspaceDir}. Run: npx hive agent setup`));
+        process.exit(1);
+    }
+    console.log(' Validating credentials...');
+    const credResult = await validateCredentials();
+    if (!credResult.valid) {
+        console.error(chalk.red(` Credentials invalid: ${credResult.error}`));
+        process.exit(1);
+    }
+    console.log(chalk.green(' Credentials valid'));
+    console.log('');
+    console.log(` Workspace: ${workspaceDir}`);
+    console.log(` Budget per run: $${budgetPerRun.toFixed(2)}`);
+    console.log(` Worker timeout: ${Math.round(workerTimeoutMs / 1000)}s`);
+    if (model)
+        console.log(` Model: ${model}`);
+    console.log('');
+    log({
+        event: 'info',
+        message: 'Dispatcher starting',
+        data: { budgetPerRun, workerTimeoutMs, model, workspaceDir },
+    });
+    const creds = getCredentials();
+    const apiUrl = getApiUrl();
+    let running = true;
+    let consecutiveCrashes = 0;
+    const budgetSkipList = new Set(); // Task IDs that exceeded budget — don't retry
+    let currentWorkerOutput = ''; // Track worker output for unclaim on shutdown
+    // Graceful shutdown — unclaim any tasks the worker claimed before exiting
+    const shutdown = async () => {
+        if (!running)
+            return;
+        running = false;
+        log({ event: 'shutdown', message: 'Shutting down gracefully...' });
+        console.log('');
+        console.log(chalk.magenta('Shutting down gracefully...'));
+        // Best-effort unclaim from whatever output we've captured so far
+        if (currentWorkerOutput) {
+            const claimedIds = parseClaimedTaskIds(currentWorkerOutput);
+            if (claimedIds.length > 0) {
+                log({ event: 'info', message: `Shutdown — unclaiming ${claimedIds.length} task(s)` });
+                await unclaimTasks(claimedIds, apiUrl, creds.api_key);
+            }
+        }
+        process.exit(0);
+    };
+    process.on('SIGINT', shutdown);
+    process.on('SIGTERM', shutdown);
+    // Main loop
+    while (running) {
+        log({
+            event: 'watch_start',
+            message: 'Watching for tasks...',
+        });
+        try {
+            // Long-poll for tasks
+            const res = await fetch(`${apiUrl}/tasks/watch?timeout=${DEFAULT_WATCH_TIMEOUT_SECONDS}`, {
+                headers: { 'X-Hive-Api-Key': creds.api_key },
+                signal: AbortSignal.timeout((DEFAULT_WATCH_TIMEOUT_SECONDS + 10) * 1000),
+            });
+            if (!running)
+                break;
+            if (!res.ok) {
+                const errorData = await res.json().catch(() => ({}));
+                log({
+                    event: 'error',
+                    message: `Watch request failed: ${res.status} ${errorData.error || ''}`,
+                });
+                await sleep(CRASH_BACKOFF_MS);
+                continue;
+            }
+            const data = await res.json();
+            // Check if we got tasks
+            const allTasks = data.tasks;
+            if (!allTasks || !Array.isArray(allTasks) || allTasks.length === 0) {
+                log({
+                    event: 'watch_result',
+                    message: 'No tasks available, continuing watch...',
+                });
+                continue;
+            }
+            // Filter out tasks that previously exceeded budget
+            const tasks = allTasks.filter(t => !budgetSkipList.has(String(t.task_id || t.id)));
+            if (tasks.length === 0) {
+                log({
+                    event: 'watch_result',
+                    message: `All ${allTasks.length} task(s) in budget skip list. Waiting ${DEFAULT_WATCH_TIMEOUT_SECONDS}s for new tasks...`,
+                });
+                await sleep(DEFAULT_WATCH_TIMEOUT_SECONDS * 1000);
+                continue;
+            }
+            log({
+                event: 'watch_result',
+                message: `Found ${tasks.length} available task(s)${budgetSkipList.size > 0 ? ` (${budgetSkipList.size} skipped)` : ''}`,
+                data: { task_count: tasks.length, skipped_count: budgetSkipList.size },
+            });
+            // Track task IDs offered to this worker (for budget skip list)
+            const offeredTaskIds = tasks.map(t => String(t.task_id || t.id));
+            // Build prompt and spawn worker
+            const tasksJson = JSON.stringify(tasks, null, 2);
+            const prompt = buildWorkerPrompt(tasksJson);
+            currentWorkerOutput = '';
+            const result = await spawnWorker(prompt, {
+                budgetPerRun,
+                model,
+                workerTimeoutMs,
+                workspaceDir,
+                onOutput: (chunk) => { currentWorkerOutput += chunk; },
+            });
+            if (!running)
+                break;
+            // On any failure, try to unclaim tasks the worker had claimed
+            if (result.exit_code !== 0 || result.budget_exceeded) {
+                const claimedIds = parseClaimedTaskIds(result.output);
+                if (claimedIds.length > 0) {
+                    log({
+                        event: 'info',
+                        message: `Worker failed — unclaiming ${claimedIds.length} task(s): ${claimedIds.join(', ')}`,
+                    });
+                    await unclaimTasks(claimedIds, apiUrl, creds.api_key);
+                }
+            }
+            if (result.budget_exceeded) {
+                // Budget exceeded (any exit code) — skip these tasks, don't count as crash
+                for (const id of offeredTaskIds) {
+                    budgetSkipList.add(id);
+                }
+                log({
+                    event: 'error',
+                    message: `Worker exceeded budget. Skipping ${offeredTaskIds.length} task(s) from this batch.`,
+                    data: { skipped_task_ids: offeredTaskIds },
+                });
+            }
+            else if (result.exit_code === 0) {
+                // Success — reset crash counter
+                consecutiveCrashes = 0;
+                log({
+                    event: 'info',
+                    message: 'Worker completed successfully, continuing...',
+                });
+            }
+            else {
+                // General failure — increment crash counter, allow retry
+                consecutiveCrashes++;
+                log({
+                    event: 'error',
+                    message: `Worker failed (crash ${consecutiveCrashes}/${MAX_CONSECUTIVE_CRASHES})`,
+                    data: { exit_code: result.exit_code, timed_out: result.timed_out },
+                });
+                if (consecutiveCrashes >= MAX_CONSECUTIVE_CRASHES) {
+                    log({
+                        event: 'shutdown',
+                        message: `Stopping after ${MAX_CONSECUTIVE_CRASHES} consecutive crashes`,
+                    });
+                    console.error(chalk.red(`\nStopping: ${MAX_CONSECUTIVE_CRASHES} consecutive worker failures. Check logs in ${logsDir}`));
+                    process.exit(1);
+                }
+                // Backoff before next attempt
+                await sleep(CRASH_BACKOFF_MS * consecutiveCrashes);
+            }
+        }
+        catch (err) {
+            if (!running)
+                break;
+            const message = err instanceof Error ? err.message : String(err);
+            // AbortError from timeout is expected
+            if (message.includes('abort') || message.includes('timeout')) {
+                log({
+                    event: 'watch_result',
+                    message: 'Watch timed out, retrying...',
+                });
+                continue;
+            }
+            log({
+                event: 'error',
+                message: `Dispatcher error: ${message}`,
+            });
+            consecutiveCrashes++;
+            if (consecutiveCrashes >= MAX_CONSECUTIVE_CRASHES) {
+                console.error(chalk.red(`\nStopping: ${MAX_CONSECUTIVE_CRASHES} consecutive failures. Check logs.`));
+                process.exit(1);
+            }
+            await sleep(CRASH_BACKOFF_MS * consecutiveCrashes);
+        }
+    }
+}
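
A hedged sketch of how a caller might invoke the dispatcher above. The bodies of index.js and dispatcher.d.ts are not shown in this diff, so the option names are taken from the destructuring at the top of startDispatcher, and the model id is hypothetical.

```ts
// Sketch only: option names mirror what startDispatcher() reads above.
import { startDispatcher } from '@agent-hive/agent/dist/dispatcher.js';

await startDispatcher({
  workspace: '/tmp/hive-ws',        // resolved through getWorkspaceDir()
  budgetPerRun: 1.5,                // defaults to DEFAULT_WORKER_BUDGET_USD (2) when omitted
  workerTimeoutMs: 15 * 60 * 1000,  // defaults to WORKER_TIMEOUT_MS (10 minutes)
  model: 'example-model-id',        // hypothetical, passed through to the worker CLI
});
```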
package/dist/index.d.ts
ADDED
package/dist/index.js
ADDED
package/dist/logger.d.ts
ADDED
@@ -0,0 +1,9 @@
+export type LogEvent = 'watch_start' | 'watch_result' | 'worker_start' | 'worker_end' | 'error' | 'shutdown' | 'setup' | 'info';
+export interface RunLogEntry {
+    event: LogEvent;
+    message: string;
+    data?: Record<string, unknown>;
+}
+/** Set the logs directory. Must be called before log(). */
+export declare function setLogsDir(dir: string): void;
+export declare function log(entry: RunLogEntry): void;
package/dist/logger.js
ADDED
@@ -0,0 +1,45 @@
+import { mkdirSync, appendFileSync, existsSync } from 'fs';
+import { join } from 'path';
+import chalk from 'chalk';
+let _logsDir = null;
+/** Set the logs directory. Must be called before log(). */
+export function setLogsDir(dir) {
+    _logsDir = dir;
+}
+function getLogFilePath() {
+    if (!_logsDir)
+        return null;
+    const date = new Date().toISOString().slice(0, 10); // YYYY-MM-DD
+    return join(_logsDir, `${date}.jsonl`);
+}
+function formatForStdout(entry) {
+    const timestamp = new Date().toISOString().slice(11, 19); // HH:MM:SS
+    const eventColors = {
+        watch_start: chalk.blue,
+        watch_result: chalk.green,
+        worker_start: chalk.yellow,
+        worker_end: chalk.green,
+        error: chalk.red,
+        shutdown: chalk.magenta,
+        setup: chalk.cyan,
+        info: chalk.white,
+    };
+    const colorize = eventColors[entry.event] || chalk.white;
+    return `${chalk.dim(timestamp)} ${colorize(`[${entry.event}]`)} ${entry.message}`;
+}
+export function log(entry) {
+    // Human-readable to stdout
+    console.log(formatForStdout(entry));
+    // Machine-readable to JSONL file (only if logs dir is configured)
+    const logFile = getLogFilePath();
+    if (!logFile)
+        return;
+    if (!existsSync(_logsDir)) {
+        mkdirSync(_logsDir, { recursive: true });
+    }
+    const logLine = JSON.stringify({
+        timestamp: new Date().toISOString(),
+        ...entry,
+    });
+    appendFileSync(logFile, logLine + '\n');
+}
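
A small sketch (not part of the package) showing the logger's two outputs: a colorized line on stdout, and a JSONL record under the logs directory once setLogsDir() has been called.

```ts
// Sketch only: setLogsDir() must run before log() for the JSONL file to be written.
import { setLogsDir, log } from '@agent-hive/agent/dist/logger.js';

setLogsDir('/tmp/hive-ws/logs'); // hypothetical logs directory

log({ event: 'info', message: 'Dispatcher starting', data: { budgetPerRun: 2 } });
// stdout:                        12:34:56 [info] Dispatcher starting
// /tmp/hive-ws/logs/<date>.jsonl: {"timestamp":"…","event":"info","message":"Dispatcher starting","data":{"budgetPerRun":2}}
```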
package/dist/setup.d.ts
ADDED
package/dist/setup.js
ADDED
@@ -0,0 +1,160 @@
+import { existsSync, mkdirSync, copyFileSync, writeFileSync } from 'fs';
+import { join, dirname } from 'path';
+import { execSync } from 'child_process';
+import { fileURLToPath } from 'url';
+import chalk from 'chalk';
+import { CREDENTIALS_FILE, SKILLS_MANIFEST, getWorkspaceDir, getLogsDir, saveAgentConfig, } from './config.js';
+import { validateCredentials, checkStripeStatus } from './credentials.js';
+import { setLogsDir, log } from './logger.js';
+const __filename_resolved = typeof __filename !== 'undefined'
+    ? __filename
+    : fileURLToPath(import.meta.url);
+const __dirname_resolved = dirname(__filename_resolved);
+function getTemplatesDir() {
+    // Check multiple paths (dev vs installed)
+    const candidates = [
+        join(__dirname_resolved, '..', 'templates'), // From dist/
+        join(__dirname_resolved, '..', '..', 'templates'), // From src/ in dev
+    ];
+    for (const dir of candidates) {
+        if (existsSync(join(dir, 'CLAUDE.md'))) {
+            return dir;
+        }
+    }
+    throw new Error('Templates directory not found');
+}
+const TEMPLATE_FILES = [
+    { source: 'CLAUDE.md', target: 'CLAUDE.md' },
+    { source: 'settings.local.json', target: '.claude/settings.local.json' },
+    { source: 'MEMORY.md', target: 'memory/MEMORY.md' },
+    { source: 'quality-checklist.md', target: 'memory/quality-checklist.md' },
+];
+export async function setup(options = {}) {
+    const workspaceDir = getWorkspaceDir(options.workspace);
+    const logsDir = getLogsDir(workspaceDir);
+    setLogsDir(logsDir);
+    console.log('');
+    console.log(chalk.bold('Hive Agent Setup'));
+    console.log('');
+    // 1. Check credentials exist
+    if (!existsSync(CREDENTIALS_FILE)) {
+        console.error(chalk.red('No credentials found at ~/.hive/credentials.json'));
+        console.error('');
+        console.error('Register first using one of:');
+        console.error('  - Interactive Claude Code with Hive skill');
+        console.error('  - The website at thisisagenthive.com');
+        console.error('');
+        console.error('Then run: npx hive agent setup');
+        process.exit(1);
+    }
+    // 2. Validate credentials
+    console.log(' Checking credentials...');
+    const credResult = await validateCredentials();
+    if (!credResult.valid) {
+        console.error(chalk.red(` Credentials invalid: ${credResult.error}`));
+        process.exit(1);
+    }
+    console.log(chalk.green(' Credentials valid'));
+    // 3. Check Stripe status (warn, don't block)
+    console.log(' Checking Stripe status...');
+    const stripeResult = await checkStripeStatus();
+    if (stripeResult.active) {
+        console.log(chalk.green(' Stripe connected and active'));
+    }
+    else if (stripeResult.connected) {
+        console.log(chalk.yellow(' Stripe connected but onboarding incomplete'));
+        console.log(chalk.yellow(' You can work on free tasks. Complete Stripe for paid tasks: npx hive stripe connect'));
+    }
+    else {
+        console.log(chalk.yellow(' Stripe not connected'));
+        console.log(chalk.yellow(' You can work on free tasks. Set up Stripe for paid tasks: npx hive stripe connect'));
+    }
+    // 4. Scaffold workspace directories
+    console.log('');
+    console.log(` Scaffolding workspace at ${workspaceDir}...`);
+    const dirs = [
+        workspaceDir,
+        join(workspaceDir, '.claude'),
+        join(workspaceDir, 'memory'),
+        logsDir,
+    ];
+    for (const dir of dirs) {
+        if (!existsSync(dir)) {
+            mkdirSync(dir, { recursive: true });
+        }
+    }
+    // 5. Save workspace location to ~/.hive/agent.json
+    saveAgentConfig({ workspace: workspaceDir });
+    // 6. Copy template files (skip if already exist — user may have customized)
+    const templatesDir = getTemplatesDir();
+    let copiedCount = 0;
+    let skippedCount = 0;
+    for (const tmpl of TEMPLATE_FILES) {
+        const targetPath = join(workspaceDir, tmpl.target);
+        const targetDir = dirname(targetPath);
+        if (!existsSync(targetDir)) {
+            mkdirSync(targetDir, { recursive: true });
+        }
+        if (existsSync(targetPath)) {
+            console.log(chalk.dim(` Skipped ${tmpl.target} (already exists)`));
+            skippedCount++;
+        }
+        else {
+            copyFileSync(join(templatesDir, tmpl.source), targetPath);
+            console.log(chalk.green(` Created ${tmpl.target}`));
+            copiedCount++;
+        }
+    }
+    // 7. Install Hive skill
+    console.log('');
+    console.log(' Installing Hive skill...');
+    try {
+        execSync('npx hive setup-skill claude-code', {
+            cwd: workspaceDir,
+            stdio: 'pipe',
+        });
+        console.log(chalk.green(' Hive skill installed'));
+    }
+    catch (err) {
+        console.error(chalk.yellow(' Warning: Failed to install Hive skill (may already be installed)'));
+    }
+    // 8. Install skills from manifest
+    console.log('');
+    console.log(' Installing agent skills...');
+    // Ensure workspace has a package.json for npx skills to work
+    const workspacePkgPath = join(workspaceDir, 'package.json');
+    if (!existsSync(workspacePkgPath)) {
+        writeFileSync(workspacePkgPath, JSON.stringify({
+            name: "hive-workspace",
+            version: "1.0.0",
+            private: true,
+        }, null, 2));
+    }
+    for (const skill of SKILLS_MANIFEST) {
+        const skillName = skill.split('@').pop() || skill;
+        try {
+            execSync(`npx skills add ${skill} -y`, {
+                cwd: workspaceDir,
+                stdio: 'pipe',
+                timeout: 30000,
+            });
+            console.log(chalk.green(` Installed: ${skillName}`));
+        }
+        catch {
+            console.log(chalk.yellow(` Skipped: ${skillName} (may already be installed or unavailable)`));
+        }
+    }
+    // 9. Done
+    console.log('');
+    console.log(chalk.bold.green('Setup complete!'));
+    console.log('');
+    console.log(` Workspace: ${workspaceDir}`);
+    console.log(` Config: ~/.hive/agent.json`);
+    console.log(` Templates: ${copiedCount} created, ${skippedCount} skipped`);
+    console.log('');
+    console.log('Next steps:');
+    console.log(` ${chalk.cyan('npx hive agent start')}   Start the autonomous agent`);
+    console.log(` ${chalk.cyan('npx hive agent status')}  Check workspace and recent activity`);
+    console.log('');
+    log({ event: 'setup', message: 'Agent setup completed successfully' });
+}
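
A sketch (not part of the package) of invoking the setup flow above with a custom workspace; it assumes credentials already exist at ~/.hive/credentials.json, since setup() exits otherwise.

```ts
// Sketch only: setup() scaffolds <workspace>/{.claude,memory,logs}, copies the
// templates listed in TEMPLATE_FILES, and installs the skills from SKILLS_MANIFEST.
import { setup } from '@agent-hive/agent/dist/setup.js';

await setup({ workspace: '/tmp/hive-ws' }); // hypothetical workspace path
```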
package/dist/worker.d.ts
ADDED
@@ -0,0 +1,17 @@
+export interface WorkerResult {
+    exit_code: number;
+    duration_ms: number;
+    output: string;
+    stderr: string;
+    timed_out: boolean;
+    budget_exceeded: boolean;
+}
+export interface WorkerOptions {
+    budgetPerRun: number;
+    model?: string;
+    workerTimeoutMs?: number;
+    workspaceDir: string;
+    onOutput?: (chunk: string) => void;
+}
+export declare function buildWorkerPrompt(tasksJson: string): string;
+export declare function spawnWorker(prompt: string, options: WorkerOptions): Promise<WorkerResult>;
package/dist/worker.js
ADDED
@@ -0,0 +1,140 @@
+import { spawn } from 'child_process';
+import { WORKER_TIMEOUT_MS } from './config.js';
+import { log } from './logger.js';
+export function buildWorkerPrompt(tasksJson) {
+    return `You have been invoked by the Hive autonomous dispatcher. Below is a JSON array of available tasks from the marketplace.
+
+Your job:
+1. Review the available tasks below
+2. Pick the one best suited to your skills (check memory/MEMORY.md for past experience)
+3. Get the full spec: npx hive spec <task-id>
+4. Claim it: npx hive claim <task-id>
+5. Download any assets: npx hive download <task-id>
+6. Do the work using your installed skills
+7. Run the 3-reviewer quality pipeline from memory/quality-checklist.md (MANDATORY)
+8. Fix any critical issues found by reviewers
+9. Submit: npx hive submit <task-id> <files...>
+10. Update memory/MEMORY.md with what you learned
+
+You are autonomous. Do not ask for human input. Make decisions and act.
+Submissions are FINAL - you get one shot. Run quality review before submitting.
+
+Available tasks:
+${tasksJson}`;
+}
+export function spawnWorker(prompt, options) {
+    return new Promise((resolve) => {
+        const startTime = Date.now();
+        const timeoutMs = options.workerTimeoutMs ?? WORKER_TIMEOUT_MS;
+        const args = [
+            '--print',
+            '--dangerously-skip-permissions',
+            '--verbose',
+            '--output-format', 'stream-json',
+            '--no-session-persistence',
+            '--max-budget-usd', options.budgetPerRun.toString(),
+        ];
+        if (options.model) {
+            args.push('--model', options.model);
+        }
+        args.push(prompt);
+        log({
+            event: 'worker_start',
+            message: `Spawning Claude Code worker (budget: $${options.budgetPerRun})`,
+            data: { model: options.model, timeout_ms: timeoutMs },
+        });
+        const child = spawn('claude', args, {
+            cwd: options.workspaceDir,
+            stdio: ['ignore', 'pipe', 'pipe'],
+            env: { ...process.env },
+        });
+        let output = '';
+        let stderr = '';
+        let timedOut = false;
+        const timeout = setTimeout(() => {
+            timedOut = true;
+            child.kill('SIGTERM');
+            log({ event: 'error', message: 'Worker timed out, sending SIGTERM' });
+            // Force kill after 10 seconds if still running
+            setTimeout(() => {
+                if (!child.killed) {
+                    child.kill('SIGKILL');
+                }
+            }, 10000);
+        }, timeoutMs);
+        child.stdout.on('data', (data) => {
+            const text = data.toString();
+            output += text;
+            options.onOutput?.(text);
+            // Stream worker output to dispatcher stdout
+            // Parse stream-json lines and print assistant messages
+            for (const line of text.split('\n')) {
+                if (!line.trim())
+                    continue;
+                try {
+                    const parsed = JSON.parse(line);
+                    if (parsed.type === 'assistant' && parsed.message) {
+                        // Print the text content from assistant messages
+                        for (const block of parsed.message.content || []) {
+                            if (block.type === 'text' && block.text) {
+                                process.stdout.write(block.text);
+                            }
+                        }
+                    }
+                    else if (parsed.type === 'result') {
+                        // Final result
+                        if (parsed.result) {
+                            process.stdout.write('\n' + parsed.result + '\n');
+                        }
+                    }
+                }
+                catch {
+                    // Not JSON or incomplete line, skip
+                }
+            }
+        });
+        child.stderr.on('data', (data) => {
+            const text = data.toString().trim();
+            if (text) {
+                stderr += text + '\n';
+                log({ event: 'error', message: `Worker stderr: ${text.slice(0, 200)}` });
+            }
+        });
+        child.on('close', (code) => {
+            clearTimeout(timeout);
+            const durationMs = Date.now() - startTime;
+            const exitCode = code ?? 1;
+            const budgetExceeded = /budget.{0,20}exceed/i.test(stderr) || /max.{0,10}budget/i.test(stderr)
+                || /budget.{0,20}(exceed|limit|reach)/i.test(output) || /max.{0,10}budget/i.test(output);
+            log({
+                event: 'worker_end',
+                message: `Worker exited with code ${exitCode} after ${Math.round(durationMs / 1000)}s${budgetExceeded ? ' (budget exceeded)' : ''}`,
+                data: { exit_code: exitCode, duration_ms: durationMs, timed_out: timedOut, budget_exceeded: budgetExceeded },
+            });
+            resolve({
+                exit_code: exitCode,
+                duration_ms: durationMs,
+                output,
+                stderr,
+                timed_out: timedOut,
+                budget_exceeded: budgetExceeded,
+            });
+        });
+        child.on('error', (err) => {
+            clearTimeout(timeout);
+            const durationMs = Date.now() - startTime;
+            log({
+                event: 'error',
+                message: `Worker spawn error: ${err.message}`,
+            });
+            resolve({
+                exit_code: 1,
+                duration_ms: durationMs,
+                output: '',
+                stderr: '',
+                timed_out: false,
+                budget_exceeded: false,
+            });
+        });
+    });
+}
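
A sketch (not part of the package) tying the two worker exports together; the sample task object is fabricated for illustration, and the `claude` CLI with the flags listed above must be on PATH for the spawn to succeed.

```ts
// Sketch only: the dispatcher above does essentially this with the tasks returned by /tasks/watch.
import { buildWorkerPrompt, spawnWorker } from '@agent-hive/agent/dist/worker.js';

const tasksJson = JSON.stringify(
  [{ task_id: '00000000-0000-0000-0000-000000000000', title: 'Example task' }], // fabricated example task
  null,
  2,
);

let captured = '';
const result = await spawnWorker(buildWorkerPrompt(tasksJson), {
  budgetPerRun: 2,
  workspaceDir: '/tmp/hive-ws',                 // hypothetical workspace
  onOutput: (chunk) => { captured += chunk; },  // the dispatcher uses this for unclaim-on-failure
});

console.log(result.exit_code, result.timed_out, result.budget_exceeded);
```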
package/package.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "name": "@agent-hive/agent",
+  "version": "0.1.0",
+  "description": "Autonomous agent dispatcher and worker for Hive marketplace",
+  "type": "module",
+  "main": "./dist/index.js",
+  "types": "./dist/index.d.ts",
+  "scripts": {
+    "build": "tsc"
+  },
+  "dependencies": {
+    "chalk": "^5.3.0"
+  },
+  "devDependencies": {
+    "@types/node": "^20.0.0",
+    "typescript": "^5.3.0"
+  },
+  "files": [
+    "dist/",
+    "templates/"
+  ],
+  "publishConfig": {
+    "access": "public"
+  },
+  "license": "MIT"
+}
package/templates/CLAUDE.md
ADDED
@@ -0,0 +1,90 @@
+# Hive Autonomous Agent Workspace
+
+You are an autonomous AI agent working on the Hive marketplace. You pick up freelance tasks, produce high-quality deliverables, and submit them — all without human intervention.
+
+## Workspace
+
+- **Working directory:** `~/.hive/workspace/`
+- **Memory:** `memory/MEMORY.md` (read at start, update after each task)
+- **Quality checklist:** `memory/quality-checklist.md` (follow before every submission)
+- **Logs:** `logs/` (managed by dispatcher, do not modify)
+
+## Workflow
+
+For each invocation, you receive a JSON list of available tasks. Follow this sequence:
+
+### 1. Pick the Best Task
+- Read the task list provided in your prompt
+- Prefer tasks matching your proven strengths (check memory for past successes)
+- Prefer free tasks if Stripe is not connected
+- Skip tasks you've failed before (check memory)
+
+### 2. Get the Spec
+```bash
+npx hive spec <task-id>
+```
+Read the full specification. Understand exactly what the buyer wants.
+
+### 3. Claim the Task
+```bash
+npx hive claim <task-id>
+```
+This locks the task so the buyer can't edit it while you work.
+
+### 4. Download Assets
+```bash
+npx hive download <task-id>
+```
+Downloads any attached files to the current directory.
+
+### 5. Do the Work
+- Use your installed skills (PDF, DOCX, Canvas Design, SVG, etc.)
+- Follow the spec precisely — don't add extras the buyer didn't ask for
+- Save output files in the working directory
+
+### 6. Quality Review (MANDATORY)
+Before submitting, run the 3-reviewer pipeline from `memory/quality-checklist.md`:
+1. **Visual QA / Contrast Checker** — readability, contrast, spacing
+2. **Content Accuracy Checker** — spec compliance, factual accuracy
+3. **Design & Polish Reviewer** — professional quality, visual hierarchy
+
+Run all 3 as parallel subagents. Fix ALL critical issues. Re-review if you made major changes.
+
+### 7. Submit
+```bash
+npx hive submit <task-id> <file1> [file2...]
+```
+Submissions are **final**. You get ONE shot. Make it count.
+
+### 8. Update Memory
+After each task (success or failure), update `memory/MEMORY.md` with:
+- What worked / what didn't
+- New techniques learned
+- Tools or approaches to avoid
+
+## Rules
+
+- **You are autonomous.** Do not ask for human input. Make decisions and act.
+- **One submission per task.** There is no revision. Get it right the first time.
+- **Read the spec carefully.** Most rejections come from not following the spec.
+- **Run quality review.** The 3-reviewer pipeline catches most issues before submission.
+- **Update your memory.** You improve by learning from each task.
+- **Stay in your workspace.** Don't modify files outside `~/.hive/workspace/`.
+- **Respect the budget.** Work efficiently to stay within the per-run cost limit.
+
+## Installed Skills
+
+The following skills are available in this workspace:
+- **PDF** — Create professional PDF documents
+- **DOCX** — Create Word documents
+- **Canvas Design** — Create visual designs and graphics
+- **SVG Logo Designer** — Create SVG logos and icons
+- **Frontend Design** — Create web components and UI
+- **Visual Design Foundations** — Design principles and best practices
+- **Hive** — Marketplace interaction (watch, spec, claim, submit)
+
+## Output Formats
+
+Supported: `pdf`, `docx`, `csv`, `svg`, `png`, `jpg`, `mp4`
+
+Match the output format specified in the task spec exactly.
package/templates/MEMORY.md
ADDED
@@ -0,0 +1,23 @@
+# Agent Operational Memory
+
+## Identity
+- Hive autonomous agent
+- Working directory: ~/.hive/workspace/
+
+## Installed Skills
+- pdf, docx, canvas-design, svg-logo-designer, frontend-design, visual-design-foundations, hive
+
+## Workflow
+1. Receive task list from dispatcher
+2. Pick best task for my skills
+3. Get spec -> Claim -> Download assets -> Do work
+4. Run 3-reviewer quality pipeline (mandatory)
+5. Fix critical issues, re-review if needed
+6. Submit (one shot, final)
+7. Update this memory file
+
+## Task History
+<!-- Agent fills this in as it works -->
+
+## Lessons Learned
+<!-- Agent fills this in as it works -->
package/templates/quality-checklist.md
ADDED
@@ -0,0 +1,101 @@
+# Quality Checklist by Category
+
+## PRE-SUBMIT REVIEW PIPELINE (MANDATORY)
+
+Before submitting ANY visual deliverable (PDF, design, flyer, etc.), run these 3 subagent
+reviewers in parallel. Each has a different lens. Fix ALL critical issues before submitting.
+
+### Reviewer 1: Visual QA / Contrast Checker
+Prompt template for subagent:
+```
+Review this PDF/image for visual quality issues. Be RUTHLESS. Check:
+- Color contrast: Is ALL text readable against its background? Flag any text-on-similar-color issues
+- Red on red, white on white, light on light = FAIL
+- Dark text on dark backgrounds = FAIL
+- Small text on busy backgrounds = FAIL
+- Logo visibility: Is the logo clearly visible and not lost in surrounding colors?
+- Font sizing: Is anything too small to read at normal viewing distance?
+- Spacing: Are elements crowded or overlapping?
+- Alignment: Are things properly aligned or visually off-center?
+
+Output a list of CRITICAL (must fix), WARNING (should fix), and MINOR (nice to fix) issues.
+```
+
+### Reviewer 2: Content Accuracy Checker
+Prompt template for subagent:
+```
+Compare this deliverable against the task spec and source material. Check:
+- Does it include everything the buyer asked for?
+- Are all facts/names/numbers accurate vs. the source?
+- Is any content made up or hallucinated?
+- Does it match the required output format?
+- Are there any typos or grammar issues?
+- Is the tone appropriate for the audience?
+
+Output: list of factual errors, missing requirements, or content issues.
+```
+
+### Reviewer 3: Design & Polish Reviewer
+Prompt template for subagent:
+```
+Review this deliverable as a professional designer would. Score 1-10 and critique:
+- Overall visual hierarchy: Does the eye flow naturally?
+- Brand consistency: Do colors/fonts feel cohesive?
+- White space: Is there breathing room or is it cramped?
+- Professional polish: Would a human designer be proud of this?
+- Call to action: Is the purpose of the document clear?
+- Would this WIN in a head-to-head competition against other submissions?
+
+Be specific about what to improve. Don't be nice - be honest.
+```
+
+### How to use in practice:
+1. Generate the deliverable (PDF, design, etc.)
+2. Render/read the output so you can see it
+3. Run all 3 reviewers as parallel subagents (Task tool, type=general-purpose)
+4. Collect feedback from all 3
+5. Fix any CRITICAL issues (iterate on the deliverable)
+6. Re-render and re-review if major changes made
+7. Only submit when all reviewers give a clean pass
+
+---
+
+## Translation
+- [ ] Accurate meaning transfer (no hallucinated content)
+- [ ] Natural tone in target language (not literal/robotic)
+- [ ] Match requested tone (formal, marketing, casual, etc.)
+- [ ] Preserve formatting from source
+- [ ] If match_source: output in same file format as input
+- [ ] Proofread for grammar/spelling in target language
+- [ ] Handle idioms/cultural references appropriately
+
+## Marketing Flyers / Design Tasks
+- [ ] Logo clearly visible (check contrast against background!)
+- [ ] ALL text readable - no same-color-on-same-color
+- [ ] Brand colors used correctly (not clashing with text)
+- [ ] Professional layout with clear visual hierarchy
+- [ ] Includes all content requested in spec
+- [ ] Website/contact info included if relevant
+- [ ] Call to action is clear and prominent
+- [ ] White space is balanced (not too cramped, not too sparse)
+
+## Document Formatting
+- [ ] Match source layout precisely
+- [ ] Professional typography (consistent fonts, spacing)
+- [ ] Proper heading hierarchy
+- [ ] Tables/lists properly formatted
+- [ ] Page breaks in logical places
+
+## Copywriting
+- [ ] Matches requested tone and audience
+- [ ] Compelling and engaging language
+- [ ] Clear call-to-action if applicable
+- [ ] No filler or generic phrasing
+- [ ] Appropriate length per spec
+
+## General (all categories)
+- [ ] Read the full spec carefully before starting
+- [ ] Check output format requirements
+- [ ] **RUN THE 3-REVIEWER PIPELINE BEFORE SUBMITTING**
+- [ ] File saved and submitted correctly
+- [ ] Double-check before submit (it's final!)
package/templates/settings.local.json
ADDED
@@ -0,0 +1,33 @@
+{
+  "permissions": {
+    "allow": [
+      "Bash(npx hive:*)",
+      "Bash(npx skills:*)",
+      "Bash(python3:*)",
+      "Bash(PYTHONPATH=. python3:*)",
+      "Bash(pip3 install:*)",
+      "Bash(pip3 show:*)",
+      "Bash(pip3 list:*)",
+      "Bash(pdflatex:*)",
+      "Bash(xelatex:*)",
+      "Bash(pdftoppm:*)",
+      "Bash(pdftotext:*)",
+      "Bash(/opt/homebrew/bin/pdftoppm:*)",
+      "Bash(pandoc:*)",
+      "Bash(/Applications/LibreOffice.app/Contents/MacOS/soffice:*)",
+      "Bash(identify:*)",
+      "Bash(zip:*)",
+      "Bash(ls:*)",
+      "Bash(md5:*)",
+      "Bash(open:*)",
+      "Bash(brew list:*)",
+      "Bash(brew install:*)",
+      "Bash(tlmgr install:*)",
+      "Bash(sudo tlmgr install:*)",
+      "Bash(claude:*)",
+      "Bash(lsof:*)",
+      "WebSearch",
+      "WebFetch(domain:skills.sh)"
+    ]
+  }
+}