@hive-org/cli 0.0.6 → 0.0.7
- package/dist/agent/analysis.js +78 -0
- package/dist/agent/app.js +32 -0
- package/dist/agent/chat-prompt.js +63 -0
- package/dist/agent/components/AsciiTicker.js +81 -0
- package/dist/agent/components/HoneycombBoot.js +270 -0
- package/dist/agent/components/Spinner.js +37 -0
- package/dist/agent/config.js +52 -0
- package/dist/agent/edit-section.js +59 -0
- package/dist/agent/fetch-rules.js +21 -0
- package/dist/agent/helpers.js +22 -0
- package/dist/agent/hooks/useAgent.js +269 -0
- package/{templates/memory-prompt.ts → dist/agent/memory-prompt.js} +17 -32
- package/dist/agent/model.js +63 -0
- package/dist/agent/objects.js +1 -0
- package/dist/agent/process-lifecycle.js +56 -0
- package/{templates/prompt.ts → dist/agent/prompt.js} +18 -47
- package/dist/agent/theme.js +37 -0
- package/dist/agent/types.js +1 -0
- package/dist/agents.js +30 -21
- package/dist/ai-providers.js +0 -13
- package/dist/create/generate.js +10 -120
- package/dist/index.js +27 -4
- package/dist/migrate-templates/MigrateApp.js +131 -0
- package/dist/migrate-templates/migrate.js +86 -0
- package/dist/start/AgentProcessManager.js +131 -0
- package/dist/start/Dashboard.js +88 -0
- package/dist/start/patch-headless.js +101 -0
- package/dist/start/patch-managed-mode.js +142 -0
- package/dist/start/start-command.js +22 -0
- package/package.json +6 -5
- package/templates/analysis.ts +0 -103
- package/templates/chat-prompt.ts +0 -94
- package/templates/components/AsciiTicker.tsx +0 -113
- package/templates/components/HoneycombBoot.tsx +0 -348
- package/templates/components/Spinner.tsx +0 -64
- package/templates/edit-section.ts +0 -64
- package/templates/fetch-rules.ts +0 -23
- package/templates/helpers.ts +0 -22
- package/templates/hive/agent.ts +0 -2
- package/templates/hive/config.ts +0 -96
- package/templates/hive/memory.ts +0 -1
- package/templates/hive/objects.ts +0 -26
- package/templates/hooks/useAgent.ts +0 -337
- package/templates/index.tsx +0 -257
- package/templates/process-lifecycle.ts +0 -66
- package/templates/theme.ts +0 -40
- package/templates/types.ts +0 -23

@@ -0,0 +1,59 @@
+import { tool } from 'ai';
+import { z } from 'zod';
+import * as fs from 'fs/promises';
+import * as path from 'path';
+export function replaceSection(fileContent, heading, newContent) {
+    const lines = fileContent.split('\n');
+    const headingLine = `## ${heading}`;
+    let startIdx = -1;
+    for (let i = 0; i < lines.length; i++) {
+        if (lines[i].trim() === headingLine) {
+            startIdx = i;
+            break;
+        }
+    }
+    if (startIdx === -1) {
+        throw new Error(`Section "## ${heading}" not found in file.`);
+    }
+    let endIdx = lines.length;
+    for (let i = startIdx + 1; i < lines.length; i++) {
+        const trimmed = lines[i].trim();
+        if (trimmed.startsWith('## ') || trimmed.startsWith('# ')) {
+            endIdx = i;
+            break;
+        }
+    }
+    const before = lines.slice(0, startIdx + 1);
+    const after = lines.slice(endIdx);
+    const trimmedContent = newContent.trim();
+    const newSection = ['', ...trimmedContent.split('\n'), ''];
+    const result = [...before, ...newSection, ...after].join('\n');
+    return result;
+}
+export const editSectionTool = tool({
+    description: 'Edit a section of SOUL.md or STRATEGY.md. Only call AFTER user confirms.',
+    inputSchema: z.object({
+        file: z.enum(['SOUL.md', 'STRATEGY.md']),
+        section: z.string().describe('Exact ## heading name, e.g. "Personality", "Conviction Style"'),
+        content: z.string().describe('New content for the section (without the ## heading line)'),
+    }),
+    execute: async ({ file, section, content }) => {
+        const filePath = path.join(process.cwd(), file);
+        let fileContent;
+        try {
+            fileContent = await fs.readFile(filePath, 'utf-8');
+        }
+        catch {
+            return `Error: ${file} not found in current directory.`;
+        }
+        try {
+            const updated = replaceSection(fileContent, section, content);
+            await fs.writeFile(filePath, updated, 'utf-8');
+            return `Updated "${section}" section in ${file}.`;
+        }
+        catch (err) {
+            const message = err instanceof Error ? err.message : String(err);
+            return `Error: ${message}`;
+        }
+    },
+});

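For context, a minimal sketch of how the new `replaceSection` helper behaves. The import path and the sample markdown are illustrative, not from the package:

```ts
// Illustrative only: exercising replaceSection from dist/agent/edit-section.js.
import { replaceSection } from './dist/agent/edit-section.js';

const soul = [
  '# Agent: demo',
  '## Personality',
  'Old personality text.',
  '## Conviction Style',
  'Cautious.',
].join('\n');

// Replaces everything under "## Personality" up to the next heading.
const updated = replaceSection(soul, 'Personality', 'Dry humor, data-first.');
console.log(updated);
// # Agent: demo
// ## Personality
//
// Dry humor, data-first.
//
// ## Conviction Style
// Cautious.
```
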
@@ -0,0 +1,21 @@
+import { tool } from 'ai';
+import { z } from 'zod';
+const RULES_URL = 'https://hive.z3n.dev/RULES.md';
+export const fetchRulesTool = tool({
+    description: 'Fetch the rules of the Hive game. Call when the user asks about rules, scoring, honey, wax, streaks, or how the platform works.',
+    inputSchema: z.object({}),
+    execute: async () => {
+        try {
+            const response = await fetch(RULES_URL);
+            if (!response.ok) {
+                return `Error: failed to fetch rules (HTTP ${response.status}).`;
+            }
+            const rules = await response.text();
+            return rules;
+        }
+        catch (err) {
+            const message = err instanceof Error ? err.message : String(err);
+            return `Error: could not reach Hive to fetch rules. ${message}`;
+        }
+    },
+});

@@ -0,0 +1,22 @@
+export function formatTime(date) {
+    const hours = String(date.getHours()).padStart(2, '0');
+    const minutes = String(date.getMinutes()).padStart(2, '0');
+    const seconds = String(date.getSeconds()).padStart(2, '0');
+    return `${hours}:${minutes}:${seconds}`;
+}
+export function convictionColor(conviction) {
+    if (conviction > 0)
+        return 'green';
+    if (conviction < 0)
+        return 'red';
+    return 'gray';
+}
+export function stripCodeFences(text) {
+    const trimmed = text.trim();
+    const fencePattern = /^```(?:markdown|md)?\s*\n([\s\S]*?)\n```$/;
+    const match = trimmed.match(fencePattern);
+    if (match) {
+        return match[1].trim();
+    }
+    return trimmed;
+}

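A quick, hypothetical look at these helpers on sample inputs (the import path and values are placeholders):

```ts
// Illustrative only: the helpers as they would behave on raw model output.
import { stripCodeFences, convictionColor, formatTime } from './dist/agent/helpers.js';

// A model response that wraps MEMORY.md in a markdown fence gets unwrapped:
const raw = '```markdown\n# Memory\n- Operator prefers short updates\n```';
stripCodeFences(raw);   // '# Memory\n- Operator prefers short updates'

convictionColor(4.5);   // 'green' (bullish signal)
convictionColor(-2);    // 'red'   (bearish signal)
convictionColor(0);     // 'gray'  (neutral)

formatTime(new Date()); // e.g. '14:03:27'
```
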
@@ -0,0 +1,269 @@
+import { streamText, stepCountIs } from 'ai';
+import { useState, useEffect, useRef, useCallback } from 'react';
+import { HiveAgent } from '@hive-org/sdk';
+import { loadAgentConfig } from '../config.js';
+import { buildChatPrompt } from '../chat-prompt.js';
+import { editSectionTool } from '../edit-section.js';
+import { fetchRulesTool } from '../fetch-rules.js';
+import { loadMemory } from '@hive-org/sdk';
+import { processSignalAndSummarize, extractAndSaveMemory } from '../analysis.js';
+import { registerShutdownAgent } from '../process-lifecycle.js';
+import { getModel } from '../model.js';
+export function useAgent() {
+    const [phase, setPhase] = useState('booting');
+    const [connected, setConnected] = useState(false);
+    const [agentName, setAgentName] = useState('agent');
+    const [agentBio, setAgentBio] = useState('');
+    const [pollActivity, setPollActivity] = useState([]);
+    const [chatActivity, setChatActivity] = useState([]);
+    const [input, setInput] = useState('');
+    const [chatStreaming, setChatStreaming] = useState(false);
+    const [chatBuffer, setChatBuffer] = useState('');
+    const [predictionCount, setPredictionCount] = useState(0);
+    const [termWidth, setTermWidth] = useState(process.stdout.columns || 60);
+    const agentRef = useRef(null);
+    const sessionMessagesRef = useRef([]);
+    const memoryRef = useRef('');
+    const chatCountSinceExtractRef = useRef(0);
+    const extractingRef = useRef(false);
+    const recentThreadsRef = useRef([]);
+    const recentPredictionsRef = useRef([]);
+    const predictionCountRef = useRef(0);
+    const soulContentRef = useRef('');
+    const strategyContentRef = useRef('');
+    // ─── Terminal resize tracking ───────────────────────
+    useEffect(() => {
+        const onResize = () => {
+            setTermWidth(process.stdout.columns || 60);
+        };
+        process.stdout.on('resize', onResize);
+        return () => {
+            process.stdout.off('resize', onResize);
+        };
+    }, []);
+    // ─── Activity helpers ───────────────────────────────
+    const addPollActivity = useCallback((item) => {
+        setPollActivity((prev) => {
+            const updated = [...prev, { ...item, timestamp: new Date() }];
+            const maxItems = 50;
+            if (updated.length > maxItems) {
+                return updated.slice(updated.length - maxItems);
+            }
+            return updated;
+        });
+    }, []);
+    const addIdleActivity = useCallback(() => {
+        setPollActivity((prev) => {
+            const updated = [
+                ...prev,
+                { type: 'idle', text: 'Polled but no new threads', timestamp: new Date() },
+            ];
+            const maxItems = 50;
+            if (updated.length > maxItems) {
+                return updated.slice(updated.length - maxItems);
+            }
+            return updated;
+        });
+    }, []);
+    const removeLastAnalyzing = useCallback(() => {
+        setPollActivity((prev) => {
+            let lastIdx = -1;
+            for (let i = prev.length - 1; i >= 0; i--) {
+                if (prev[i].type === 'analyzing') {
+                    lastIdx = i;
+                    break;
+                }
+            }
+            if (lastIdx === -1) {
+                return prev;
+            }
+            return [...prev.slice(0, lastIdx), ...prev.slice(lastIdx + 1)];
+        });
+    }, []);
+    const addChatActivity = useCallback((item) => {
+        setChatActivity((prev) => {
+            const updated = [...prev, { ...item, timestamp: new Date() }];
+            const maxItems = 50;
+            if (updated.length > maxItems) {
+                return updated.slice(updated.length - maxItems);
+            }
+            return updated;
+        });
+    }, []);
+    // ─── Agent lifecycle ────────────────────────────────
+    useEffect(() => {
+        const start = async () => {
+            const baseUrl = process.env.HIVE_API_URL ?? 'http://localhost:6969';
+            const config = await loadAgentConfig();
+            setAgentName(config.name);
+            setAgentBio(config.bio ?? '');
+            soulContentRef.current = config.soulContent;
+            strategyContentRef.current = config.strategyContent;
+            const initialMemory = await loadMemory();
+            memoryRef.current = initialMemory;
+            const agent = new HiveAgent(baseUrl, {
+                name: config.name,
+                avatarUrl: config.avatarUrl,
+                bio: config.bio ?? undefined,
+                predictionProfile: config.predictionProfile,
+                pollIntervalMs: 5000,
+                pollLimit: 20,
+                onPollEmpty: addIdleActivity,
+                onStop: async () => { },
+                onNewThread: async (thread) => {
+                    try {
+                        const threadPreview = thread.text.length > 80 ? thread.text.slice(0, 80) + '\u2026' : thread.text;
+                        addPollActivity({
+                            type: 'signal',
+                            text: `c/${thread.project_id} \u00B7 ${thread.id.slice(0, 8)}`,
+                            detail: threadPreview,
+                        });
+                        const summary = `[${thread.project_id}] ${threadPreview}`;
+                        recentThreadsRef.current = [summary, ...recentThreadsRef.current].slice(0, 5);
+                        addPollActivity({ type: 'analyzing', text: 'Analyzing signal...' });
+                        const result = await processSignalAndSummarize(thread, agent.recentComments, memoryRef.current, soulContentRef.current, strategyContentRef.current);
+                        if (result.skip) {
+                            removeLastAnalyzing();
+                            addPollActivity({ type: 'skipped', text: 'Skipped \u2014 outside expertise' });
+                            return;
+                        }
+                        await agent.postComment(thread.id, {
+                            thread_id: thread.id,
+                            text: result.summary,
+                            conviction: result.conviction,
+                        }, thread.text);
+                        removeLastAnalyzing();
+                        const sign = result.conviction >= 0 ? '+' : '';
+                        addPollActivity({
+                            type: 'posted',
+                            text: `[${sign}${result.conviction}%] "${result.summary}"`,
+                            conviction: result.conviction,
+                        });
+                        predictionCountRef.current += 1;
+                        setPredictionCount(predictionCountRef.current);
+                        const predSummary = `[${sign}${result.conviction}%] ${result.summary}`;
+                        recentPredictionsRef.current = [predSummary, ...recentPredictionsRef.current].slice(0, 5);
+                    }
+                    catch (err) {
+                        removeLastAnalyzing();
+                        const raw = err instanceof Error ? err.message : String(err);
+                        const message = raw.length > 120 ? raw.slice(0, 120) + '\u2026' : raw;
+                        addPollActivity({ type: 'error', text: message });
+                    }
+                },
+            });
+            agentRef.current = agent;
+            registerShutdownAgent(async () => {
+                await agent.stop();
+            });
+            await agent.start();
+            setConnected(true);
+            const bio = config.bio ?? '';
+            if (bio) {
+                addPollActivity({
+                    type: 'online',
+                    text: `${config.name} agent online \u2014 "${bio}"`,
+                });
+            }
+        };
+        start().catch((err) => {
+            const raw = err instanceof Error ? err.message : String(err);
+            const isNameTaken = raw.includes('409');
+            const hint = isNameTaken ? ' Change the name in SOUL.md under "# Agent: <name>".' : '';
+            addPollActivity({ type: 'error', text: `Fatal: ${raw.slice(0, 120)}${hint}` });
+        });
+        return () => {
+            agentRef.current?.stop().catch(() => { });
+        };
+    }, []);
+    // ─── Chat submission ────────────────────────────────
+    const handleChatSubmit = useCallback(async (message) => {
+        if (!message.trim() || chatStreaming) {
+            return;
+        }
+        addChatActivity({ type: 'chat-user', text: message });
+        sessionMessagesRef.current.push({ role: 'user', content: message });
+        chatCountSinceExtractRef.current += 1;
+        if (chatCountSinceExtractRef.current >= 3 && !extractingRef.current) {
+            extractingRef.current = true;
+            const messagesSnapshot = [...sessionMessagesRef.current];
+            extractAndSaveMemory(messagesSnapshot)
+                .then((newMemory) => {
+                    if (newMemory !== null) {
+                        memoryRef.current = newMemory;
+                    }
+                    chatCountSinceExtractRef.current = 0;
+                })
+                .catch(() => { })
+                .finally(() => {
+                    extractingRef.current = false;
+                });
+        }
+        setChatStreaming(true);
+        setChatBuffer('');
+        try {
+            const prompt = buildChatPrompt(soulContentRef.current, strategyContentRef.current, {
+                recentThreadSummaries: recentThreadsRef.current,
+                recentPredictions: recentPredictionsRef.current,
+                sessionMessages: sessionMessagesRef.current.slice(-20),
+                memory: memoryRef.current,
+                userMessage: message,
+            });
+            const model = await getModel();
+            const result = streamText({
+                model,
+                prompt,
+                tools: { editSection: editSectionTool, fetchRules: fetchRulesTool },
+                stopWhen: stepCountIs(2),
+                maxOutputTokens: 600,
+            });
+            let fullResponse = '';
+            for await (const chunk of result.textStream) {
+                fullResponse += chunk;
+                setChatBuffer(fullResponse);
+            }
+            // Surface tool results when the model didn't produce follow-up text
+            const steps = await result.steps;
+            for (const step of steps) {
+                for (const toolResult of step.toolResults) {
+                    const output = String(toolResult.output);
+                    if (!fullResponse.includes(output)) {
+                        const suffix = `\n[${output}]`;
+                        fullResponse += suffix;
+                        setChatBuffer(fullResponse);
+                    }
+                }
+            }
+            sessionMessagesRef.current.push({ role: 'assistant', content: fullResponse });
+            addChatActivity({ type: 'chat-agent', text: fullResponse });
+            setChatBuffer('');
+        }
+        catch (err) {
+            const raw = err instanceof Error ? err.message : String(err);
+            addChatActivity({ type: 'chat-error', text: `Chat error: ${raw.slice(0, 120)}` });
+        }
+        finally {
+            setChatStreaming(false);
+        }
+    }, [chatStreaming, addChatActivity]);
+    // ─── Boot transition ────────────────────────────────
+    const handleBootComplete = useCallback(() => {
+        setPhase('running');
+    }, []);
+    return {
+        phase,
+        connected,
+        agentName,
+        agentBio,
+        pollActivity,
+        chatActivity,
+        input,
+        chatStreaming,
+        chatBuffer,
+        predictionCount,
+        termWidth,
+        setInput,
+        handleChatSubmit,
+        handleBootComplete,
+    };
+}

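To make the hook's return shape concrete, a hypothetical consumer rendering its feeds with Ink (the real UI presumably lives in dist/agent/app.js; the component and import path below are invented for illustration):

```tsx
// Hypothetical consumer sketch — not part of this release.
// Mounting this component boots the HiveAgent and starts polling via useAgent().
import React from 'react';
import { render, Box, Text } from 'ink';
import { useAgent } from './dist/agent/hooks/useAgent.js';

function AgentView() {
    const { connected, agentName, pollActivity, chatBuffer } = useAgent();
    return (
        <Box flexDirection="column">
            <Text>{agentName} {connected ? 'online' : 'connecting…'}</Text>
            {pollActivity.map((item, i) => (
                <Text key={i}>{item.text}</Text>
            ))}
            {chatBuffer ? <Text dimColor>{chatBuffer}</Text> : null}
        </Box>
    );
}

render(<AgentView />);
```
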
@@ -1,33 +1,19 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    sessionSection = `\n## Session Chat Log\n\n${listed}\n`;
-  }
-
-  const currentMemorySection =
-    currentMemory.trim().length > 0
-      ? `\n## Current MEMORY.md\n\n\`\`\`markdown\n${currentMemory}\n\`\`\`\n`
-      : '\n## Current MEMORY.md\n\n(empty - this is a fresh agent)\n';
-
-  const consolidationNote =
-    lineCount > 200
-      ? `\n**IMPORTANT: The current memory is ${lineCount} lines, exceeding the 200-line soft limit. Aggressively consolidate: merge related items, remove outdated or low-value entries, and keep only the most important context.**\n`
-      : '';
-
-  const prompt = `You are an AI trading agent's memory system. Your job is to maintain conversational continuity between sessions with the agent's operator.
+export function buildMemoryExtractionPrompt(context) {
+    const { currentMemory, sessionMessages, lineCount } = context;
+    let sessionSection = '';
+    if (sessionMessages.length > 0) {
+        const listed = sessionMessages
+            .map((m) => `${m.role === 'user' ? 'Operator' : 'Agent'}: ${m.content}`)
+            .join('\n');
+        sessionSection = `\n## Session Chat Log\n\n${listed}\n`;
+    }
+    const currentMemorySection = currentMemory.trim().length > 0
+        ? `\n## Current MEMORY.md\n\n\`\`\`markdown\n${currentMemory}\n\`\`\`\n`
+        : '\n## Current MEMORY.md\n\n(empty - this is a fresh agent)\n';
+    const consolidationNote = lineCount > 200
+        ? `\n**IMPORTANT: The current memory is ${lineCount} lines, exceeding the 200-line soft limit. Aggressively consolidate: merge related items, remove outdated or low-value entries, and keep only the most important context.**\n`
+        : '';
+    const prompt = `You are an AI trading agent's memory system. Your job is to maintain conversational continuity between sessions with the agent's operator.
${currentMemorySection}${consolidationNote}
## Session Activity
${sessionSection}

@@ -55,6 +41,5 @@ Follow these rules:
6. **Keep it under ~200 lines** — This file is injected into every prompt, so brevity matters.

Output the complete updated MEMORY.md content. Start with \`# Memory\` as the top-level header. Output ONLY the markdown content, no code fences or explanation.`;
-
-  return prompt;
+    return prompt;
}

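A hypothetical sketch of how the extraction prompt might be fed to a model (extractAndSaveMemory in dist/agent/analysis.js presumably owns the real flow; the sample memory and messages below are placeholders):

```ts
// Illustrative only — import paths and sample data are assumptions.
import { generateText } from 'ai';
import { buildMemoryExtractionPrompt } from './dist/agent/memory-prompt.js';
import { getModel } from './dist/agent/model.js';
import { stripCodeFences } from './dist/agent/helpers.js';

const currentMemory = '# Memory\n- Operator prefers terse updates';
const prompt = buildMemoryExtractionPrompt({
    currentMemory,
    sessionMessages: [
        { role: 'user', content: 'Focus on L2 tokens this week.' },
        { role: 'assistant', content: 'Noted — weighting L2 signals higher.' },
    ],
    lineCount: currentMemory.split('\n').length,
});

const { text } = await generateText({ model: await getModel(), prompt });
const newMemory = stripCodeFences(text); // guard against fenced model output
```
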
@@ -0,0 +1,63 @@
+const PROVIDERS = [
+    {
+        envVar: 'ANTHROPIC_API_KEY',
+        defaultModel: 'claude-opus-4-5',
+        load: async (modelId) => {
+            const { anthropic } = await import('@ai-sdk/anthropic');
+            return anthropic(modelId);
+        },
+    },
+    {
+        envVar: 'OPENAI_API_KEY',
+        defaultModel: 'gpt-4o',
+        load: async (modelId) => {
+            const { openai } = await import('@ai-sdk/openai');
+            return openai(modelId);
+        },
+    },
+    {
+        envVar: 'GOOGLE_GENERATIVE_AI_API_KEY',
+        defaultModel: 'gemini-3-pro-preview',
+        load: async (modelId) => {
+            const { google } = await import('@ai-sdk/google');
+            return google(modelId);
+        },
+    },
+    {
+        envVar: 'XAI_API_KEY',
+        defaultModel: 'grok-4',
+        load: async (modelId) => {
+            const { xai } = await import('@ai-sdk/xai');
+            return xai(modelId);
+        },
+    },
+    {
+        envVar: 'OPENROUTER_API_KEY',
+        defaultModel: 'anthropic/claude-3.5-sonnet',
+        load: async (modelId) => {
+            const { createOpenRouter } = await import('@openrouter/ai-sdk-provider');
+            const openrouter = createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY });
+            return openrouter.chat(modelId);
+        },
+    },
+];
+let _modelPromise = null;
+export function getModel() {
+    if (_modelPromise) {
+        return _modelPromise;
+    }
+    _modelPromise = (async () => {
+        const overrideModel = process.env.HIVE_MODEL;
+        for (const provider of PROVIDERS) {
+            const keyValue = process.env[provider.envVar];
+            if (keyValue && keyValue.trim().length > 0) {
+                const modelId = overrideModel ?? provider.defaultModel;
+                const model = await provider.load(modelId);
+                return model;
+            }
+        }
+        throw new Error('No AI provider API key found in environment. ' +
+            'Set one of: ANTHROPIC_API_KEY, OPENAI_API_KEY, GOOGLE_GENERATIVE_AI_API_KEY, XAI_API_KEY, OPENROUTER_API_KEY');
+    })();
+    return _modelPromise;
+}

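Provider selection is first-matching-key in the order above, with an optional HIVE_MODEL override of the provider's default. A minimal illustration of the resulting behavior (env values and prompt are placeholders):

```ts
// Illustrative only: getModel picks the first provider whose API key is set.
import { streamText } from 'ai';
import { getModel } from './dist/agent/model.js';

process.env.OPENAI_API_KEY = 'sk-...';  // placeholder; assuming no Anthropic key is set, OpenAI wins
process.env.HIVE_MODEL = 'gpt-4o-mini'; // optional override of the provider's default model id

const model = await getModel();         // result is memoized; later calls reuse the same promise
const result = streamText({ model, prompt: 'Say hi in five words.' });
for await (const chunk of result.textStream) {
    process.stdout.write(chunk);
}
```
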
@@ -0,0 +1 @@
+export {};

@@ -0,0 +1,56 @@
+import chalk from 'chalk';
+import { symbols } from './theme.js';
+let _shutdownAgent = null;
+let _shuttingDown = false;
+export function registerShutdownAgent(fn) {
+    _shutdownAgent = fn;
+}
+export function clearShutdownAgent() {
+    _shutdownAgent = null;
+}
+const restoreScreen = () => {
+    process.stdout.write('\x1b[?1049l');
+};
+export const gracefulShutdown = async (exitCode = 0) => {
+    if (_shuttingDown) {
+        return;
+    }
+    _shuttingDown = true;
+    if (_shutdownAgent) {
+        try {
+            await _shutdownAgent();
+        }
+        catch {
+            // Best-effort memory save on shutdown
+        }
+        _shutdownAgent = null;
+    }
+    restoreScreen();
+    process.exit(exitCode);
+};
+export function setupProcessLifecycle() {
+    // Unhandled rejection handler
+    process.on('unhandledRejection', (reason) => {
+        const raw = reason instanceof Error ? reason.message : String(reason);
+        const message = raw.length > 200 ? raw.slice(0, 200) + '\u2026' : raw;
+        console.error(chalk.red(` ${symbols.cross} Unhandled: ${message}`));
+    });
+    // Use alternate screen buffer (like vim/htop) to prevent text reflow on resize.
+    // The normal buffer reflows wrapped lines when the terminal width changes, which
+    // desyncs Ink's internal line counter and produces ghost copies of the UI.
+    // The alternate buffer is position-based — no reflow, no ghosts.
+    process.stdout.write('\x1b[?1049h');
+    process.on('exit', restoreScreen);
+    process.on('SIGINT', () => {
+        gracefulShutdown().catch(() => {
+            restoreScreen();
+            process.exit(1);
+        });
+    });
+    process.on('SIGTERM', () => {
+        gracefulShutdown().catch(() => {
+            restoreScreen();
+            process.exit(1);
+        });
+    });
+}

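How the lifecycle helpers fit together, as a hypothetical entry-point sketch (the actual wiring presumably lives in dist/index.js; the useAgent hook is what registers the shutdown callback in this release):

```ts
// Hypothetical entry-point sketch — the import path is an assumption.
import { setupProcessLifecycle, registerShutdownAgent, gracefulShutdown } from './dist/agent/process-lifecycle.js';

setupProcessLifecycle();        // switch to the alternate screen buffer, trap SIGINT/SIGTERM

registerShutdownAgent(async () => {
    // e.g. stop the HiveAgent poll loop and flush state before exit
});

// Somewhere in the app, on a fatal error:
await gracefulShutdown(1);      // runs the registered hook, restores the screen, exits
```
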
@@ -1,34 +1,9 @@
-
-
-
-
-
-
-  projectId: string;
-  timestamp: string;
-  priceOnFetch: number;
-  citations: CitationDto[];
-  recentPosts?: readonly string[];
-  memory?: string;
-}
-
-async function loadMarkdownFile(filename: string): Promise<string> {
-  const filePath = path.join(process.cwd(), filename);
-  const content = await fs.readFile(filePath, 'utf-8');
-  return content;
-}
-
-export async function buildAnalystPrompt(options: BuildPromptOptions): Promise<string> {
-  const { threadText, projectId, timestamp, priceOnFetch, citations, recentPosts, memory } =
-    options;
-
-  const soulContent = await loadMarkdownFile('SOUL.md');
-  const strategyContent = await loadMarkdownFile('STRATEGY.md');
-
-  let recentPostsSection = '';
-  if (recentPosts && recentPosts.length > 0) {
-    const listed = recentPosts.map((p) => `- "${p}"`).join('\n');
-    recentPostsSection = `
+export function buildAnalystPrompt(soulContent, strategyContent, options) {
+    const { threadText, projectId, timestamp, priceOnFetch, citations, recentPosts, memory } = options;
+    let recentPostsSection = '';
+    if (recentPosts && recentPosts.length > 0) {
+        const listed = recentPosts.map((p) => `- "${p}"`).join('\n');
+        recentPostsSection = `
## Anti-repetition

Your recent posts (do NOT repeat these structures, phrases, or opening patterns):

@@ -36,29 +11,26 @@ ${listed}

If you catch yourself writing something that sounds like any of the above - stop and take a completely different angle.
`;
-
-
-
-
-    memorySection = `
+    }
+    let memorySection = '';
+    if (memory && memory.trim().length > 0) {
+        memorySection = `
## Agent Memory

Your persistent learnings from past sessions:
${memory}
`;
-
-
-
-
-
-    citationsSection = `
+    }
+    let citationsSection = '';
+    if (citations.length > 0) {
+        const listed = citations.map((c) => `- [${c.title}](${c.url})`).join('\n');
+        citationsSection = `
## Sources

${listed}
`;
-
-
-  const prompt = `You are a Crypto Twitter personality. Here's who you are:
+    }
+    const prompt = `You are a Crypto Twitter personality. Here's who you are:
---
${soulContent}
---

@@ -163,6 +135,5 @@ Conviction calibration — match signal strength to magnitude:
- Black swan, regulatory bombshell, massive exploit → ±12.0 to ±25.0

IMPORTANT: Vary your conviction numbers. Do NOT reuse the same number across signals. Each signal has different strength — your conviction should reflect that.`;
-
-  return prompt;
+    return prompt;
}

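The analyst prompt builder is now synchronous and takes the SOUL/STRATEGY content as arguments instead of reading the files itself, so callers load the markdown once and pass it in. A hypothetical call (all field values are placeholders, not real package data):

```ts
// Hypothetical call sketch — import path and sample values are assumptions.
import * as fs from 'fs/promises';
import { buildAnalystPrompt } from './dist/agent/prompt.js';

const soulContent = await fs.readFile('SOUL.md', 'utf-8');
const strategyContent = await fs.readFile('STRATEGY.md', 'utf-8');

const prompt = buildAnalystPrompt(soulContent, strategyContent, {
    threadText: 'Protocol X ships fee switch next week',
    projectId: 'protocol-x',
    timestamp: new Date().toISOString(),
    priceOnFetch: 1.42,
    citations: [{ title: 'Governance vote', url: 'https://example.com/vote' }],
    recentPosts: ['Fee switches rarely move price on announcement'],
    memory: '# Memory\n- Operator cares about L2 fee markets',
});
```
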