anorion 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +87 -0
  2. package/agents/001.yaml +32 -0
  3. package/agents/example.yaml +6 -0
  4. package/bin/anorion.js +8093 -0
  5. package/package.json +72 -0
  6. package/scripts/cli.ts +182 -0
  7. package/scripts/postinstall.js +6 -0
  8. package/scripts/setup.ts +255 -0
  9. package/src/agents/pipeline.ts +231 -0
  10. package/src/agents/registry.ts +153 -0
  11. package/src/agents/runtime.ts +593 -0
  12. package/src/agents/session.ts +338 -0
  13. package/src/agents/subagent.ts +185 -0
  14. package/src/bridge/client.ts +221 -0
  15. package/src/bridge/federator.ts +221 -0
  16. package/src/bridge/protocol.ts +88 -0
  17. package/src/bridge/server.ts +221 -0
  18. package/src/channels/base.ts +43 -0
  19. package/src/channels/router.ts +122 -0
  20. package/src/channels/telegram.ts +592 -0
  21. package/src/channels/webhook.ts +143 -0
  22. package/src/cli/index.ts +1036 -0
  23. package/src/cli/interactive.ts +26 -0
  24. package/src/gateway/routes-v2.ts +165 -0
  25. package/src/gateway/server.ts +512 -0
  26. package/src/gateway/ws.ts +75 -0
  27. package/src/index.ts +182 -0
  28. package/src/llm/provider.ts +243 -0
  29. package/src/llm/providers.ts +381 -0
  30. package/src/memory/context.ts +125 -0
  31. package/src/memory/store.ts +214 -0
  32. package/src/scheduler/cron.ts +239 -0
  33. package/src/shared/audit.ts +231 -0
  34. package/src/shared/config.ts +129 -0
  35. package/src/shared/db/index.ts +165 -0
  36. package/src/shared/db/prepared.ts +111 -0
  37. package/src/shared/db/schema.ts +84 -0
  38. package/src/shared/events.ts +79 -0
  39. package/src/shared/logger.ts +10 -0
  40. package/src/shared/metrics.ts +190 -0
  41. package/src/shared/rbac.ts +151 -0
  42. package/src/shared/token-budget.ts +157 -0
  43. package/src/shared/types.ts +166 -0
  44. package/src/tools/builtin/echo.ts +19 -0
  45. package/src/tools/builtin/file-read.ts +78 -0
  46. package/src/tools/builtin/file-write.ts +64 -0
  47. package/src/tools/builtin/http-request.ts +63 -0
  48. package/src/tools/builtin/memory.ts +71 -0
  49. package/src/tools/builtin/shell.ts +94 -0
  50. package/src/tools/builtin/web-search.ts +22 -0
  51. package/src/tools/executor.ts +126 -0
  52. package/src/tools/registry.ts +56 -0
  53. package/src/tools/skill-manager.ts +252 -0
@@ -0,0 +1,75 @@
1
+ import { logger } from '../shared/logger';
2
+ import { eventBus, type EventName } from '../shared/events';
3
+
4
// Minimal structural type for a WebSocket connection (compatible with Bun's
// ServerWebSocket as well as browser-style sockets).
type WsClient = { send: (data: string) => void; addEventListener: (type: string, fn: (ev: any) => void) => void; readyState: number };
// Connected clients mapped to the set of agent ids they subscribed to.
// An empty set means "receive events for all agents".
const clients = new Map<WsClient, Set<string>>();

// WebSocket readyState value for an open connection (per the WHATWG spec).
const WS_OPEN = 1;

// Event-bus events that are forwarded to subscribed WebSocket clients.
const AGENT_EVENTS: EventName[] = [
  'agent:processing',
  'agent:tool-call',
  'agent:response',
  'agent:error',
  'agent:idle',
];
16
+
17
+ export function handleWebSocket(ws: WsClient) {
18
+ clients.set(ws, new Set());
19
+ logger.info('WebSocket client connected');
20
+
21
+ ws.addEventListener('message', (event: { data: string }) => {
22
+ try {
23
+ const msg = JSON.parse(typeof event.data === 'string' ? event.data : String(event.data));
24
+ logger.debug({ msg }, 'WS message received');
25
+
26
+ if (msg.type === 'subscribe') {
27
+ const subs = clients.get(ws);
28
+ if (subs) {
29
+ const agents = msg.agents || [];
30
+ for (const a of agents) subs.add(a);
31
+ }
32
+ ws.send(JSON.stringify({ type: 'subscribed', agents: msg.agents || [] }));
33
+ }
34
+
35
+ if (msg.type === 'unsubscribe') {
36
+ const subs = clients.get(ws);
37
+ if (subs) {
38
+ for (const a of msg.agents || []) subs.delete(a);
39
+ }
40
+ }
41
+ } catch {
42
+ ws.send(JSON.stringify({ type: 'error', message: 'Invalid JSON' }));
43
+ }
44
+ });
45
+
46
+ ws.addEventListener('close', () => {
47
+ clients.delete(ws);
48
+ logger.info('WebSocket client disconnected');
49
+ });
50
+ }
51
+
52
+ function broadcastEvent(eventName: string, data: any) {
53
+ const payload = JSON.stringify({ type: eventName, ...data });
54
+ for (const [ws, subs] of clients) {
55
+ if ((ws as any).readyState !== WS_OPEN) continue;
56
+ // If client subscribed to specific agents, filter; else send all
57
+ if (subs.size > 0 && data.agentId && !subs.has(data.agentId)) continue;
58
+ ws.send(payload);
59
+ }
60
+ }
61
+
62
// Forward every agent lifecycle event from the in-process event bus to
// subscribed WebSocket clients. Runs once at module load; listeners are
// never removed for the lifetime of the process.
for (const ev of AGENT_EVENTS) {
  eventBus.on(ev, (data: any) => broadcastEvent(ev, data));
}
66
+
67
+ /** Broadcast a raw message to all connected WebSocket clients */
68
+ export function broadcast(message: object) {
69
+ const data = JSON.stringify(message);
70
+ for (const ws of clients.keys()) {
71
+ if ((ws as any).readyState === WS_OPEN) {
72
+ ws.send(data);
73
+ }
74
+ }
75
+ }
package/src/index.ts ADDED
@@ -0,0 +1,182 @@
1
+ import { loadConfig } from './shared/config';
2
+ import { initDatabase } from './shared/db';
3
+ import { logger } from './shared/logger';
4
+ import { toolRegistry } from './tools/registry';
5
+ import { agentRegistry } from './agents/registry';
6
+ import { sessionManager } from './agents/session';
7
+ import { memoryManager } from './memory/store';
8
+ import { channelRouter } from './channels/router';
9
+ import { TelegramChannel } from './channels/telegram';
10
+ import { WebhookChannel } from './channels/webhook';
11
+ import app, { setApiKeys, setBridge, registerBridgeRoutes } from './gateway/server';
12
+ import routesV2 from './gateway/routes-v2';
13
+ import { skillManager } from './tools/skill-manager';
14
+ import { scheduleManager } from './scheduler/cron';
15
+ import { tokenBudget } from './shared/token-budget';
16
+ import { auditLog } from './shared/audit';
17
+ import { eventBus } from './shared/events';
18
+ import { loadPipelinesFromFile, listPipelines } from './agents/pipeline';
19
+ import { listConfiguredProviders } from './llm/providers';
20
+ import { Federator } from './bridge/federator';
21
+
22
+ import echoTool from './tools/builtin/echo';
23
+ import shellTool from './tools/builtin/shell';
24
+ import httpRequestTool from './tools/builtin/http-request';
25
+ import fileReadTool from './tools/builtin/file-read';
26
+ import fileWriteTool from './tools/builtin/file-write';
27
+ import webSearchTool from './tools/builtin/web-search';
28
+ import { memorySaveTool, memorySearchTool, memoryListTool } from './tools/builtin/memory';
29
+ import { spawnAgentTool } from './agents/subagent';
30
+
31
// Tools registered into the tool registry at startup for all agents.
// Order is not significant; duplicate-registration errors are swallowed
// during bootstrap.
const builtinTools = [
  echoTool, shellTool, httpRequestTool, fileReadTool, fileWriteTool,
  webSearchTool, memorySaveTool, memorySearchTool, memoryListTool, spawnAgentTool,
];
35
+
36
+ // ── Bootstrap ──
37
+
38
/**
 * Bootstraps the Anorion gateway in a fixed order: config → database →
 * agents → tools → API keys → routes → HTTP server → channels → pipelines →
 * bridge/federation → scheduler → provider status, then installs signal
 * handlers for graceful shutdown. Exits the process on fatal startup errors
 * via the trailing .catch().
 */
async function main() {
  logger.info('Anorion starting...');

  // 1. Load config
  const config = loadConfig();
  logger.info({ port: config.gateway.port }, 'Config loaded');

  // 2. Init database
  const database = initDatabase(config.gateway.database);

  // 3. Load agents from directory
  if (config.agents.dir) {
    // NOTE(review): agentRegistry/memoryManager receive `database.raw` while
    // sessionManager receives `database.db` — confirm these are intentionally
    // different handles and not a typo.
    agentRegistry.setDb(database.raw, database.prepared);
    memoryManager.setDb(database.raw);
    sessionManager.setDb(database.db, database.prepared);
    await agentRegistry.loadFromDirectory(config.agents.dir);
    logger.info({ agents: agentRegistry.list().map(a => a.name) }, 'Agents loaded from directory');
  }

  // 4. Register builtin tools
  for (const tool of builtinTools) {
    try {
      toolRegistry.register(tool);
    } catch {
      // Already registered — skip
    }
  }
  logger.info({ tools: builtinTools.map(t => t.name) }, 'Builtin tools registered');

  // 5. Set API keys
  if (config.gateway.apiKeys.length > 0) {
    setApiKeys(config.gateway.apiKeys);
    // Only key names are logged — never the key material itself.
    logger.info({ keys: config.gateway.apiKeys.map(k => k.name) }, 'API keys configured');
  }

  // 6. Register v2 + bridge routes
  registerBridgeRoutes(app);
  try {
    app.route('/', routesV2);
  } catch {
    // May already be mounted
  }

  // 7. Start HTTP server FIRST (so API is available immediately)
  const server = Bun.serve({
    port: config.gateway.port,
    hostname: config.gateway.host,
    fetch: app.fetch,
  });

  logger.info(`
╔══════════════════════════════════════════════╗
║ Anorion Gateway ║
║ http://${config.gateway.host}:${config.gateway.port} ║
║ Agents: ${agentRegistry.list().length.toString().padEnd(30)}║
║ Tools: ${toolRegistry.list().length.toString().padEnd(30)}║
║ Bridge: ${(config.bridge.enabled ? 'enabled' : 'disabled').padEnd(29)}║
╚══════════════════════════════════════════════╝
`);

  // 8. Register and start channels (after HTTP server is up)
  const channels = config.channels;

  // NOTE(review): the Telegram channel is constructed and registered even
  // when channels.telegram.enabled is false (only start is gated) — confirm
  // the constructor tolerates a missing/empty botToken.
  const tgConfig = channels.telegram;
  const tg = new TelegramChannel({
    botToken: tgConfig.botToken,
    allowedUsers: tgConfig.allowedUsers,
    defaultAgent: tgConfig.defaultAgent,
  });
  channelRouter.registerChannel(tg);
  channelRouter.configure({
    routes: [],
    defaultAgent: tgConfig.defaultAgent,
  });

  if (channels.telegram.enabled) {
    // Don't await — Telegram long-polling blocks
    channelRouter.startChannel('telegram').then(() => {
      logger.info('Telegram channel started');
    }).catch((err: Error) => {
      logger.error({ error: err.message }, 'Telegram channel failed to start');
    });
  }

  if (channels.webhook.enabled) {
    const wh = new WebhookChannel({
      inboundSecret: channels.webhook.inboundSecret,
      outboundUrls: channels.webhook.outboundUrls,
    });
    channelRouter.registerChannel(wh);
    await channelRouter.startChannel('webhook');
    logger.info('Webhook channel started');
  }

  // 9. Load pipelines
  // NOTE(review): URL.pathname keeps percent-encoding and a leading slash on
  // Windows drive paths — fileURLToPath would be more portable; confirm the
  // supported platforms before relying on this.
  const { existsSync } = await import('fs');
  const pipelinesPath = new URL('../../pipelines.yaml', import.meta.url).pathname;
  if (existsSync(pipelinesPath)) {
    loadPipelinesFromFile(pipelinesPath);
    logger.info({ pipelines: listPipelines().length }, 'Pipelines loaded');
  }

  // 10. Bridge / Federation
  if (config.bridge.enabled) {
    const { BridgeServer } = await import('./bridge/server');
    const bridgeServer = new BridgeServer(config.bridge.port, config.bridge.secret);
    // Node id is a fresh UUID per process — peers see a new identity on restart.
    const federator = new Federator(crypto.randomUUID(), config.bridge.secret, bridgeServer);
    setBridge(federator);

    // Peer connection failures are logged and tolerated; startup continues.
    for (const peer of config.bridge.peers) {
      await federator.connectPeer(peer.url, peer.secret || '').catch((err: Error) => {
        logger.warn({ url: peer.url, error: err.message }, 'Failed to connect to peer');
      });
    }
    logger.info({ peers: config.bridge.peers.length }, 'Bridge enabled');
  }

  // 11. Scheduler
  // NOTE(review): only a log line here — scheduleManager is imported at the
  // top of the file but never started in main(); confirm it self-starts on
  // import or whether a start call is missing.
  if (config.scheduler.enabled) {
    logger.info('Scheduler enabled');
  }

  // 12. Log provider status
  const providers = listConfiguredProviders();
  const configured = providers.filter(p => p.configured);
  logger.info({ providers: configured.map(p => `${p.icon} ${p.name}`) }, 'Configured LLM providers');

  // 13. Graceful shutdown
  // NOTE(review): not idempotent — a second SIGINT while channels are
  // stopping re-enters this handler; confirm that is acceptable.
  const shutdown = async (signal: string) => {
    logger.info({ signal }, 'Shutting down...');
    await channelRouter.stopAll();
    server.stop(true);
    database.raw.close();
    logger.info('Goodbye');
    process.exit(0);
  };

  process.on('SIGINT', () => shutdown('SIGINT'));
  process.on('SIGTERM', () => shutdown('SIGTERM'));
}

// Entry point: any uncaught bootstrap error is fatal.
main().catch((err) => {
  logger.fatal({ error: err.message, stack: err.stack }, 'Fatal startup error');
  process.exit(1);
});
@@ -0,0 +1,243 @@
1
+ // LLM Provider — unified interface using multi-provider registry
2
+
3
+ import { generateText, streamText, type ModelMessage, type Tool as AiTool } from 'ai';
4
+ import { resolveModel, type ResolvedModel } from './providers';
5
+ import { logger } from '../shared/logger';
6
+ import { eventBus } from '../shared/events';
7
+ import type { ToolDefinition } from '../shared/types';
8
+ import { tokenBudget } from '../shared/token-budget';
9
+
10
/** Inputs for a single LLM invocation (callLlm / streamLlm). */
export interface LlmOptions {
  systemPrompt: string;      // system prompt prepended to the conversation
  messages: ModelMessage[];  // prior conversation turns in AI SDK format
  tools: ToolDefinition[];   // tool definitions exposed to the model
  modelId: string;           // primary model identifier, resolved via resolveModel()
  fallbackModelId?: string;  // tried by callLlm when the primary fails (ignored by streamLlm)
  maxTokens?: number;        // completion token cap
  temperature?: number;      // sampling temperature
}
19
+
20
// ── Retry with exponential backoff + jitter ──

// Tuning knobs for withRetry().
interface RetryConfig {
  maxRetries: number;   // retries after the first attempt (total calls = maxRetries + 1)
  baseDelayMs: number;  // delay before the first retry; doubles each attempt
  maxDelayMs: number;   // hard cap applied after backoff + jitter
}

const DEFAULT_RETRY: RetryConfig = {
  maxRetries: 3,
  baseDelayMs: 1000,
  maxDelayMs: 15000,
};
33
+
34
+ async function withRetry<T>(fn: () => Promise<T>, retry: Partial<RetryConfig> = {}): Promise<T> {
35
+ const cfg = { ...DEFAULT_RETRY, ...retry };
36
+ let lastError: Error | null = null;
37
+
38
+ for (let attempt = 0; attempt <= cfg.maxRetries; attempt++) {
39
+ try {
40
+ return await fn();
41
+ } catch (err) {
42
+ lastError = err as Error;
43
+ if (attempt < cfg.maxRetries) {
44
+ const delay = Math.min(cfg.baseDelayMs * Math.pow(2, attempt) + Math.random() * 1000, cfg.maxDelayMs);
45
+ logger.warn({ attempt: attempt + 1, delay: Math.round(delay), error: lastError.message }, 'LLM call failed, retrying');
46
+ await new Promise((r) => setTimeout(r, delay));
47
+ }
48
+ }
49
+ }
50
+
51
+ throw lastError!;
52
+ }
53
+
54
// ── Circuit Breaker ──

// Per-model failure tracking.
interface CircuitState {
  failures: number;    // consecutive failures since the last success
  lastFailure: number; // epoch ms of the most recent failure
  open: boolean;       // when true, calls are rejected until the reset window elapses
}

const circuits = new Map<string, CircuitState>();
const CIRCUIT_THRESHOLD = 5;      // consecutive failures before the circuit opens
const CIRCUIT_RESET_MS = 60_000;  // how long an open circuit blocks calls
65
+
66
+ function checkCircuit(modelId: string): void {
67
+ const state = circuits.get(modelId);
68
+ if (!state || !state.open) return;
69
+ if (Date.now() - state.lastFailure > CIRCUIT_RESET_MS) {
70
+ state.open = false;
71
+ state.failures = 0;
72
+ logger.info({ modelId }, 'Circuit breaker reset');
73
+ return;
74
+ }
75
+ throw new Error(`Circuit breaker open for ${modelId} — too many failures`);
76
+ }
77
+
78
+ function recordFailure(modelId: string): void {
79
+ if (!circuits.has(modelId)) circuits.set(modelId, { failures: 0, lastFailure: 0, open: false });
80
+ const state = circuits.get(modelId)!;
81
+ state.failures++;
82
+ state.lastFailure = Date.now();
83
+ if (state.failures >= CIRCUIT_THRESHOLD) {
84
+ state.open = true;
85
+ logger.warn({ modelId, failures: state.failures }, 'Circuit breaker opened');
86
+ }
87
+ }
88
+
89
+ function recordSuccess(modelId: string): void {
90
+ const state = circuits.get(modelId);
91
+ if (state) { state.failures = 0; state.open = false; }
92
+ }
93
+
94
// ── Main LLM Interface ──

/**
 * Call an LLM once (non-streaming) with retry, circuit breaking, a
 * token-budget gate, and optional single-model fallback.
 *
 * Tries `modelId` first, then `fallbackModelId` (if provided). Each
 * candidate gets its own retry loop; a candidate that fails records a
 * circuit failure and the next candidate is tried. Throws the last error
 * when all candidates fail.
 */
export async function callLlm(options: LlmOptions): Promise<{
  content: string;
  toolCalls: { id: string; name: string; arguments: string }[];
  usage?: { promptTokens: number; completionTokens: number; totalTokens: number };
}> {
  const { modelId, fallbackModelId, tools, systemPrompt, messages, maxTokens, temperature } = options;

  // Convert our tool definitions to the AI SDK shape. `execute` is forced to
  // undefined so the SDK surfaces tool calls instead of running them — the
  // agent runtime executes tools itself.
  // NOTE(review): `undefined as unknown as AiTool['execute']` is a double
  // cast; if the SDK's Tool.execute is optional, omitting the property is
  // cleaner — confirm against the installed `ai` package version.
  const aiTools: Record<string, AiTool> = {};
  for (const tool of tools) {
    aiTools[tool.name] = {
      description: tool.description,
      parameters: tool.parameters as Record<string, unknown>,
      execute: undefined as unknown as AiTool['execute'],
    };
  }

  const models = [modelId, ...(fallbackModelId ? [fallbackModelId] : [])];
  let lastError: Error | null = null;

  for (const mid of models) {
    try {
      // Throws while this model's circuit is open.
      checkCircuit(mid);
      const resolved = resolveModel(mid);

      // Token budget check (estimate ~4 chars per token for check)
      const estimatedTokens = Math.ceil(
        (systemPrompt.length + JSON.stringify(messages).length) / 4 + (maxTokens || 4096)
      );
      // NOTE(review): only canSpend() is called — nothing debits the budget
      // here. Presumably actual spend is recorded by a 'token:usage'
      // listener; confirm.
      const budget = tokenBudget.canSpend('global', 'llm-call', estimatedTokens);
      if (!budget.allowed) {
        throw new Error(`Token budget: ${budget.reason}`);
      }

      logger.debug({ model: mid, provider: resolved.providerName, toolCount: Object.keys(aiTools).length }, 'Calling LLM');

      const result = await withRetry(() => generateText({
        model: resolved.instance,
        system: systemPrompt,
        messages,
        tools: Object.keys(aiTools).length > 0 ? aiTools : undefined,
        maxTokens,
        temperature,
      }));

      recordSuccess(mid);

      // Emit token usage event
      if (result.usage) {
        eventBus.emit('token:usage', {
          agentId: 'global',
          sessionId: 'llm-call',
          model: mid,
          promptTokens: result.usage.promptTokens,
          completionTokens: result.usage.completionTokens,
          timestamp: Date.now(),
        });
      }

      // NOTE(review): `tc.args`/`parameters` are AI SDK v4 names while the
      // `ModelMessage` import is a v5 name — verify which SDK major is
      // actually pinned; v5 renamed these to `input`/`inputSchema`.
      const toolCalls = result.toolCalls.map((tc) => ({
        id: tc.toolCallId,
        name: tc.toolName,
        arguments: JSON.stringify(tc.args),
      }));

      return {
        content: result.text,
        toolCalls,
        usage: result.usage ? {
          promptTokens: result.usage.promptTokens,
          completionTokens: result.usage.completionTokens,
          totalTokens: result.usage.promptTokens + result.usage.completionTokens,
        } : undefined,
      };
    } catch (err) {
      lastError = err as Error;
      // Budget and open-circuit rejections also count as failures here.
      recordFailure(mid);
      logger.warn({ model: mid, error: lastError.message }, 'LLM call failed');
    }
  }

  throw lastError || new Error('All LLM providers failed');
}
178
+
179
/**
 * Stream an LLM response as an async generator of StreamChunk values
 * ('delta' text, surfaced 'tool_call's, and a final 'done' with usage).
 *
 * NOTE(review): unlike callLlm there is no retry, no fallback model, no
 * token-budget gate, and stream errors never reach recordFailure() — only
 * recordSuccess() fires on 'finish'. Confirm this asymmetry is intended.
 */
export async function* streamLlm(options: LlmOptions) {
  const { modelId, tools, systemPrompt, messages, maxTokens, temperature } = options;

  // Same AI SDK tool conversion as callLlm: execute is undefined so tool
  // calls are yielded to the caller rather than run by the SDK.
  const aiTools: Record<string, AiTool> = {};
  for (const tool of tools) {
    aiTools[tool.name] = {
      description: tool.description,
      parameters: tool.parameters as Record<string, unknown>,
      execute: undefined as unknown as AiTool['execute'],
    };
  }

  // NOTE(review): checkCircuit runs AFTER resolveModel here — the reverse of
  // callLlm — so resolveModel does work even for an open-circuit model.
  const resolved = resolveModel(modelId);
  checkCircuit(modelId);

  const result = streamText({
    model: resolved.instance,
    system: systemPrompt,
    messages,
    tools: Object.keys(aiTools).length > 0 ? aiTools : undefined,
    maxTokens,
    temperature,
  });

  // totalOutput first accumulates streamed *characters*; both counters are
  // overwritten with real token counts when the 'finish' chunk arrives.
  let totalInput = 0;
  let totalOutput = 0;

  for await (const chunk of result.fullStream) {
    switch (chunk.type) {
      case 'text-delta':
        totalOutput += chunk.textDelta.length;
        yield { type: 'delta' as const, content: chunk.textDelta };
        break;
      case 'tool-call':
        yield {
          type: 'tool_call' as const,
          id: chunk.toolCallId,
          name: chunk.toolName,
          arguments: chunk.args,
        };
        break;
      case 'finish':
        if (chunk.usage) {
          totalInput = chunk.usage.promptTokens;
          totalOutput = chunk.usage.completionTokens;
          eventBus.emit('token:usage', {
            agentId: 'global',
            sessionId: 'llm-stream',
            model: modelId,
            promptTokens: totalInput,
            completionTokens: totalOutput,
            timestamp: Date.now(),
          });
        }
        yield { type: 'done' as const, usage: chunk.usage };
        recordSuccess(modelId);
        break;
    }
  }
}
239
+
240
/** Discriminated union (on `type`) of events yielded by streamLlm(). */
export type StreamChunk =
  | { type: 'delta'; content: string }
  | { type: 'tool_call'; id: string; name: string; arguments: unknown }
  | { type: 'done'; usage?: { promptTokens: number; completionTokens: number; totalTokens: number } };