confused-ai-core 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Readme.md +331 -0
- package/package.json +1 -1
- package/FEATURES.md +0 -169
package/Readme.md
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
1
|
+
# @confused-ai/core
|
|
2
|
+
|
|
3
|
+
**Production-grade TypeScript framework for orchestrating multi-agent workflows.**
|
|
4
|
+
|
|
5
|
+
- **One-line agents** — `createAgent({ name, instructions })` with auto LLM, session, tools, guardrails
|
|
6
|
+
- **Model-agnostic** — OpenAI, OpenRouter, Ollama via `model: "openai:gpt-4o"` or env
|
|
7
|
+
- **Production-ready** — Circuit breaker, rate limiter, health checks, graceful shutdown, OTLP
|
|
8
|
+
- **Pluggable** — Session stores, vector stores, RAG, tools, guardrails, voice (TTS/STT)
|
|
9
|
+
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## Install
|
|
13
|
+
|
|
14
|
+
```bash
|
|
15
|
+
npm install @confused-ai/core
|
|
16
|
+
# or
|
|
17
|
+
pnpm add @confused-ai/core
|
|
18
|
+
# or
|
|
19
|
+
bun add @confused-ai/core
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
**Peer dependencies (optional):** `openai` (for OpenAIProvider), `better-sqlite3` (for SQLite session store). Install them only if you use those features.
|
|
23
|
+
|
|
24
|
+
**Requirements:** Node.js >= 18.
|
|
25
|
+
|
|
26
|
+
---
|
|
27
|
+
|
|
28
|
+
## Quick Start
|
|
29
|
+
|
|
30
|
+
### Minimal agent (uses `OPENAI_API_KEY` + default tools)
|
|
31
|
+
|
|
32
|
+
```typescript
|
|
33
|
+
import { createAgent } from '@confused-ai/core';
|
|
34
|
+
|
|
35
|
+
const agent = createAgent({
|
|
36
|
+
name: 'Assistant',
|
|
37
|
+
instructions: 'You are helpful and concise.',
|
|
38
|
+
});
|
|
39
|
+
|
|
40
|
+
const result = await agent.run('What is 2 + 2?');
|
|
41
|
+
console.log(result.text);
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
### With conversation memory (session)
|
|
45
|
+
|
|
46
|
+
```typescript
|
|
47
|
+
const agent = createAgent({
|
|
48
|
+
name: 'Assistant',
|
|
49
|
+
instructions: 'You are helpful.',
|
|
50
|
+
});
|
|
51
|
+
|
|
52
|
+
const sessionId = await agent.createSession('user-1');
|
|
53
|
+
const result = await agent.run('What did I just say?', { sessionId });
|
|
54
|
+
console.log(result.text);
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
### Custom model (OpenRouter, Ollama, etc.)
|
|
58
|
+
|
|
59
|
+
```typescript
|
|
60
|
+
const agent = createAgent({
|
|
61
|
+
name: 'Assistant',
|
|
62
|
+
instructions: 'You are helpful.',
|
|
63
|
+
model: 'openrouter:anthropic/claude-3.5-sonnet',
|
|
64
|
+
// or Ollama: model: 'ollama:llama3.2', baseURL: 'http://localhost:11434/v1'
|
|
65
|
+
});
|
|
66
|
+
const result = await agent.run('Hello!');
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
### Streaming
|
|
70
|
+
|
|
71
|
+
```typescript
|
|
72
|
+
await agent.run('Explain TypeScript in 3 sentences.', {
|
|
73
|
+
sessionId,
|
|
74
|
+
onChunk: (chunk) => process.stdout.write(chunk),
|
|
75
|
+
});
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
---
|
|
79
|
+
|
|
80
|
+
## Feature overview
|
|
81
|
+
|
|
82
|
+
| Category | Features |
|
|
83
|
+
| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
|
84
|
+
| **Learning** | User profiles, persistent memory stores, RAG, always/agentic learning modes. |
|
|
85
|
+
| **Core** | Model-agnostic (OpenAI/OpenRouter/Ollama), type-safe I/O with Zod, async-first, retries, multimodal messages. |
|
|
86
|
+
| **Knowledge** | RAGEngine, hybrid search, reranking, session/state persistence, pluggable vector stores. |
|
|
87
|
+
| **Orchestration** | Human-in-the-loop hooks, guardrails (sensitive data/schema), MCP & A2A, supervisors & sub-agents. |
|
|
88
|
+
| **Production** | Circuit breaker, rate limiter, health checks, graceful shutdown, OTLP export, LLM caching, resumable streaming. |
|
|
89
|
+
| **Artifacts** | Typed artifacts (text, data, reasoning, plan), versioned storage, media (images, audio, video). |
|
|
90
|
+
| **Voice** | TTS/STT with OpenAI and ElevenLabs, voice ID selection, streaming audio. |
|
|
91
|
+
|
|
92
|
+
---
|
|
93
|
+
|
|
94
|
+
## API reference
|
|
95
|
+
|
|
96
|
+
### Main entry: `createAgent`
|
|
97
|
+
|
|
98
|
+
```typescript
|
|
99
|
+
import {
|
|
100
|
+
createAgent,
|
|
101
|
+
type CreateAgentOptions,
|
|
102
|
+
type CreateAgentResult,
|
|
103
|
+
type AgentRunOptions,
|
|
104
|
+
} from '@confused-ai/core';
|
|
105
|
+
|
|
106
|
+
const agent: CreateAgentResult = createAgent({
|
|
107
|
+
name: string;
|
|
108
|
+
instructions: string;
|
|
109
|
+
model?: string; // e.g. 'gpt-4o', 'openrouter:...', 'ollama:...'
|
|
110
|
+
apiKey?: string;
|
|
111
|
+
baseURL?: string;
|
|
112
|
+
tools?: Tool[] | ToolRegistry;
|
|
113
|
+
sessionStore?: SessionStore;
|
|
114
|
+
guardrails?: GuardrailEngine | false;
|
|
115
|
+
maxSteps?: number;
|
|
116
|
+
timeoutMs?: number;
|
|
117
|
+
dev?: boolean;
|
|
118
|
+
// ... see CreateAgentOptions
|
|
119
|
+
});
|
|
120
|
+
|
|
121
|
+
// Run once
|
|
122
|
+
const result = await agent.run(prompt, { sessionId?, onChunk?, onToolCall? });
|
|
123
|
+
|
|
124
|
+
// Sessions
|
|
125
|
+
const sessionId = await agent.createSession(userId?);
|
|
126
|
+
const messages = await agent.getSessionMessages(sessionId);
|
|
127
|
+
```
|
|
128
|
+
|
|
129
|
+
---
|
|
130
|
+
|
|
131
|
+
## Subpath imports and examples
|
|
132
|
+
|
|
133
|
+
Use subpath imports for smaller bundles and a clearer separation of concerns.
|
|
134
|
+
|
|
135
|
+
### Production: circuit breaker
|
|
136
|
+
|
|
137
|
+
```typescript
|
|
138
|
+
import { createLLMCircuitBreaker } from '@confused-ai/core/production';
|
|
139
|
+
|
|
140
|
+
const breaker = createLLMCircuitBreaker('openai');
|
|
141
|
+
|
|
142
|
+
const result = await breaker.execute(async () => {
|
|
143
|
+
return await llm.generateText(messages);
|
|
144
|
+
});
|
|
145
|
+
|
|
146
|
+
if (result.success) {
|
|
147
|
+
console.log(result.value);
|
|
148
|
+
} else {
|
|
149
|
+
console.error('Circuit open:', result.error);
|
|
150
|
+
}
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
### Production: rate limiter
|
|
154
|
+
|
|
155
|
+
```typescript
|
|
156
|
+
import { createOpenAIRateLimiter } from '@confused-ai/core/production';
|
|
157
|
+
|
|
158
|
+
const limiter = createOpenAIRateLimiter('tier1'); // 60 RPM
|
|
159
|
+
|
|
160
|
+
await limiter.execute(async () => {
|
|
161
|
+
return await openai.chat.completions.create({ ... });
|
|
162
|
+
});
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
### Production: health checks
|
|
166
|
+
|
|
167
|
+
```typescript
|
|
168
|
+
import {
|
|
169
|
+
HealthCheckManager,
|
|
170
|
+
createLLMHealthCheck,
|
|
171
|
+
} from '@confused-ai/core/production';
|
|
172
|
+
|
|
173
|
+
const health = new HealthCheckManager({ version: '1.0.0' });
|
|
174
|
+
health.addComponent(createLLMHealthCheck(llmProvider));
|
|
175
|
+
|
|
176
|
+
// Express
|
|
177
|
+
app.get('/health', async (req, res) => {
|
|
178
|
+
const result = await health.check();
|
|
179
|
+
res.status(result.status === 'HEALTHY' ? 200 : 503).json(result);
|
|
180
|
+
});
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
### Production: graceful shutdown
|
|
184
|
+
|
|
185
|
+
```typescript
|
|
186
|
+
import { GracefulShutdown, createGracefulShutdown } from '@confused-ai/core/production';
|
|
187
|
+
|
|
188
|
+
const shutdown = createGracefulShutdown({ timeoutMs: 30000 });
|
|
189
|
+
|
|
190
|
+
shutdown.addHandler('database', async () => {
|
|
191
|
+
await db.close();
|
|
192
|
+
});
|
|
193
|
+
shutdown.addHandler('http-server', async () => {
|
|
194
|
+
await server.close();
|
|
195
|
+
});
|
|
196
|
+
|
|
197
|
+
shutdown.listen(); // Handles SIGTERM/SIGINT
|
|
198
|
+
```
|
|
199
|
+
|
|
200
|
+
### Production: resumable streaming
|
|
201
|
+
|
|
202
|
+
```typescript
|
|
203
|
+
import {
|
|
204
|
+
ResumableStreamManager,
|
|
205
|
+
formatSSE,
|
|
206
|
+
} from '@confused-ai/core/production';
|
|
207
|
+
|
|
208
|
+
const manager = new ResumableStreamManager();
|
|
209
|
+
const streamId = manager.createStream();
|
|
210
|
+
|
|
211
|
+
// On each chunk from LLM
|
|
212
|
+
manager.saveChunk(streamId, { type: 'text', content: 'Hello' });
|
|
213
|
+
|
|
214
|
+
// Client reconnects
|
|
215
|
+
const checkpoint = manager.getCheckpoint(streamId);
|
|
216
|
+
const missed = manager.getChunksSince(streamId, clientPosition);
|
|
217
|
+
for (const chunk of missed) {
|
|
218
|
+
res.write(formatSSE(chunk));
|
|
219
|
+
}
|
|
220
|
+
```
|
|
221
|
+
|
|
222
|
+
### LLM: caching
|
|
223
|
+
|
|
224
|
+
```typescript
|
|
225
|
+
import { LLMCache, withCache } from '@confused-ai/core/llm';
|
|
226
|
+
|
|
227
|
+
const cache = new LLMCache({ maxEntries: 1000, ttlMs: 60000 });
|
|
228
|
+
const cachedLLM = withCache(llm, cache);
|
|
229
|
+
|
|
230
|
+
const response = await cachedLLM.generateText(messages);
|
|
231
|
+
```
|
|
232
|
+
|
|
233
|
+
### Artifacts
|
|
234
|
+
|
|
235
|
+
```typescript
|
|
236
|
+
import {
|
|
237
|
+
InMemoryArtifactStorage,
|
|
238
|
+
createPlanArtifact,
|
|
239
|
+
createTextArtifact,
|
|
240
|
+
createReasoningArtifact,
|
|
241
|
+
} from '@confused-ai/core/artifacts';
|
|
242
|
+
|
|
243
|
+
const storage = new InMemoryArtifactStorage();
|
|
244
|
+
|
|
245
|
+
const plan = await storage.save(
|
|
246
|
+
createPlanArtifact('project-plan', 'Build a chatbot', [
|
|
247
|
+
{ description: 'Design conversation flows' },
|
|
248
|
+
{ description: 'Implement intent detection' },
|
|
249
|
+
])
|
|
250
|
+
);
|
|
251
|
+
|
|
252
|
+
const text = await storage.save(
|
|
253
|
+
createTextArtifact('readme', 'markdown', '# Hello')
|
|
254
|
+
);
|
|
255
|
+
|
|
256
|
+
const retrieved = await storage.get(plan.id);
|
|
257
|
+
```
|
|
258
|
+
|
|
259
|
+
### Voice (TTS / STT)
|
|
260
|
+
|
|
261
|
+
```typescript
|
|
262
|
+
import {
|
|
263
|
+
OpenAIVoiceProvider,
|
|
264
|
+
ElevenLabsVoiceProvider,
|
|
265
|
+
createVoiceProvider,
|
|
266
|
+
} from '@confused-ai/core/voice';
|
|
267
|
+
|
|
268
|
+
const voice = createVoiceProvider({ provider: 'openai' }); // or { provider: 'elevenlabs' }
|
|
269
|
+
|
|
270
|
+
// Text-to-Speech
|
|
271
|
+
const { audio } = await voice.textToSpeech('Hello, world!', {
|
|
272
|
+
voiceId: 'nova',
|
|
273
|
+
speed: 1.0,
|
|
274
|
+
});
|
|
275
|
+
|
|
276
|
+
// Speech-to-Text
|
|
277
|
+
const { text } = await voice.speechToText(audioBuffer);
|
|
278
|
+
```
|
|
279
|
+
|
|
280
|
+
### Observability (OTLP)
|
|
281
|
+
|
|
282
|
+
```typescript
|
|
283
|
+
import {
|
|
284
|
+
OTLPTraceExporter,
|
|
285
|
+
OTLPMetricsExporter,
|
|
286
|
+
} from '@confused-ai/core/observability';
|
|
287
|
+
|
|
288
|
+
// Configure OTLP exporters for traces and metrics
|
|
289
|
+
const traceExporter = new OTLPTraceExporter({ endpoint: 'http://localhost:4318' });
|
|
290
|
+
const metricsExporter = new OTLPMetricsExporter({ endpoint: 'http://localhost:4318' });
|
|
291
|
+
```
|
|
292
|
+
|
|
293
|
+
---
|
|
294
|
+
|
|
295
|
+
## Module map (subpaths)
|
|
296
|
+
|
|
297
|
+
| Import path | Contents |
|
|
298
|
+
| --------------------------- | ------------------------------------------------------------------------ |
|
|
299
|
+
| `@confused-ai/core` | Main API: `createAgent`, Agent, tools, session, guardrails, errors, etc. |
|
|
300
|
+
| `@confused-ai/core/production` | Circuit breaker, rate limiter, health, graceful shutdown, resumable stream |
|
|
301
|
+
| `@confused-ai/core/llm` | OpenAIProvider, OpenRouter, model resolver, LLMCache, withCache |
|
|
302
|
+
| `@confused-ai/core/artifacts` | InMemoryArtifactStorage, createPlanArtifact, createTextArtifact, MediaManager |
|
|
303
|
+
| `@confused-ai/core/voice` | OpenAIVoiceProvider, ElevenLabsVoiceProvider, createVoiceProvider |
|
|
304
|
+
| `@confused-ai/core/observability` | ConsoleLogger, InMemoryTracer, OTLPTraceExporter, OTLPMetricsExporter |
|
|
305
|
+
| `@confused-ai/core/session` | InMemorySessionStore, SQLiteSessionStore, SessionStore types |
|
|
306
|
+
| `@confused-ai/core/guardrails` | GuardrailValidator, createSensitiveDataRule, allowlist |
|
|
307
|
+
| `@confused-ai/core/tools` | HttpClientTool, BrowserTool, registry, base tools |
|
|
308
|
+
| `@confused-ai/core/memory` | Vector store, in-memory memory store |
|
|
309
|
+
| `@confused-ai/core/orchestration` | Orchestrator, pipeline, supervisor, swarm, MCP types |
|
|
310
|
+
| `@confused-ai/core/agentic` | Agentic runner and types (used by createAgent) |
|
|
311
|
+
| `@confused-ai/core/core` | Base agent, context builder, schemas |
|
|
312
|
+
| `@confused-ai/core/planner` | Classical planner, LLM planner |
|
|
313
|
+
| `@confused-ai/core/execution` | Execution engine, graph builder, worker pool |
|
|
314
|
+
|
|
315
|
+
---
|
|
316
|
+
|
|
317
|
+
## Environment variables
|
|
318
|
+
|
|
319
|
+
| Variable | Purpose |
|
|
320
|
+
| ------------------------ | -------------------------------- |
|
|
321
|
+
| `OPENAI_API_KEY` | Default OpenAI API key |
|
|
322
|
+
| `OPENAI_MODEL` | Default model (e.g. `gpt-4o`) |
|
|
323
|
+
| `OPENAI_BASE_URL` | Custom API base (e.g. Ollama) |
|
|
324
|
+
| `OPENROUTER_API_KEY` | OpenRouter API key |
|
|
325
|
+
| `OPENROUTER_MODEL` | OpenRouter model |
|
|
326
|
+
|
|
327
|
+
---
|
|
328
|
+
|
|
329
|
+
## License
|
|
330
|
+
|
|
331
|
+
MIT
|
package/package.json
CHANGED
package/FEATURES.md
DELETED
|
@@ -1,169 +0,0 @@
|
|
|
1
|
-
# Core Framework Features
|
|
2
|
-
|
|
3
|
-
| Category | Features |
|
|
4
|
-
| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
|
5
|
-
| **Learning** | User profiles, persistent memory stores, RAG (Retrieval-Augmented Generation), always/agentic learning modes. |
|
|
6
|
-
| **Core** | Model-agnostic (OpenAI/OpenRouter/Ollama via strings), Type-safe I/O with Zod, async-first, long-running operations with retries, multimodal messages. |
|
|
7
|
-
| **Knowledge** | RAGEngine, hybrid search, reranking, session/state persistence, pluggable vector stores. |
|
|
8
|
-
| **Orchestration** | Human-in-the-loop hooks, guardrails (sensitive data/schema validation), MCP & A2A support, supervisors & sub-agents. |
|
|
9
|
-
| **Production** | Circuit breaker, rate limiter, health checks, graceful shutdown, OTLP export, LLM caching, resumable streaming. |
|
|
10
|
-
| **Artifacts** | Typed artifacts (text, data, reasoning, plan), versioned storage, media support (images, audio, video). |
|
|
11
|
-
| **Voice** | TTS/STT with OpenAI and ElevenLabs, voice ID selection, streaming audio. |
|
|
12
|
-
|
|
13
|
-
---
|
|
14
|
-
|
|
15
|
-
## Quick Start Examples
|
|
16
|
-
|
|
17
|
-
### 🔧 Circuit Breaker
|
|
18
|
-
```typescript
|
|
19
|
-
import { createLLMCircuitBreaker } from '@confused-ai/core/production';
|
|
20
|
-
|
|
21
|
-
const breaker = createLLMCircuitBreaker('openai');
|
|
22
|
-
|
|
23
|
-
// Wrap LLM calls - automatically opens circuit on repeated failures
|
|
24
|
-
const result = await breaker.execute(async () => {
|
|
25
|
-
return await llm.generateText({ messages });
|
|
26
|
-
});
|
|
27
|
-
|
|
28
|
-
if (result.success) {
|
|
29
|
-
console.log(result.value);
|
|
30
|
-
} else {
|
|
31
|
-
console.error('Circuit open:', result.error);
|
|
32
|
-
}
|
|
33
|
-
```
|
|
34
|
-
|
|
35
|
-
### ⏱️ Rate Limiter
|
|
36
|
-
```typescript
|
|
37
|
-
import { createOpenAIRateLimiter } from '@confused-ai/core/production';
|
|
38
|
-
|
|
39
|
-
// Create limiter matching OpenAI tier limits
|
|
40
|
-
const limiter = createOpenAIRateLimiter('tier1'); // 60 RPM
|
|
41
|
-
|
|
42
|
-
await limiter.execute(async () => {
|
|
43
|
-
return await openai.chat.completions.create({ ... });
|
|
44
|
-
});
|
|
45
|
-
```
|
|
46
|
-
|
|
47
|
-
### 🏥 Health Checks
|
|
48
|
-
```typescript
|
|
49
|
-
import { HealthCheckManager, createLLMHealthCheck } from '@confused-ai/core/production';
|
|
50
|
-
|
|
51
|
-
const health = new HealthCheckManager({ version: '1.0.0' });
|
|
52
|
-
health.addComponent(createLLMHealthCheck(llmProvider));
|
|
53
|
-
|
|
54
|
-
// Express endpoint
|
|
55
|
-
app.get('/health', async (req, res) => {
|
|
56
|
-
const result = await health.check();
|
|
57
|
-
res.status(result.status === 'HEALTHY' ? 200 : 503).json(result);
|
|
58
|
-
});
|
|
59
|
-
```
|
|
60
|
-
|
|
61
|
-
### 💾 LLM Response Cache
|
|
62
|
-
```typescript
|
|
63
|
-
import { LLMCache, withCache } from '@confused-ai/core/llm';
|
|
64
|
-
|
|
65
|
-
const cache = new LLMCache({ maxEntries: 1000, ttlMs: 60000 });
|
|
66
|
-
const cachedLLM = withCache(llm, cache);
|
|
67
|
-
|
|
68
|
-
// Identical requests return cached responses
|
|
69
|
-
const response = await cachedLLM.generateText({ messages });
|
|
70
|
-
```
|
|
71
|
-
|
|
72
|
-
### 📦 Artifacts
|
|
73
|
-
```typescript
|
|
74
|
-
import { InMemoryArtifactStorage, createPlanArtifact } from '@confused-ai/core/artifacts';
|
|
75
|
-
|
|
76
|
-
const storage = new InMemoryArtifactStorage();
|
|
77
|
-
|
|
78
|
-
// Create a plan artifact
|
|
79
|
-
const plan = await storage.save(createPlanArtifact(
|
|
80
|
-
'project-plan',
|
|
81
|
-
'Build a chatbot',
|
|
82
|
-
[
|
|
83
|
-
{ description: 'Design conversation flows' },
|
|
84
|
-
{ description: 'Implement intent detection' },
|
|
85
|
-
{ description: 'Add response generation' },
|
|
86
|
-
]
|
|
87
|
-
));
|
|
88
|
-
|
|
89
|
-
// Retrieve with versioning
|
|
90
|
-
const retrieved = await storage.get(plan.id);
|
|
91
|
-
```
|
|
92
|
-
|
|
93
|
-
### 🎤 Voice (TTS/STT)
|
|
94
|
-
```typescript
|
|
95
|
-
import { OpenAIVoiceProvider } from '@confused-ai/core/voice';
|
|
96
|
-
|
|
97
|
-
const voice = new OpenAIVoiceProvider();
|
|
98
|
-
|
|
99
|
-
// Text-to-Speech
|
|
100
|
-
const { audio } = await voice.textToSpeech('Hello, world!', {
|
|
101
|
-
voiceId: 'nova',
|
|
102
|
-
speed: 1.0
|
|
103
|
-
});
|
|
104
|
-
|
|
105
|
-
// Speech-to-Text
|
|
106
|
-
const { text } = await voice.speechToText(audioBuffer);
|
|
107
|
-
```
|
|
108
|
-
|
|
109
|
-
### 🔄 Resumable Streaming
|
|
110
|
-
```typescript
|
|
111
|
-
import { ResumableStreamManager, formatSSE } from '@confused-ai/core/production';
|
|
112
|
-
|
|
113
|
-
const manager = new ResumableStreamManager();
|
|
114
|
-
|
|
115
|
-
// Create stream
|
|
116
|
-
const streamId = manager.createStream();
|
|
117
|
-
|
|
118
|
-
// On each chunk from LLM
|
|
119
|
-
manager.saveChunk(streamId, { type: 'text', content: 'Hello' });
|
|
120
|
-
|
|
121
|
-
// Client reconnects after disconnect
|
|
122
|
-
const checkpoint = manager.getCheckpoint(streamId);
|
|
123
|
-
const missed = manager.getChunksSince(streamId, clientPosition);
|
|
124
|
-
|
|
125
|
-
for (const chunk of missed) {
|
|
126
|
-
res.write(formatSSE(chunk)); // SSE compatible
|
|
127
|
-
}
|
|
128
|
-
```
|
|
129
|
-
|
|
130
|
-
### 🛡️ Graceful Shutdown
|
|
131
|
-
```typescript
|
|
132
|
-
import { GracefulShutdown } from '@confused-ai/core/production';
|
|
133
|
-
|
|
134
|
-
const shutdown = new GracefulShutdown({ timeoutMs: 30000 });
|
|
135
|
-
|
|
136
|
-
shutdown.addHandler('database', async () => {
|
|
137
|
-
await db.close();
|
|
138
|
-
});
|
|
139
|
-
|
|
140
|
-
shutdown.addHandler('http-server', async () => {
|
|
141
|
-
await server.close();
|
|
142
|
-
});
|
|
143
|
-
|
|
144
|
-
shutdown.listen(); // Handles SIGTERM/SIGINT
|
|
145
|
-
```
|
|
146
|
-
|
|
147
|
-
---
|
|
148
|
-
|
|
149
|
-
## Module Imports
|
|
150
|
-
|
|
151
|
-
```typescript
|
|
152
|
-
// Production resilience
|
|
153
|
-
import {
|
|
154
|
-
CircuitBreaker, RateLimiter, HealthCheckManager,
|
|
155
|
-
GracefulShutdown, ResumableStreamManager
|
|
156
|
-
} from '@confused-ai/core/production';
|
|
157
|
-
|
|
158
|
-
// Observability
|
|
159
|
-
import { OTLPTraceExporter, OTLPMetricsExporter } from '@confused-ai/core/observability';
|
|
160
|
-
|
|
161
|
-
// LLM with caching
|
|
162
|
-
import { LLMCache, withCache } from '@confused-ai/core/llm';
|
|
163
|
-
|
|
164
|
-
// Artifacts
|
|
165
|
-
import { InMemoryArtifactStorage, MediaManager } from '@confused-ai/core/artifacts';
|
|
166
|
-
|
|
167
|
-
// Voice
|
|
168
|
-
import { OpenAIVoiceProvider, ElevenLabsVoiceProvider } from '@confused-ai/core/voice';
|
|
169
|
-
```
|