@yesvara/svara 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +497 -0
  3. package/dist/chunk-CIESM3BP.mjs +33 -0
  4. package/dist/chunk-FEA5KIJN.mjs +418 -0
  5. package/dist/cli/index.d.mts +1 -0
  6. package/dist/cli/index.d.ts +1 -0
  7. package/dist/cli/index.js +328 -0
  8. package/dist/cli/index.mjs +39 -0
  9. package/dist/dev-OYGXXK2B.mjs +69 -0
  10. package/dist/index.d.mts +967 -0
  11. package/dist/index.d.ts +967 -0
  12. package/dist/index.js +1976 -0
  13. package/dist/index.mjs +1502 -0
  14. package/dist/new-7K4NIDZO.mjs +177 -0
  15. package/dist/retriever-4QY667XF.mjs +7 -0
  16. package/examples/01-basic/index.ts +26 -0
  17. package/examples/02-with-tools/index.ts +73 -0
  18. package/examples/03-rag-knowledge/index.ts +41 -0
  19. package/examples/04-multi-channel/index.ts +91 -0
  20. package/package.json +74 -0
  21. package/src/app/index.ts +176 -0
  22. package/src/channels/telegram.ts +122 -0
  23. package/src/channels/web.ts +118 -0
  24. package/src/channels/whatsapp.ts +161 -0
  25. package/src/cli/commands/dev.ts +87 -0
  26. package/src/cli/commands/new.ts +213 -0
  27. package/src/cli/index.ts +78 -0
  28. package/src/core/agent.ts +607 -0
  29. package/src/core/llm.ts +406 -0
  30. package/src/core/types.ts +183 -0
  31. package/src/database/schema.ts +79 -0
  32. package/src/database/sqlite.ts +239 -0
  33. package/src/index.ts +94 -0
  34. package/src/memory/context.ts +49 -0
  35. package/src/memory/conversation.ts +51 -0
  36. package/src/rag/chunker.ts +165 -0
  37. package/src/rag/loader.ts +216 -0
  38. package/src/rag/retriever.ts +248 -0
  39. package/src/tools/executor.ts +54 -0
  40. package/src/tools/index.ts +89 -0
  41. package/src/tools/registry.ts +44 -0
  42. package/src/types.ts +131 -0
  43. package/tsconfig.json +26 -0
@@ -0,0 +1,406 @@
1
+ /**
2
+ * @internal
3
+ * LLM abstraction layer with automatic provider detection.
4
+ *
5
+ * Users never touch this directly — they just pass a model name:
6
+ * 'gpt-4o' → OpenAI (auto)
7
+ * 'claude-opus-4-6' → Anthropic (auto)
8
+ * 'llama3' → Ollama (auto, local)
9
+ * 'llama-3.1-70b-...' → Groq (auto, if GROQ_API_KEY set)
10
+ */
11
+
12
+ import type {
13
+ LLMConfig,
14
+ LLMMessage,
15
+ LLMResponse,
16
+ LLMToolCall,
17
+ InternalTool,
18
+ LLMProviderName,
19
+ } from './types.js';
20
+
21
+ // ─── Model → Provider Auto-Detection ─────────────────────────────────────────
22
+
23
+ const OPENAI_PREFIXES = ['gpt-', 'o1', 'o3', 'text-davinci', 'chatgpt'];
24
+ const ANTHROPIC_PREFIXES = ['claude-'];
25
+ const GROQ_MODELS = [
26
+ 'llama-3.1-405b', 'llama-3.1-70b', 'llama-3.1-8b',
27
+ 'mixtral-8x7b', 'gemma-7b', 'gemma2-9b',
28
+ ];
29
+
30
+ /**
31
+ * Auto-detect the LLM provider from a model name string.
32
+ * Priority: OpenAI → Anthropic → Groq (if key present) → Ollama
33
+ */
34
+ export function detectProvider(model: string): LLMProviderName {
35
+ const m = model.toLowerCase();
36
+
37
+ if (OPENAI_PREFIXES.some((p) => m.startsWith(p))) return 'openai';
38
+ if (ANTHROPIC_PREFIXES.some((p) => m.startsWith(p))) return 'anthropic';
39
+
40
+ // Groq uses Llama/Mixtral model names — differentiate by env key
41
+ if (GROQ_MODELS.some((gm) => m.includes(gm)) && process.env.GROQ_API_KEY) {
42
+ return 'groq';
43
+ }
44
+
45
+ // Anything else → Ollama (local self-hosted)
46
+ return 'ollama';
47
+ }
48
+
49
+ /**
50
+ * Build an LLMConfig from just a model name string.
51
+ * This is what enables the magic: `model: 'gpt-4o'` just works.
52
+ */
53
+ export function resolveConfig(model: string, overrides: Partial<LLMConfig> = {}): LLMConfig {
54
+ const provider = overrides.provider ?? detectProvider(model);
55
+ return {
56
+ provider,
57
+ model,
58
+ temperature: 0.7,
59
+ timeout: 60_000,
60
+ ...overrides,
61
+ };
62
+ }
63
+
64
// ─── Provider Interface ───────────────────────────────────────────────────────

/** Common contract every provider adapter implements. */
export interface LLMAdapter {
  /**
   * Send one chat turn to the provider.
   * @param messages full conversation (system/user/assistant/tool turns)
   * @param tools optional tool definitions offered to the model
   * @param temperature per-call override of the configured temperature
   */
  chat(messages: LLMMessage[], tools?: InternalTool[], temperature?: number): Promise<LLMResponse>;
  /** Token estimate for budgeting; adapters here approximate as chars/4. */
  countTokens(text: string): number;
}
70
+
71
+ // ─── OpenAI Adapter ──────────────────────────────────────────────────────────
72
+
73
+ class OpenAIAdapter implements LLMAdapter {
74
+ private client: unknown;
75
+
76
+ constructor(private config: LLMConfig) {
77
+ this.client = this.init();
78
+ }
79
+
80
+ private init(): unknown {
81
+ try {
82
+ // eslint-disable-next-line @typescript-eslint/no-require-imports
83
+ const { default: OpenAI } = require('openai');
84
+ return new OpenAI({
85
+ apiKey: this.config.apiKey ?? process.env.OPENAI_API_KEY,
86
+ baseURL: this.config.baseURL,
87
+ timeout: this.config.timeout,
88
+ });
89
+ } catch {
90
+ throw new SvaraLLMError(
91
+ 'openai',
92
+ 'Package not found. Run: npm install openai'
93
+ );
94
+ }
95
+ }
96
+
97
+ async chat(messages: LLMMessage[], tools?: InternalTool[], temperature?: number): Promise<LLMResponse> {
98
+ const client = this.client as {
99
+ chat: { completions: { create: (p: unknown) => Promise<unknown> } };
100
+ };
101
+
102
+ const response = await client.chat.completions.create({
103
+ model: this.config.model,
104
+ messages: messages.map(toOpenAIMessage),
105
+ tools: tools?.length ? tools.map(toOpenAITool) : undefined,
106
+ tool_choice: tools?.length ? 'auto' : undefined,
107
+ temperature: temperature ?? this.config.temperature ?? 0.7,
108
+ max_tokens: this.config.maxTokens,
109
+ }) as OpenAIResponse;
110
+
111
+ const choice = response.choices[0];
112
+ const toolCalls = (choice.message.tool_calls ?? []).map((tc) => ({
113
+ id: tc.id,
114
+ name: tc.function.name,
115
+ arguments: safeParseJSON(tc.function.arguments),
116
+ }));
117
+
118
+ return {
119
+ content: choice.message.content ?? '',
120
+ toolCalls: toolCalls.length ? toolCalls : undefined,
121
+ usage: {
122
+ promptTokens: response.usage.prompt_tokens,
123
+ completionTokens: response.usage.completion_tokens,
124
+ totalTokens: response.usage.total_tokens,
125
+ },
126
+ model: response.model,
127
+ finishReason: choice.finish_reason === 'tool_calls' ? 'tool_calls' : 'stop',
128
+ };
129
+ }
130
+
131
+ countTokens(text: string): number {
132
+ return Math.ceil(text.length / 4);
133
+ }
134
+ }
135
+
136
+ // ─── Anthropic Adapter ───────────────────────────────────────────────────────
137
+
138
+ class AnthropicAdapter implements LLMAdapter {
139
+ private client: unknown;
140
+
141
+ constructor(private config: LLMConfig) {
142
+ this.client = this.init();
143
+ }
144
+
145
+ private init(): unknown {
146
+ try {
147
+ // eslint-disable-next-line @typescript-eslint/no-require-imports
148
+ const { default: Anthropic } = require('@anthropic-ai/sdk');
149
+ return new Anthropic({
150
+ apiKey: this.config.apiKey ?? process.env.ANTHROPIC_API_KEY,
151
+ baseURL: this.config.baseURL,
152
+ timeout: this.config.timeout,
153
+ });
154
+ } catch {
155
+ throw new SvaraLLMError(
156
+ 'anthropic',
157
+ 'Package not found. Run: npm install @anthropic-ai/sdk'
158
+ );
159
+ }
160
+ }
161
+
162
+ async chat(messages: LLMMessage[], tools?: InternalTool[], temperature?: number): Promise<LLMResponse> {
163
+ const client = this.client as {
164
+ messages: { create: (p: unknown) => Promise<unknown> };
165
+ };
166
+
167
+ const systemMsg = messages.find((m) => m.role === 'system')?.content;
168
+ const chatMsgs = messages.filter((m) => m.role !== 'system').map(toAnthropicMessage);
169
+
170
+ const response = await client.messages.create({
171
+ model: this.config.model,
172
+ system: systemMsg,
173
+ messages: chatMsgs,
174
+ tools: tools?.length ? tools.map(toAnthropicTool) : undefined,
175
+ max_tokens: this.config.maxTokens ?? 4096,
176
+ temperature: temperature ?? this.config.temperature ?? 0.7,
177
+ }) as AnthropicResponse;
178
+
179
+ const textParts = response.content.filter((c) => c.type === 'text');
180
+ const toolParts = response.content.filter((c) => c.type === 'tool_use');
181
+
182
+ const toolCalls: LLMToolCall[] = toolParts.map((c) => ({
183
+ id: (c as { id: string; name: string; input: Record<string, unknown> }).id,
184
+ name: (c as { id: string; name: string; input: Record<string, unknown> }).name,
185
+ arguments: (c as { id: string; name: string; input: Record<string, unknown> }).input,
186
+ }));
187
+
188
+ return {
189
+ content: textParts.map((c) => (c as { text: string }).text).join(''),
190
+ toolCalls: toolCalls.length ? toolCalls : undefined,
191
+ usage: {
192
+ promptTokens: response.usage.input_tokens,
193
+ completionTokens: response.usage.output_tokens,
194
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
195
+ },
196
+ model: response.model,
197
+ finishReason: response.stop_reason === 'tool_use' ? 'tool_calls' : 'stop',
198
+ };
199
+ }
200
+
201
+ countTokens(text: string): number {
202
+ return Math.ceil(text.length / 4);
203
+ }
204
+ }
205
+
206
+ // ─── Ollama Adapter (local) ──────────────────────────────────────────────────
207
+
208
+ class OllamaAdapter implements LLMAdapter {
209
+ private baseURL: string;
210
+
211
+ constructor(private config: LLMConfig) {
212
+ this.baseURL = config.baseURL ?? 'http://localhost:11434';
213
+ }
214
+
215
+ async chat(messages: LLMMessage[], _tools?: InternalTool[], temperature?: number): Promise<LLMResponse> {
216
+ const response = await fetch(`${this.baseURL}/api/chat`, {
217
+ method: 'POST',
218
+ headers: { 'Content-Type': 'application/json' },
219
+ body: JSON.stringify({
220
+ model: this.config.model,
221
+ messages: messages.map((m) => ({ role: m.role, content: m.content })),
222
+ options: { temperature: temperature ?? this.config.temperature ?? 0.7 },
223
+ stream: false,
224
+ }),
225
+ signal: AbortSignal.timeout(this.config.timeout ?? 60_000),
226
+ });
227
+
228
+ if (!response.ok) {
229
+ throw new SvaraLLMError('ollama', `Request failed: ${response.statusText}. Is Ollama running?`);
230
+ }
231
+
232
+ const data = await response.json() as OllamaResponse;
233
+
234
+ return {
235
+ content: data.message.content,
236
+ usage: {
237
+ promptTokens: data.prompt_eval_count ?? 0,
238
+ completionTokens: data.eval_count ?? 0,
239
+ totalTokens: (data.prompt_eval_count ?? 0) + (data.eval_count ?? 0),
240
+ },
241
+ model: data.model ?? this.config.model,
242
+ finishReason: 'stop',
243
+ };
244
+ }
245
+
246
+ countTokens(text: string): number {
247
+ return Math.ceil(text.length / 4);
248
+ }
249
+ }
250
+
251
+ // ─── Groq Adapter (OpenAI-compatible) ───────────────────────────────────────
252
+
253
/**
 * Groq exposes an OpenAI-compatible API, so this adapter reuses OpenAIAdapter
 * and only swaps the base URL and API-key source.
 *
 * NOTE(review): init() failures will still report provider 'openai' and say
 * "npm install openai" — confirm that message is acceptable for Groq users.
 */
class GroqAdapter extends OpenAIAdapter {
  constructor(config: LLMConfig) {
    super({
      ...config,
      baseURL: config.baseURL ?? 'https://api.groq.com/openai/v1',
      apiKey: config.apiKey ?? process.env.GROQ_API_KEY,
    });
  }
}
262
+
263
+ // ─── Factory ─────────────────────────────────────────────────────────────────
264
+
265
+ /**
266
+ * Create an LLM adapter from a resolved config.
267
+ * @internal — use resolveConfig() to build the config from a model name.
268
+ */
269
+ export function createAdapter(config: LLMConfig): LLMAdapter {
270
+ switch (config.provider) {
271
+ case 'openai': return new OpenAIAdapter(config);
272
+ case 'anthropic': return new AnthropicAdapter(config);
273
+ case 'ollama': return new OllamaAdapter(config);
274
+ case 'groq': return new GroqAdapter(config);
275
+ default:
276
+ throw new Error(
277
+ `[@yesvara/svara] Unknown LLM provider: "${config.provider as string}".\n` +
278
+ 'Auto-supported: openai, anthropic, ollama, groq'
279
+ );
280
+ }
281
+ }
282
+
283
+ // ─── Format Converters ────────────────────────────────────────────────────────
284
+
285
+ function toOpenAIMessage(msg: LLMMessage): Record<string, unknown> {
286
+ if (msg.role === 'tool') {
287
+ return { role: 'tool', tool_call_id: msg.toolCallId, content: msg.content };
288
+ }
289
+ if (msg.toolCalls?.length) {
290
+ return {
291
+ role: 'assistant',
292
+ content: msg.content || null,
293
+ tool_calls: msg.toolCalls.map((tc) => ({
294
+ id: tc.id,
295
+ type: 'function',
296
+ function: { name: tc.name, arguments: JSON.stringify(tc.arguments) },
297
+ })),
298
+ };
299
+ }
300
+ return { role: msg.role, content: msg.content };
301
+ }
302
+
303
+ function toOpenAITool(tool: InternalTool): Record<string, unknown> {
304
+ return {
305
+ type: 'function',
306
+ function: {
307
+ name: tool.name,
308
+ description: tool.description,
309
+ parameters: {
310
+ type: 'object',
311
+ properties: Object.fromEntries(
312
+ Object.entries(tool.parameters).map(([k, p]) => [
313
+ k,
314
+ { type: p.type, description: p.description, enum: p.enum },
315
+ ])
316
+ ),
317
+ required: Object.entries(tool.parameters)
318
+ .filter(([, p]) => p.required)
319
+ .map(([k]) => k),
320
+ },
321
+ },
322
+ };
323
+ }
324
+
325
+ function toAnthropicMessage(msg: LLMMessage): Record<string, unknown> {
326
+ if (msg.role === 'tool') {
327
+ return {
328
+ role: 'user',
329
+ content: [{ type: 'tool_result', tool_use_id: msg.toolCallId, content: msg.content }],
330
+ };
331
+ }
332
+ if (msg.toolCalls?.length) {
333
+ return {
334
+ role: 'assistant',
335
+ content: [
336
+ ...(msg.content ? [{ type: 'text', text: msg.content }] : []),
337
+ ...msg.toolCalls.map((tc) => ({
338
+ type: 'tool_use', id: tc.id, name: tc.name, input: tc.arguments,
339
+ })),
340
+ ],
341
+ };
342
+ }
343
+ return { role: msg.role, content: msg.content };
344
+ }
345
+
346
+ function toAnthropicTool(tool: InternalTool): Record<string, unknown> {
347
+ return {
348
+ name: tool.name,
349
+ description: tool.description,
350
+ input_schema: {
351
+ type: 'object',
352
+ properties: Object.fromEntries(
353
+ Object.entries(tool.parameters).map(([k, p]) => [
354
+ k,
355
+ { type: p.type, description: p.description, enum: p.enum },
356
+ ])
357
+ ),
358
+ required: Object.entries(tool.parameters)
359
+ .filter(([, p]) => p.required)
360
+ .map(([k]) => k),
361
+ },
362
+ };
363
+ }
364
+
365
+ // ─── Error Class ──────────────────────────────────────────────────────────────
366
+
367
+ export class SvaraLLMError extends Error {
368
+ constructor(public provider: string, message: string) {
369
+ super(`[@yesvara/svara] LLM error (${provider}): ${message}`);
370
+ this.name = 'SvaraLLMError';
371
+ }
372
+ }
373
+
374
+ // ─── Helpers ──────────────────────────────────────────────────────────────────
375
+
376
+ function safeParseJSON(str: string): Record<string, unknown> {
377
+ try { return JSON.parse(str); } catch { return {}; }
378
+ }
379
+
380
// ─── Private Response Types ───────────────────────────────────────────────────

/** Minimal shape of an OpenAI chat-completions response (only fields we read). */
interface OpenAIResponse {
  choices: Array<{
    message: {
      content: string | null;
      tool_calls?: Array<{ id: string; function: { name: string; arguments: string } }>;
    };
    finish_reason: string;
  }>;
  usage: { prompt_tokens: number; completion_tokens: number; total_tokens: number };
  model: string;
}

/** Minimal shape of an Anthropic Messages API response (only fields we read). */
interface AnthropicResponse {
  // Content blocks: 'text' blocks carry `text`; 'tool_use' blocks carry id/name/input.
  content: Array<{ type: string; text?: string; id?: string; name?: string; input?: unknown }>;
  stop_reason: string;
  usage: { input_tokens: number; output_tokens: number };
  model: string;
}

/** Minimal shape of an Ollama /api/chat (non-streaming) response. */
interface OllamaResponse {
  message: { content: string };
  prompt_eval_count?: number; // prompt token count; may be absent
  eval_count?: number;        // generated token count; may be absent
  model?: string;
}
@@ -0,0 +1,183 @@
1
/**
 * @internal
 * Internal types for the SvaraJS framework engine.
 * These are NOT exported to users — see src/types.ts for the public API.
 */

// ─── LLM Internals ───────────────────────────────────────────────────────────

/** Providers the adapter factory knows how to construct. */
export type LLMProviderName = 'openai' | 'anthropic' | 'ollama' | 'groq';

/** Resolved per-provider settings; built by resolveConfig() from a model name. */
export interface LLMConfig {
  provider: LLMProviderName;
  model: string;
  apiKey?: string;   // adapters fall back to the provider's env var when omitted
  baseURL?: string;  // override for proxies / self-hosted endpoints
  temperature?: number;
  maxTokens?: number;
  timeout?: number;  // milliseconds (used with AbortSignal.timeout)
}

/** Provider-agnostic chat message. */
export interface LLMMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string;
  toolCallId?: string;       // on role 'tool': id of the call this result answers
  toolCalls?: LLMToolCall[]; // on assistant turns that requested tool calls
  name?: string;
}

/** Normalized response every adapter returns from chat(). */
export interface LLMResponse {
  content: string;
  toolCalls?: LLMToolCall[];
  usage: TokenUsage;
  model: string;
  finishReason: 'stop' | 'tool_calls' | 'length' | 'error';
}

/** A tool invocation requested by the model, with parsed arguments. */
export interface LLMToolCall {
  id: string;
  name: string;
  arguments: Record<string, unknown>;
}

/** Token accounting as reported by the provider (0s when unreported). */
export interface TokenUsage {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
}
48
+
49
// ─── Tool Internals ───────────────────────────────────────────────────────────

/** Normalized tool definition used by the agent loop and provider adapters. */
export interface InternalTool {
  name: string;
  description: string;
  parameters: Record<string, InternalToolParam>;
  /** Executes the tool with the model-supplied arguments. */
  run: (args: Record<string, unknown>, ctx: InternalAgentContext) => Promise<unknown>;
  category?: string;
  timeout?: number;
}

/** JSON-schema-like description of a single tool parameter. */
export interface InternalToolParam {
  type: 'string' | 'number' | 'boolean' | 'object' | 'array';
  description: string;
  required?: boolean; // parameter is optional when omitted
  enum?: string[];    // allowed values, when constrained
  default?: unknown;
}

/** Record of one completed (or failed) tool invocation. */
export interface ToolExecution {
  toolCallId: string;
  name: string;
  result: unknown;
  error?: string;   // set when the tool threw instead of returning
  duration: number; // presumably milliseconds — confirm against tools/executor
}

// ─── Agent Internals ──────────────────────────────────────────────────────────

/** Per-invocation context threaded through tools and memory. */
export interface InternalAgentContext {
  sessionId: string;
  userId: string;
  agentName: string;
  history: LLMMessage[];
  metadata: Record<string, unknown>;
}

/** Optional per-run overrides accepted by the agent entry point. */
export interface AgentRunOptions {
  sessionId?: string;
  userId?: string;
  metadata?: Record<string, unknown>;
}

/** Summary of one full agent run (possibly several LLM iterations). */
export interface AgentRunResult {
  response: string;
  sessionId: string;
  toolsUsed: string[];
  iterations: number;
  usage: TokenUsage;
  duration: number; // presumably milliseconds — confirm against core/agent
}
100
+
101
// ─── Memory Internals ────────────────────────────────────────────────────────

/** In-memory conversation state for a single session. */
export interface SessionStore {
  messages: LLMMessage[];
  createdAt: Date;
  updatedAt: Date;
}

// ─── RAG Internals ───────────────────────────────────────────────────────────

/** Source formats the document loader recognizes. */
export type DocumentType = 'text' | 'markdown' | 'pdf' | 'html' | 'json' | 'docx';

/** A loaded source document, before chunking. */
export interface Document {
  id: string;
  content: string;
  type: DocumentType;
  source: string;
  metadata: {
    filename: string;
    extension: string;
    size: number;         // presumably bytes — confirm against rag/loader
    lastModified: string;
  };
}

/** One chunk of a document, as stored and retrieved by the vector store. */
export interface DocumentChunk {
  id: string;
  documentId: string;
  content: string;
  index: number;
  metadata: {
    filename: string;
    extension: string;
    size: number;
    lastModified: string;
    chunkIndex: number;
    strategy: 'fixed' | 'sentence' | 'paragraph'; // chunking strategy used
    charCount: number;
  };
}

/** User-facing RAG configuration; every section is optional. */
export interface RAGConfig {
  embeddings?: {
    provider: 'openai' | 'ollama';
    apiKey?: string;
    model?: string;
  };
  chunking?: {
    strategy?: 'fixed' | 'sentence' | 'paragraph';
    size?: number;
    overlap?: number;
  };
  retrieval?: {
    threshold?: number; // presumably a similarity cutoff — confirm in rag/retriever
  };
}

/** Chunks matched for a query, plus the query itself for traceability. */
export interface RetrievedContext {
  chunks: DocumentChunk[];
  query: string;
  totalFound: number;
}

/** Contract implemented by the RAG retriever (see src/rag/retriever.ts). */
export interface RAGRetriever {
  init(config: RAGConfig): Promise<void>;
  addDocuments(filePaths: string[]): Promise<void>;
  /** Retrieved context pre-formatted as a single string. */
  retrieve(query: string, topK?: number): Promise<string>;
  /** Raw matched chunks for callers that need structure. */
  retrieveChunks(query: string, topK?: number): Promise<RetrievedContext>;
}

// ─── Channel Internals ───────────────────────────────────────────────────────

/** Channels the framework can route messages through. */
export type ChannelName = 'web' | 'whatsapp' | 'telegram' | 'discord' | 'slack';

/** Channel-normalized inbound message handed to the agent. */
export interface IncomingMessage {
  id: string;
  sessionId: string;
  userId: string;
  channel: ChannelName;
  text: string;
  timestamp: Date;
  raw?: unknown; // original channel-specific payload, when retained
}
@@ -0,0 +1,79 @@
1
/**
 * @module database/schema
 * SvaraJS — SQLite database schema
 *
 * DDL for all internal SvaraJS tables.
 * Users can extend this with their own tables via db.exec().
 */

// Bump when CREATE_TABLES_SQL changes shape; written into svara_meta
// via INSERT_META_SQL below.
export const SCHEMA_VERSION = 1;

// Idempotent DDL (CREATE ... IF NOT EXISTS) for every internal table.
export const CREATE_TABLES_SQL = `
-- Schema version tracking
CREATE TABLE IF NOT EXISTS svara_meta (
  key TEXT PRIMARY KEY,
  value TEXT NOT NULL
);

-- Conversation history persistence
CREATE TABLE IF NOT EXISTS svara_messages (
  id TEXT PRIMARY KEY,
  session_id TEXT NOT NULL,
  role TEXT NOT NULL CHECK(role IN ('user', 'assistant', 'system', 'tool')),
  content TEXT NOT NULL,
  tool_call_id TEXT,
  created_at INTEGER NOT NULL DEFAULT (unixepoch())
);

CREATE INDEX IF NOT EXISTS idx_messages_session
  ON svara_messages (session_id, created_at);

-- Session metadata
CREATE TABLE IF NOT EXISTS svara_sessions (
  id TEXT PRIMARY KEY,
  user_id TEXT,
  channel TEXT NOT NULL,
  created_at INTEGER NOT NULL DEFAULT (unixepoch()),
  updated_at INTEGER NOT NULL DEFAULT (unixepoch()),
  metadata TEXT DEFAULT '{}'
);

-- Vector store chunks for RAG
CREATE TABLE IF NOT EXISTS svara_chunks (
  id TEXT PRIMARY KEY,
  document_id TEXT NOT NULL,
  content TEXT NOT NULL,
  chunk_index INTEGER NOT NULL,
  embedding BLOB, -- stored as binary float32 array
  source TEXT NOT NULL,
  metadata TEXT DEFAULT '{}',
  created_at INTEGER NOT NULL DEFAULT (unixepoch())
);

CREATE INDEX IF NOT EXISTS idx_chunks_document
  ON svara_chunks (document_id);

-- Document registry
CREATE TABLE IF NOT EXISTS svara_documents (
  id TEXT PRIMARY KEY,
  source TEXT NOT NULL UNIQUE,
  type TEXT NOT NULL,
  size INTEGER,
  hash TEXT,
  indexed_at INTEGER NOT NULL DEFAULT (unixepoch()),
  metadata TEXT DEFAULT '{}'
);

-- Key-value store for arbitrary agent state
CREATE TABLE IF NOT EXISTS svara_kv (
  key TEXT PRIMARY KEY,
  value TEXT NOT NULL,
  expires_at INTEGER, -- unix timestamp, NULL = no expiry
  updated_at INTEGER NOT NULL DEFAULT (unixepoch())
);
`;

// Records the schema version and creation time. Two positional parameters:
// (1) schema version, (2) creation timestamp.
export const INSERT_META_SQL = `
INSERT OR REPLACE INTO svara_meta (key, value)
VALUES ('schema_version', ?), ('created_at', ?);
`;