confused-ai-core 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114) hide show
  1. package/FEATURES.md +169 -0
  2. package/package.json +119 -0
  3. package/src/agent.ts +187 -0
  4. package/src/agentic/index.ts +87 -0
  5. package/src/agentic/runner.ts +386 -0
  6. package/src/agentic/types.ts +91 -0
  7. package/src/artifacts/artifact.ts +417 -0
  8. package/src/artifacts/index.ts +42 -0
  9. package/src/artifacts/media.ts +304 -0
  10. package/src/cli/index.ts +122 -0
  11. package/src/core/base-agent.ts +151 -0
  12. package/src/core/context-builder.ts +106 -0
  13. package/src/core/index.ts +8 -0
  14. package/src/core/schemas.ts +17 -0
  15. package/src/core/types.ts +158 -0
  16. package/src/create-agent.ts +309 -0
  17. package/src/debug-logger.ts +188 -0
  18. package/src/dx/agent.ts +88 -0
  19. package/src/dx/define-agent.ts +183 -0
  20. package/src/dx/dev-logger.ts +57 -0
  21. package/src/dx/index.ts +11 -0
  22. package/src/errors.ts +175 -0
  23. package/src/execution/engine.ts +522 -0
  24. package/src/execution/graph-builder.ts +362 -0
  25. package/src/execution/index.ts +8 -0
  26. package/src/execution/types.ts +257 -0
  27. package/src/execution/worker-pool.ts +308 -0
  28. package/src/extensions/index.ts +123 -0
  29. package/src/guardrails/allowlist.ts +155 -0
  30. package/src/guardrails/index.ts +17 -0
  31. package/src/guardrails/types.ts +159 -0
  32. package/src/guardrails/validator.ts +265 -0
  33. package/src/index.ts +74 -0
  34. package/src/knowledge/index.ts +5 -0
  35. package/src/knowledge/types.ts +52 -0
  36. package/src/learning/in-memory-store.ts +72 -0
  37. package/src/learning/index.ts +6 -0
  38. package/src/learning/types.ts +42 -0
  39. package/src/llm/cache.ts +300 -0
  40. package/src/llm/index.ts +22 -0
  41. package/src/llm/model-resolver.ts +81 -0
  42. package/src/llm/openai-provider.ts +313 -0
  43. package/src/llm/openrouter-provider.ts +29 -0
  44. package/src/llm/types.ts +131 -0
  45. package/src/memory/in-memory-store.ts +255 -0
  46. package/src/memory/index.ts +7 -0
  47. package/src/memory/types.ts +193 -0
  48. package/src/memory/vector-store.ts +251 -0
  49. package/src/observability/console-logger.ts +123 -0
  50. package/src/observability/index.ts +12 -0
  51. package/src/observability/metrics.ts +85 -0
  52. package/src/observability/otlp-exporter.ts +417 -0
  53. package/src/observability/tracer.ts +105 -0
  54. package/src/observability/types.ts +341 -0
  55. package/src/orchestration/agent-adapter.ts +33 -0
  56. package/src/orchestration/index.ts +34 -0
  57. package/src/orchestration/load-balancer.ts +151 -0
  58. package/src/orchestration/mcp-types.ts +59 -0
  59. package/src/orchestration/message-bus.ts +192 -0
  60. package/src/orchestration/orchestrator.ts +349 -0
  61. package/src/orchestration/pipeline.ts +66 -0
  62. package/src/orchestration/supervisor.ts +107 -0
  63. package/src/orchestration/swarm.ts +1099 -0
  64. package/src/orchestration/toolkit.ts +47 -0
  65. package/src/orchestration/types.ts +339 -0
  66. package/src/planner/classical-planner.ts +383 -0
  67. package/src/planner/index.ts +8 -0
  68. package/src/planner/llm-planner.ts +353 -0
  69. package/src/planner/types.ts +227 -0
  70. package/src/planner/validator.ts +297 -0
  71. package/src/production/circuit-breaker.ts +290 -0
  72. package/src/production/graceful-shutdown.ts +251 -0
  73. package/src/production/health.ts +333 -0
  74. package/src/production/index.ts +57 -0
  75. package/src/production/latency-eval.ts +62 -0
  76. package/src/production/rate-limiter.ts +287 -0
  77. package/src/production/resumable-stream.ts +289 -0
  78. package/src/production/types.ts +81 -0
  79. package/src/sdk/index.ts +374 -0
  80. package/src/session/db-driver.ts +50 -0
  81. package/src/session/in-memory-store.ts +235 -0
  82. package/src/session/index.ts +12 -0
  83. package/src/session/sql-store.ts +315 -0
  84. package/src/session/sqlite-store.ts +61 -0
  85. package/src/session/types.ts +153 -0
  86. package/src/tools/base-tool.ts +223 -0
  87. package/src/tools/browser-tool.ts +123 -0
  88. package/src/tools/calculator-tool.ts +265 -0
  89. package/src/tools/file-tools.ts +394 -0
  90. package/src/tools/github-tool.ts +432 -0
  91. package/src/tools/hackernews-tool.ts +187 -0
  92. package/src/tools/http-tool.ts +118 -0
  93. package/src/tools/index.ts +99 -0
  94. package/src/tools/jira-tool.ts +373 -0
  95. package/src/tools/notion-tool.ts +322 -0
  96. package/src/tools/openai-tool.ts +236 -0
  97. package/src/tools/registry.ts +131 -0
  98. package/src/tools/serpapi-tool.ts +234 -0
  99. package/src/tools/shell-tool.ts +118 -0
  100. package/src/tools/slack-tool.ts +327 -0
  101. package/src/tools/telegram-tool.ts +127 -0
  102. package/src/tools/types.ts +229 -0
  103. package/src/tools/websearch-tool.ts +335 -0
  104. package/src/tools/wikipedia-tool.ts +177 -0
  105. package/src/tools/yfinance-tool.ts +33 -0
  106. package/src/voice/index.ts +17 -0
  107. package/src/voice/voice-provider.ts +228 -0
  108. package/tests/artifact.test.ts +241 -0
  109. package/tests/circuit-breaker.test.ts +171 -0
  110. package/tests/health.test.ts +192 -0
  111. package/tests/llm-cache.test.ts +186 -0
  112. package/tests/rate-limiter.test.ts +161 -0
  113. package/tsconfig.json +29 -0
  114. package/vitest.config.ts +47 -0
@@ -0,0 +1,300 @@
1
+ /**
2
+ * LLM Response Cache - Content-Addressable Caching for LLM Responses
3
+ *
4
+ * Agno-style caching layer to reduce costs and improve latency:
5
+ * - Content-addressable cache (hash of messages + model + params)
6
+ * - Configurable TTL and max entries
7
+ * - LRU eviction when full
8
+ * - In-memory with pluggable adapter interface
9
+ */
10
+
11
+ import type { Message, GenerateOptions, GenerateResult } from './types.js';
12
+ import type { MetricsCollector } from '../observability/types.js';
13
+
14
/** A single cached LLM response plus bookkeeping for TTL expiry and LRU eviction. */
interface CacheEntry {
  /** Content-addressable key this entry is stored under. */
  readonly key: string;
  /** The cached generation result returned to callers on a hit. */
  readonly result: GenerateResult;
  /** Epoch ms when the entry was stored. */
  readonly createdAt: number;
  /** Epoch ms after which the entry is stale and will be dropped on access/cleanup. */
  readonly expiresAt: number;
  /** Epoch ms of the most recent hit — mutable; drives LRU eviction order. */
  accessedAt: number;
  /** Number of times this entry has been served (diagnostic only). */
  accessCount: number;
}
23
+
24
/** Configuration for {@link LLMCache}. All fields are optional with safe defaults. */
export interface LLMCacheConfig {
  /** Maximum number of cached entries before LRU eviction kicks in (default: 1000). */
  readonly maxEntries?: number;
  /** Time-to-live per entry in milliseconds (default: 3600000 = 1 hour). */
  readonly ttlMs?: number;
  /** Master switch; when false, get/set become no-ops (default: true). */
  readonly enabled?: boolean;
  /** Optional metrics collector; receives `llm_cache.*` counters (hits, misses, evictions, ...). */
  readonly metrics?: MetricsCollector;
  /** Override for cache-key generation (default: a JSON-based hash of messages/model/params). */
  readonly hashFn?: (key: CacheKeyInput) => string;
}
37
+
38
/**
 * Input for cache key generation.
 *
 * Note: the default hash (see LLMCache.getKey) covers messages (role + content),
 * model, temperature and maxTokens, but deliberately IGNORES `tools` — supply a
 * custom `hashFn` if tool definitions must affect the cache key.
 */
export interface CacheKeyInput {
  readonly messages: Message[];
  readonly model?: string;
  readonly temperature?: number;
  readonly maxTokens?: number;
  /** Included for custom hashFn implementations; excluded from the default key. */
  readonly tools?: unknown[];
}
46
+
47
/** Point-in-time cache statistics snapshot (see LLMCache.getStats). */
export interface CacheStats {
  /** Number of successful lookups since construction or last clear(). */
  readonly hits: number;
  /** Number of failed lookups, including expired-entry lookups. */
  readonly misses: number;
  /** Current number of live entries (may include not-yet-collected expired ones). */
  readonly entries: number;
  /** hits / (hits + misses); 0 when no lookups have happened. */
  readonly hitRate: number;
  /** Number of entries removed by LRU eviction (not by expiry or delete). */
  readonly evictions: number;
}
55
+
56
+ /**
57
+ * LLM Response Cache with LRU eviction.
58
+ *
59
+ * @example
60
+ * const cache = new LLMCache({
61
+ * maxEntries: 500,
62
+ * ttlMs: 60 * 60 * 1000, // 1 hour
63
+ * });
64
+ *
65
+ * // Check cache before calling LLM
66
+ * const cached = cache.get({
67
+ * messages: [{ role: 'user', content: 'Hello' }],
68
+ * model: 'gpt-4o',
69
+ * });
70
+ *
71
+ * if (cached) {
72
+ * return cached;
73
+ * }
74
+ *
75
+ * const result = await llm.generateText(messages, options);
76
+ * cache.set({ messages, model: 'gpt-4o' }, result);
77
+ */
78
+ export class LLMCache {
79
+ private readonly cache = new Map<string, CacheEntry>();
80
+ private readonly config: Required<Omit<LLMCacheConfig, 'metrics' | 'hashFn'>> &
81
+ Pick<LLMCacheConfig, 'metrics' | 'hashFn'>;
82
+
83
+ private hits = 0;
84
+ private misses = 0;
85
+ private evictions = 0;
86
+
87
+ constructor(config: LLMCacheConfig = {}) {
88
+ this.config = {
89
+ maxEntries: config.maxEntries ?? 1000,
90
+ ttlMs: config.ttlMs ?? 3600_000, // 1 hour
91
+ enabled: config.enabled ?? true,
92
+ metrics: config.metrics,
93
+ hashFn: config.hashFn,
94
+ };
95
+ }
96
+
97
+ /** Check if caching is enabled */
98
+ isEnabled(): boolean {
99
+ return this.config.enabled;
100
+ }
101
+
102
+ /** Get cached result */
103
+ get(input: CacheKeyInput): GenerateResult | null {
104
+ if (!this.config.enabled) return null;
105
+
106
+ const key = this.getKey(input);
107
+ const entry = this.cache.get(key);
108
+
109
+ if (!entry) {
110
+ this.misses++;
111
+ this.recordMetric('cache_miss', 1);
112
+ return null;
113
+ }
114
+
115
+ // Check expiration
116
+ if (Date.now() > entry.expiresAt) {
117
+ this.cache.delete(key);
118
+ this.misses++;
119
+ this.recordMetric('cache_expired', 1);
120
+ return null;
121
+ }
122
+
123
+ // Update access info (LRU tracking)
124
+ entry.accessedAt = Date.now();
125
+ entry.accessCount++;
126
+
127
+ this.hits++;
128
+ this.recordMetric('cache_hit', 1);
129
+ return entry.result;
130
+ }
131
+
132
+ /** Set cached result */
133
+ set(input: CacheKeyInput, result: GenerateResult): void {
134
+ if (!this.config.enabled) return;
135
+
136
+ const key = this.getKey(input);
137
+ const now = Date.now();
138
+
139
+ // Evict if at capacity
140
+ if (this.cache.size >= this.config.maxEntries) {
141
+ this.evictLRU();
142
+ }
143
+
144
+ this.cache.set(key, {
145
+ key,
146
+ result,
147
+ createdAt: now,
148
+ expiresAt: now + this.config.ttlMs,
149
+ accessedAt: now,
150
+ accessCount: 1,
151
+ });
152
+
153
+ this.recordMetric('cache_set', 1);
154
+ }
155
+
156
+ /** Delete a cached entry */
157
+ delete(input: CacheKeyInput): boolean {
158
+ const key = this.getKey(input);
159
+ return this.cache.delete(key);
160
+ }
161
+
162
+ /** Check if entry exists (without affecting LRU) */
163
+ has(input: CacheKeyInput): boolean {
164
+ const key = this.getKey(input);
165
+ const entry = this.cache.get(key);
166
+ return entry !== undefined && Date.now() <= entry.expiresAt;
167
+ }
168
+
169
+ /** Clear all cached entries */
170
+ clear(): void {
171
+ this.cache.clear();
172
+ this.hits = 0;
173
+ this.misses = 0;
174
+ this.evictions = 0;
175
+ }
176
+
177
+ /** Get cache statistics */
178
+ getStats(): CacheStats {
179
+ const total = this.hits + this.misses;
180
+ return {
181
+ hits: this.hits,
182
+ misses: this.misses,
183
+ entries: this.cache.size,
184
+ hitRate: total > 0 ? this.hits / total : 0,
185
+ evictions: this.evictions,
186
+ };
187
+ }
188
+
189
+ /** Cleanup expired entries */
190
+ cleanup(): number {
191
+ const now = Date.now();
192
+ let removed = 0;
193
+
194
+ for (const [key, entry] of this.cache.entries()) {
195
+ if (now > entry.expiresAt) {
196
+ this.cache.delete(key);
197
+ removed++;
198
+ }
199
+ }
200
+
201
+ return removed;
202
+ }
203
+
204
+ // --- Private methods ---
205
+
206
+ private getKey(input: CacheKeyInput): string {
207
+ if (this.config.hashFn) {
208
+ return this.config.hashFn(input);
209
+ }
210
+
211
+ // Default: JSON-based hash
212
+ const normalized = {
213
+ messages: input.messages.map(m => ({
214
+ role: m.role,
215
+ content: m.content,
216
+ })),
217
+ model: input.model,
218
+ temperature: input.temperature,
219
+ maxTokens: input.maxTokens,
220
+ // Exclude tools from cache key by default (too variable)
221
+ };
222
+
223
+ return this.simpleHash(JSON.stringify(normalized));
224
+ }
225
+
226
+ private simpleHash(str: string): string {
227
+ let hash = 0;
228
+ for (let i = 0; i < str.length; i++) {
229
+ const char = str.charCodeAt(i);
230
+ hash = ((hash << 5) - hash) + char;
231
+ hash = hash & hash; // Convert to 32bit integer
232
+ }
233
+ return hash.toString(36);
234
+ }
235
+
236
+ private evictLRU(): void {
237
+ let oldest: CacheEntry | null = null;
238
+ let oldestKey = '';
239
+
240
+ for (const [key, entry] of this.cache.entries()) {
241
+ if (!oldest || entry.accessedAt < oldest.accessedAt) {
242
+ oldest = entry;
243
+ oldestKey = key;
244
+ }
245
+ }
246
+
247
+ if (oldestKey) {
248
+ this.cache.delete(oldestKey);
249
+ this.evictions++;
250
+ this.recordMetric('cache_eviction', 1);
251
+ }
252
+ }
253
+
254
+ private recordMetric(name: string, value: number): void {
255
+ this.config.metrics?.counter(`llm_cache.${name}`, value);
256
+ }
257
+ }
258
+
259
+ /**
260
+ * Wrap an LLM provider with caching.
261
+ * Note: Model should be passed separately to the cache key.
262
+ */
263
+ export function withCache<T extends {
264
+ generateText(messages: Message[], options?: GenerateOptions): Promise<GenerateResult>;
265
+ }>(
266
+ llm: T,
267
+ cache: LLMCache,
268
+ model?: string
269
+ ): T {
270
+ const originalGenerate = llm.generateText.bind(llm);
271
+
272
+ llm.generateText = async (messages: Message[], options?: GenerateOptions): Promise<GenerateResult> => {
273
+ const cacheInput: CacheKeyInput = {
274
+ messages,
275
+ model,
276
+ temperature: options?.temperature,
277
+ maxTokens: options?.maxTokens,
278
+ tools: options?.tools,
279
+ };
280
+
281
+ // Check cache
282
+ const cached = cache.get(cacheInput);
283
+ if (cached) {
284
+ return cached;
285
+ }
286
+
287
+ // Call LLM
288
+ const result = await originalGenerate(messages, options);
289
+
290
+ // Cache successful results
291
+ if (result.finishReason !== 'error') {
292
+ cache.set(cacheInput, result);
293
+ }
294
+
295
+ return result;
296
+ };
297
+
298
+ return llm;
299
+ }
300
+
@@ -0,0 +1,22 @@
1
/**
 * LLM provider abstraction — public entry point for the llm/ module.
 *
 * Re-exports the provider implementations, the "provider:model_id" resolver,
 * and the response-caching layer so consumers import from one place.
 */

export * from './types.js';
export { OpenAIProvider } from './openai-provider.js';
export { createOpenRouterProvider } from './openrouter-provider.js';
export type { OpenRouterProviderConfig } from './openrouter-provider.js';

// Model string resolution ("openai:gpt-4o", "ollama:llama3.2", ...)
export {
  resolveModelString,
  isModelString,
  PROVIDER as MODEL_PROVIDER,
  OPENROUTER_BASE_URL,
  OLLAMA_BASE_URL,
  LLAMABARN_BASE_URL,
} from './model-resolver.js';
export type { ResolvedModelConfig } from './model-resolver.js';

// LLM Caching
export { LLMCache, withCache } from './cache.js';
export type { LLMCacheConfig, CacheKeyInput, CacheStats } from './cache.js';
22
+
@@ -0,0 +1,81 @@
1
+ /**
2
+ * Model string resolver — Agno-style "provider:model_id" support.
3
+ * Use a single string to pick provider and model without importing provider classes.
4
+ *
5
+ * Format: "provider:model_id" (case-insensitive provider)
6
+ * Examples: "openai:gpt-4o", "openrouter:anthropic/claude-3.5-sonnet", "ollama:llama3.2", "llamabarn:gpt-oss-20b"
7
+ */
8
+
9
+ export const PROVIDER = {
10
+ OPENAI: 'openai',
11
+ OPENROUTER: 'openrouter',
12
+ OLLAMA: 'ollama',
13
+ LLAMABARN: 'llamabarn',
14
+ } as const;
15
+
16
+ export const OPENROUTER_BASE_URL = 'https://openrouter.ai/api/v1';
17
+ export const OLLAMA_BASE_URL = 'http://localhost:11434/v1';
18
+ export const LLAMABARN_BASE_URL = 'http://localhost:2276/v1';
19
+
20
+ const ENV_OPENAI_API_KEY = 'OPENAI_API_KEY';
21
+ const ENV_OPENROUTER_API_KEY = 'OPENROUTER_API_KEY';
22
+ const ENV_LLAMABARN_API_KEY = 'LLAMABARN_API_KEY';
23
+
24
+ export interface ResolvedModelConfig {
25
+ baseURL?: string;
26
+ apiKey?: string;
27
+ model: string;
28
+ }
29
+
30
+ /**
31
+ * Resolve a "provider:model_id" string to OpenAI-compatible config.
32
+ * Returns undefined if the string does not look like "provider:model_id".
33
+ */
34
+ export function resolveModelString(
35
+ modelStr: string,
36
+ getEnv?: (key: string) => string | undefined
37
+ ): ResolvedModelConfig | undefined {
38
+ const env = getEnv ?? (typeof process !== 'undefined' ? (k: string) => process.env?.[k] : undefined);
39
+ const colon = modelStr.indexOf(':');
40
+ if (colon <= 0) return undefined;
41
+
42
+ const provider = modelStr.slice(0, colon).trim().toLowerCase();
43
+ const modelId = modelStr.slice(colon + 1).trim();
44
+ if (!modelId) return undefined;
45
+
46
+ switch (provider) {
47
+ case PROVIDER.OPENAI:
48
+ return {
49
+ apiKey: env?.(ENV_OPENAI_API_KEY),
50
+ model: modelId,
51
+ };
52
+ case PROVIDER.OPENROUTER:
53
+ return {
54
+ baseURL: OPENROUTER_BASE_URL,
55
+ apiKey: env?.(ENV_OPENROUTER_API_KEY),
56
+ model: modelId,
57
+ };
58
+ case PROVIDER.OLLAMA:
59
+ return {
60
+ baseURL: OLLAMA_BASE_URL,
61
+ apiKey: 'not-needed',
62
+ model: modelId,
63
+ };
64
+ case PROVIDER.LLAMABARN:
65
+ return {
66
+ baseURL: LLAMABARN_BASE_URL,
67
+ apiKey: env?.(ENV_LLAMABARN_API_KEY) ?? 'not-needed',
68
+ model: modelId,
69
+ };
70
+ default:
71
+ return undefined;
72
+ }
73
+ }
74
+
75
+ /**
76
+ * Check if a string looks like "provider:model_id".
77
+ */
78
+ export function isModelString(s: string): boolean {
79
+ const colon = s.indexOf(':');
80
+ return colon > 0 && s.slice(colon + 1).trim().length > 0;
81
+ }