claude-flow 2.0.0-alpha.66 → 2.0.0-alpha.68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112) hide show
  1. package/.claude/cache/agent-pool.json +33 -0
  2. package/.claude/cache/memory-optimization.json +19 -0
  3. package/.claude/cache/neural-optimization.json +25 -0
  4. package/.claude/cache/optimized-hooks.json +19 -0
  5. package/.claude/cache/parallel-processing.json +26 -0
  6. package/.claude/optimized-settings.json +270 -0
  7. package/.claude/settings-backup.json +186 -0
  8. package/.claude/settings-enhanced.json +278 -0
  9. package/.claude/settings-fixed.json +186 -0
  10. package/.claude/settings.json +105 -8
  11. package/CHANGELOG.md +38 -0
  12. package/bin/claude-flow +1 -1
  13. package/dist/cli/simple-commands/hive-mind.js +1 -1
  14. package/dist/cli/simple-commands/hive-mind.js.map +1 -1
  15. package/dist/cli/simple-commands/hooks.js +6 -4
  16. package/dist/cli/simple-commands/hooks.js.map +1 -1
  17. package/dist/providers/anthropic-provider.d.ts +27 -0
  18. package/dist/providers/anthropic-provider.d.ts.map +1 -0
  19. package/dist/providers/anthropic-provider.js +247 -0
  20. package/dist/providers/anthropic-provider.js.map +1 -0
  21. package/dist/providers/base-provider.d.ts +134 -0
  22. package/dist/providers/base-provider.d.ts.map +1 -0
  23. package/dist/providers/base-provider.js +407 -0
  24. package/dist/providers/base-provider.js.map +1 -0
  25. package/dist/providers/cohere-provider.d.ts +28 -0
  26. package/dist/providers/cohere-provider.d.ts.map +1 -0
  27. package/dist/providers/cohere-provider.js +407 -0
  28. package/dist/providers/cohere-provider.js.map +1 -0
  29. package/dist/providers/google-provider.d.ts +23 -0
  30. package/dist/providers/google-provider.d.ts.map +1 -0
  31. package/dist/providers/google-provider.js +362 -0
  32. package/dist/providers/google-provider.js.map +1 -0
  33. package/dist/providers/index.d.ts +14 -0
  34. package/dist/providers/index.d.ts.map +1 -0
  35. package/dist/providers/index.js +18 -0
  36. package/dist/providers/index.js.map +1 -0
  37. package/dist/providers/ollama-provider.d.ts +23 -0
  38. package/dist/providers/ollama-provider.d.ts.map +1 -0
  39. package/dist/providers/ollama-provider.js +374 -0
  40. package/dist/providers/ollama-provider.js.map +1 -0
  41. package/dist/providers/openai-provider.d.ts +23 -0
  42. package/dist/providers/openai-provider.d.ts.map +1 -0
  43. package/dist/providers/openai-provider.js +349 -0
  44. package/dist/providers/openai-provider.js.map +1 -0
  45. package/dist/providers/provider-manager.d.ts +139 -0
  46. package/dist/providers/provider-manager.d.ts.map +1 -0
  47. package/dist/providers/provider-manager.js +513 -0
  48. package/dist/providers/provider-manager.js.map +1 -0
  49. package/dist/providers/types.d.ts +356 -0
  50. package/dist/providers/types.d.ts.map +1 -0
  51. package/dist/providers/types.js +61 -0
  52. package/dist/providers/types.js.map +1 -0
  53. package/dist/providers/utils.d.ts +37 -0
  54. package/dist/providers/utils.d.ts.map +1 -0
  55. package/dist/providers/utils.js +322 -0
  56. package/dist/providers/utils.js.map +1 -0
  57. package/dist/services/agentic-flow-hooks/hook-manager.d.ts +70 -0
  58. package/dist/services/agentic-flow-hooks/hook-manager.d.ts.map +1 -0
  59. package/dist/services/agentic-flow-hooks/hook-manager.js +512 -0
  60. package/dist/services/agentic-flow-hooks/hook-manager.js.map +1 -0
  61. package/dist/services/agentic-flow-hooks/index.d.ts +36 -0
  62. package/dist/services/agentic-flow-hooks/index.d.ts.map +1 -0
  63. package/dist/services/agentic-flow-hooks/index.js +325 -0
  64. package/dist/services/agentic-flow-hooks/index.js.map +1 -0
  65. package/dist/services/agentic-flow-hooks/llm-hooks.d.ts +33 -0
  66. package/dist/services/agentic-flow-hooks/llm-hooks.d.ts.map +1 -0
  67. package/dist/services/agentic-flow-hooks/llm-hooks.js +415 -0
  68. package/dist/services/agentic-flow-hooks/llm-hooks.js.map +1 -0
  69. package/dist/services/agentic-flow-hooks/memory-hooks.d.ts +45 -0
  70. package/dist/services/agentic-flow-hooks/memory-hooks.d.ts.map +1 -0
  71. package/dist/services/agentic-flow-hooks/memory-hooks.js +532 -0
  72. package/dist/services/agentic-flow-hooks/memory-hooks.js.map +1 -0
  73. package/dist/services/agentic-flow-hooks/neural-hooks.d.ts +39 -0
  74. package/dist/services/agentic-flow-hooks/neural-hooks.d.ts.map +1 -0
  75. package/dist/services/agentic-flow-hooks/neural-hooks.js +561 -0
  76. package/dist/services/agentic-flow-hooks/neural-hooks.js.map +1 -0
  77. package/dist/services/agentic-flow-hooks/performance-hooks.d.ts +33 -0
  78. package/dist/services/agentic-flow-hooks/performance-hooks.d.ts.map +1 -0
  79. package/dist/services/agentic-flow-hooks/performance-hooks.js +621 -0
  80. package/dist/services/agentic-flow-hooks/performance-hooks.js.map +1 -0
  81. package/dist/services/agentic-flow-hooks/types.d.ts +379 -0
  82. package/dist/services/agentic-flow-hooks/types.d.ts.map +1 -0
  83. package/dist/services/agentic-flow-hooks/types.js +8 -0
  84. package/dist/services/agentic-flow-hooks/types.js.map +1 -0
  85. package/dist/services/agentic-flow-hooks/workflow-hooks.d.ts +39 -0
  86. package/dist/services/agentic-flow-hooks/workflow-hooks.d.ts.map +1 -0
  87. package/dist/services/agentic-flow-hooks/workflow-hooks.js +742 -0
  88. package/dist/services/agentic-flow-hooks/workflow-hooks.js.map +1 -0
  89. package/package.json +1 -1
  90. package/scripts/optimize-performance.js +400 -0
  91. package/scripts/performance-monitor.js +263 -0
  92. package/src/cli/help-text.js +1 -1
  93. package/src/cli/simple-cli.js +1 -1
  94. package/src/cli/simple-commands/hive-mind.js +1 -1
  95. package/src/providers/anthropic-provider.ts +282 -0
  96. package/src/providers/base-provider.ts +560 -0
  97. package/src/providers/cohere-provider.ts +521 -0
  98. package/src/providers/google-provider.ts +477 -0
  99. package/src/providers/index.ts +21 -0
  100. package/src/providers/ollama-provider.ts +489 -0
  101. package/src/providers/openai-provider.ts +476 -0
  102. package/src/providers/provider-manager.ts +654 -0
  103. package/src/providers/types.ts +531 -0
  104. package/src/providers/utils.ts +376 -0
  105. package/src/services/agentic-flow-hooks/hook-manager.ts +701 -0
  106. package/src/services/agentic-flow-hooks/index.ts +386 -0
  107. package/src/services/agentic-flow-hooks/llm-hooks.ts +557 -0
  108. package/src/services/agentic-flow-hooks/memory-hooks.ts +710 -0
  109. package/src/services/agentic-flow-hooks/neural-hooks.ts +758 -0
  110. package/src/services/agentic-flow-hooks/performance-hooks.ts +827 -0
  111. package/src/services/agentic-flow-hooks/types.ts +503 -0
  112. package/src/services/agentic-flow-hooks/workflow-hooks.ts +1026 -0
@@ -0,0 +1,557 @@
1
+ /**
2
+ * LLM-specific hooks for agentic-flow integration
3
+ *
4
+ * Provides pre/post operation hooks for all LLM calls with
5
+ * memory persistence and performance optimization.
6
+ */
7
+
8
+ import { agenticHookManager } from './hook-manager.js';
9
+ import type {
10
+ AgenticHookContext,
11
+ HookHandlerResult,
12
+ LLMHookPayload,
13
+ LLMMetrics,
14
+ Pattern,
15
+ SideEffect,
16
+ } from './types.js';
17
+
18
+ // ===== Pre-LLM Call Hook =====
19
+
20
/**
 * Pre-LLM-call hook.
 *
 * Runs before every LLM invocation: if a cached response exists for an
 * equivalent request it short-circuits the call (continue: false) and
 * returns the cached payload; otherwise it applies provider/request
 * optimizations and emits metric + memory side effects.
 */
export const preLLMCallHook = {
  id: 'agentic-pre-llm-call',
  type: 'pre-llm-call' as const,
  priority: 100,
  handler: async (
    payload: LLMHookPayload,
    context: AgenticHookContext
  ): Promise<HookHandlerResult> => {
    const { provider, model, operation, request } = payload;

    // Check memory for similar requests (key is a normalized fingerprint
    // of provider/model/messages/temperature/maxTokens).
    const cacheKey = generateCacheKey(provider, model, request);
    const cached = await checkMemoryCache(cacheKey, context);

    if (cached) {
      return {
        continue: false, // Skip LLM call — serve the cached response
        modified: true,
        payload: {
          ...payload,
          response: cached.response,
          metrics: {
            ...cached.metrics,
            cacheHit: true,
          },
        },
        sideEffects: [
          {
            type: 'metric',
            action: 'increment',
            data: { name: 'llm.cache.hits' },
          },
        ],
      };
    }

    // Load provider-specific optimizations (currently placeholder defaults).
    const optimizations = await loadProviderOptimizations(provider, context);

    // Apply request optimizations (token cap, default temperature,
    // stop sequences).
    const optimizedRequest = applyRequestOptimizations(
      request,
      optimizations,
      context
    );

    // Track pre-call metrics and persist the outgoing request keyed by
    // correlation id for later inspection.
    const sideEffects: SideEffect[] = [
      {
        type: 'metric',
        action: 'increment',
        data: { name: `llm.calls.${provider}.${model}` },
      },
      {
        type: 'memory',
        action: 'store',
        data: {
          key: `llm:request:${context.correlationId}`,
          value: {
            provider,
            model,
            operation,
            request: optimizedRequest,
            timestamp: Date.now(),
          },
          ttl: 3600, // 1 hour
        },
      },
    ];

    return {
      continue: true,
      modified: true,
      payload: {
        ...payload,
        request: optimizedRequest,
      },
      sideEffects,
    };
  },
};
101
+
102
+ // ===== Post-LLM Call Hook =====
103
+
104
+ export const postLLMCallHook = {
105
+ id: 'agentic-post-llm-call',
106
+ type: 'post-llm-call' as const,
107
+ priority: 100,
108
+ handler: async (
109
+ payload: LLMHookPayload,
110
+ context: AgenticHookContext
111
+ ): Promise<HookHandlerResult> => {
112
+ const { provider, model, request, response, metrics } = payload;
113
+
114
+ if (!response || !metrics) {
115
+ return { continue: true };
116
+ }
117
+
118
+ const sideEffects: SideEffect[] = [];
119
+
120
+ // Store response in memory for caching
121
+ const cacheKey = generateCacheKey(provider, model, request);
122
+ sideEffects.push({
123
+ type: 'memory',
124
+ action: 'store',
125
+ data: {
126
+ key: `llm:cache:${cacheKey}`,
127
+ value: {
128
+ response,
129
+ metrics,
130
+ timestamp: Date.now(),
131
+ },
132
+ ttl: determineCacheTTL(operation, response),
133
+ },
134
+ });
135
+
136
+ // Extract patterns for neural training
137
+ const patterns = extractResponsePatterns(request, response, metrics);
138
+ if (patterns.length > 0) {
139
+ sideEffects.push({
140
+ type: 'neural',
141
+ action: 'train',
142
+ data: {
143
+ patterns,
144
+ modelId: `llm-optimizer-${provider}`,
145
+ },
146
+ });
147
+ }
148
+
149
+ // Update performance metrics
150
+ sideEffects.push(
151
+ {
152
+ type: 'metric',
153
+ action: 'update',
154
+ data: {
155
+ name: `llm.latency.${provider}.${model}`,
156
+ value: metrics.latency,
157
+ },
158
+ },
159
+ {
160
+ type: 'metric',
161
+ action: 'update',
162
+ data: {
163
+ name: `llm.tokens.${provider}.${model}`,
164
+ value: response.usage.totalTokens,
165
+ },
166
+ },
167
+ {
168
+ type: 'metric',
169
+ action: 'update',
170
+ data: {
171
+ name: `llm.cost.${provider}.${model}`,
172
+ value: metrics.costEstimate,
173
+ },
174
+ }
175
+ );
176
+
177
+ // Check for performance issues
178
+ if (metrics.latency > getLatencyThreshold(provider, model)) {
179
+ sideEffects.push({
180
+ type: 'notification',
181
+ action: 'send',
182
+ data: {
183
+ level: 'warning',
184
+ message: `High latency detected for ${provider}/${model}: ${metrics.latency}ms`,
185
+ },
186
+ });
187
+ }
188
+
189
+ // Store provider health score
190
+ await updateProviderHealth(provider, metrics.providerHealth, context);
191
+
192
+ return {
193
+ continue: true,
194
+ sideEffects,
195
+ };
196
+ },
197
+ };
198
+
199
+ // ===== LLM Error Hook =====
200
+
201
/**
 * LLM error hook.
 *
 * Logs the failure, increments error metrics, and — when a healthy
 * fallback provider is available — suppresses the error (continue: false)
 * and rewrites the payload so the call can be retried against the
 * fallback provider/model.
 */
export const llmErrorHook = {
  id: 'agentic-llm-error',
  type: 'llm-error' as const,
  priority: 100,
  handler: async (
    payload: LLMHookPayload,
    context: AgenticHookContext
  ): Promise<HookHandlerResult> => {
    const { provider, model, error } = payload;

    // Nothing to do if the hook fired without an actual error attached.
    if (!error) {
      return { continue: true };
    }

    const sideEffects: SideEffect[] = [];

    // Log error details, including the original request for debugging.
    sideEffects.push({
      type: 'log',
      action: 'write',
      data: {
        level: 'error',
        message: `LLM error from ${provider}/${model}`,
        data: {
          error: error.message,
          stack: error.stack,
          request: payload.request,
        },
      },
    });

    // Update error metrics.
    sideEffects.push({
      type: 'metric',
      action: 'increment',
      data: { name: `llm.errors.${provider}.${model}` },
    });

    // Check if we should fall back to another provider/model based on
    // recorded health history.
    const fallbackProvider = await selectFallbackProvider(
      provider,
      model,
      error,
      context
    );

    if (fallbackProvider) {
      return {
        continue: false, // Don't propagate error
        modified: true,
        payload: {
          ...payload,
          provider: fallbackProvider.provider,
          model: fallbackProvider.model,
          error: undefined, // Clear error for retry
        },
        sideEffects: [
          ...sideEffects,
          {
            type: 'notification',
            action: 'send',
            data: {
              level: 'info',
              message: `Falling back from ${provider}/${model} to ${fallbackProvider.provider}/${fallbackProvider.model}`,
            },
          },
        ],
      };
    }

    // No viable fallback: let the error propagate with the logged effects.
    return {
      continue: true,
      sideEffects,
    };
  },
};
277
+
278
+ // ===== LLM Retry Hook =====
279
+
280
+ export const llmRetryHook = {
281
+ id: 'agentic-llm-retry',
282
+ type: 'llm-retry' as const,
283
+ priority: 90,
284
+ handler: async (
285
+ payload: LLMHookPayload,
286
+ context: AgenticHookContext
287
+ ): Promise<HookHandlerResult> => {
288
+ const { provider, model, metrics } = payload;
289
+ const retryCount = metrics?.retryCount || 0;
290
+
291
+ // Adjust request parameters for retry
292
+ const adjustedRequest = adjustRequestForRetry(
293
+ payload.request,
294
+ retryCount
295
+ );
296
+
297
+ const sideEffects: SideEffect[] = [
298
+ {
299
+ type: 'metric',
300
+ action: 'increment',
301
+ data: { name: `llm.retries.${provider}.${model}` },
302
+ },
303
+ ];
304
+
305
+ // Apply exponential backoff
306
+ const backoffMs = Math.min(1000 * Math.pow(2, retryCount), 10000);
307
+ await new Promise(resolve => setTimeout(resolve, backoffMs));
308
+
309
+ return {
310
+ continue: true,
311
+ modified: true,
312
+ payload: {
313
+ ...payload,
314
+ request: adjustedRequest,
315
+ metrics: {
316
+ ...metrics,
317
+ retryCount: retryCount + 1,
318
+ },
319
+ },
320
+ sideEffects,
321
+ };
322
+ },
323
+ };
324
+
325
+ // ===== Helper Functions =====
326
+
327
+ function generateCacheKey(
328
+ provider: string,
329
+ model: string,
330
+ request: LLMHookPayload['request']
331
+ ): string {
332
+ const normalized = {
333
+ provider,
334
+ model,
335
+ messages: request.messages?.map(m => ({
336
+ role: m.role,
337
+ content: m.content.substring(0, 100), // First 100 chars
338
+ })),
339
+ temperature: request.temperature,
340
+ maxTokens: request.maxTokens,
341
+ };
342
+
343
+ return Buffer.from(JSON.stringify(normalized)).toString('base64');
344
+ }
345
+
346
+ async function checkMemoryCache(
347
+ cacheKey: string,
348
+ context: AgenticHookContext
349
+ ): Promise<any | null> {
350
+ // Implementation would integrate with memory service
351
+ // This is a placeholder
352
+ return null;
353
+ }
354
+
355
+ async function loadProviderOptimizations(
356
+ provider: string,
357
+ context: AgenticHookContext
358
+ ): Promise<any> {
359
+ // Load provider-specific optimizations from memory
360
+ // This is a placeholder
361
+ return {
362
+ maxRetries: 3,
363
+ timeout: 30000,
364
+ rateLimit: 100,
365
+ };
366
+ }
367
+
368
+ function applyRequestOptimizations(
369
+ request: LLMHookPayload['request'],
370
+ optimizations: any,
371
+ context: AgenticHookContext
372
+ ): LLMHookPayload['request'] {
373
+ // Apply various optimizations
374
+ const optimized = { ...request };
375
+
376
+ // Optimize token usage
377
+ if (optimized.maxTokens && optimized.maxTokens > 4000) {
378
+ optimized.maxTokens = 4000; // Cap at reasonable limit
379
+ }
380
+
381
+ // Optimize temperature for consistency
382
+ if (optimized.temperature === undefined) {
383
+ optimized.temperature = 0.7;
384
+ }
385
+
386
+ // Add stop sequences if missing
387
+ if (!optimized.stopSequences && optimized.messages) {
388
+ optimized.stopSequences = ['\n\nHuman:', '\n\nAssistant:'];
389
+ }
390
+
391
+ return optimized;
392
+ }
393
+
394
+ function determineCacheTTL(
395
+ operation: string,
396
+ response: LLMHookPayload['response']
397
+ ): number {
398
+ // Determine cache TTL based on operation and response
399
+ switch (operation) {
400
+ case 'embedding':
401
+ return 86400; // 24 hours for embeddings
402
+ case 'completion':
403
+ // Shorter TTL for completions
404
+ return response?.usage?.totalTokens && response.usage.totalTokens > 1000
405
+ ? 1800 // 30 minutes for long responses
406
+ : 3600; // 1 hour for short responses
407
+ default:
408
+ return 3600; // 1 hour default
409
+ }
410
+ }
411
+
412
+ function extractResponsePatterns(
413
+ request: LLMHookPayload['request'],
414
+ response: LLMHookPayload['response'],
415
+ metrics: LLMMetrics
416
+ ): Pattern[] {
417
+ const patterns: Pattern[] = [];
418
+
419
+ // Extract performance patterns
420
+ if (metrics.latency > 1000) {
421
+ patterns.push({
422
+ id: `perf_${Date.now()}`,
423
+ type: 'optimization',
424
+ confidence: 0.8,
425
+ occurrences: 1,
426
+ context: {
427
+ provider: metrics.providerHealth < 0.8 ? 'unhealthy' : 'healthy',
428
+ requestSize: JSON.stringify(request).length,
429
+ responseTokens: response?.usage?.totalTokens || 0,
430
+ latency: metrics.latency,
431
+ },
432
+ });
433
+ }
434
+
435
+ // Extract success patterns
436
+ if (response?.choices?.[0]?.finishReason === 'stop') {
437
+ patterns.push({
438
+ id: `success_${Date.now()}`,
439
+ type: 'success',
440
+ confidence: 0.9,
441
+ occurrences: 1,
442
+ context: {
443
+ temperature: request.temperature,
444
+ maxTokens: request.maxTokens,
445
+ actualTokens: response.usage?.totalTokens || 0,
446
+ },
447
+ });
448
+ }
449
+
450
+ return patterns;
451
+ }
452
+
453
+ function getLatencyThreshold(provider: string, model: string): number {
454
+ // Provider/model specific thresholds
455
+ const thresholds: Record<string, number> = {
456
+ 'openai:gpt-4': 5000,
457
+ 'openai:gpt-3.5-turbo': 2000,
458
+ 'anthropic:claude-3': 4000,
459
+ 'anthropic:claude-instant': 1500,
460
+ };
461
+
462
+ return thresholds[`${provider}:${model}`] || 3000;
463
+ }
464
+
465
+ async function updateProviderHealth(
466
+ provider: string,
467
+ health: number,
468
+ context: AgenticHookContext
469
+ ): Promise<void> {
470
+ // Update provider health in memory
471
+ const healthKey = `provider:health:${provider}`;
472
+ const currentHealth = await context.memory.cache.get(healthKey) || [];
473
+
474
+ currentHealth.push({
475
+ timestamp: Date.now(),
476
+ health,
477
+ });
478
+
479
+ // Keep last 100 health checks
480
+ if (currentHealth.length > 100) {
481
+ currentHealth.shift();
482
+ }
483
+
484
+ await context.memory.cache.set(healthKey, currentHealth);
485
+ }
486
+
487
+ async function selectFallbackProvider(
488
+ provider: string,
489
+ model: string,
490
+ error: Error,
491
+ context: AgenticHookContext
492
+ ): Promise<{ provider: string; model: string } | null> {
493
+ // Implement intelligent fallback selection
494
+ const fallbacks: Record<string, { provider: string; model: string }[]> = {
495
+ 'openai': [
496
+ { provider: 'anthropic', model: 'claude-3' },
497
+ { provider: 'cohere', model: 'command' },
498
+ ],
499
+ 'anthropic': [
500
+ { provider: 'openai', model: 'gpt-4' },
501
+ { provider: 'cohere', model: 'command' },
502
+ ],
503
+ };
504
+
505
+ const candidates = fallbacks[provider] || [];
506
+
507
+ // Select based on health scores
508
+ for (const candidate of candidates) {
509
+ const healthKey = `provider:health:${candidate.provider}`;
510
+ const healthData = await context.memory.cache.get(healthKey) || [];
511
+
512
+ if (healthData.length > 0) {
513
+ const avgHealth = healthData.reduce((sum: number, h: any) =>
514
+ sum + h.health, 0
515
+ ) / healthData.length;
516
+
517
+ if (avgHealth > 0.7) {
518
+ return candidate;
519
+ }
520
+ }
521
+ }
522
+
523
+ return null;
524
+ }
525
+
526
+ function adjustRequestForRetry(
527
+ request: LLMHookPayload['request'],
528
+ retryCount: number
529
+ ): LLMHookPayload['request'] {
530
+ const adjusted = { ...request };
531
+
532
+ // Increase temperature slightly for variety
533
+ if (adjusted.temperature !== undefined) {
534
+ adjusted.temperature = Math.min(
535
+ adjusted.temperature + (0.1 * retryCount),
536
+ 1.0
537
+ );
538
+ }
539
+
540
+ // Reduce max tokens to improve success rate
541
+ if (adjusted.maxTokens !== undefined) {
542
+ adjusted.maxTokens = Math.floor(
543
+ adjusted.maxTokens * Math.pow(0.9, retryCount)
544
+ );
545
+ }
546
+
547
+ return adjusted;
548
+ }
549
+
550
+ // ===== Register Hooks =====
551
+
552
/**
 * Registers all LLM lifecycle hooks (pre-call, post-call, error, retry)
 * with the shared agentic hook manager. Call once during service startup.
 */
export function registerLLMHooks(): void {
  agenticHookManager.register(preLLMCallHook);
  agenticHookManager.register(postLLMCallHook);
  agenticHookManager.register(llmErrorHook);
  agenticHookManager.register(llmRetryHook);
}