alma-memory 0.5.0-py3-none-any.whl → 0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. alma/__init__.py +296 -194
  2. alma/compression/__init__.py +33 -0
  3. alma/compression/pipeline.py +980 -0
  4. alma/confidence/__init__.py +47 -47
  5. alma/confidence/engine.py +540 -540
  6. alma/confidence/types.py +351 -351
  7. alma/config/loader.py +157 -157
  8. alma/consolidation/__init__.py +23 -23
  9. alma/consolidation/engine.py +678 -678
  10. alma/consolidation/prompts.py +84 -84
  11. alma/core.py +1189 -322
  12. alma/domains/__init__.py +30 -30
  13. alma/domains/factory.py +359 -359
  14. alma/domains/schemas.py +448 -448
  15. alma/domains/types.py +272 -272
  16. alma/events/__init__.py +75 -75
  17. alma/events/emitter.py +285 -284
  18. alma/events/storage_mixin.py +246 -246
  19. alma/events/types.py +126 -126
  20. alma/events/webhook.py +425 -425
  21. alma/exceptions.py +49 -49
  22. alma/extraction/__init__.py +31 -31
  23. alma/extraction/auto_learner.py +265 -264
  24. alma/extraction/extractor.py +420 -420
  25. alma/graph/__init__.py +106 -81
  26. alma/graph/backends/__init__.py +32 -18
  27. alma/graph/backends/kuzu.py +624 -0
  28. alma/graph/backends/memgraph.py +432 -0
  29. alma/graph/backends/memory.py +236 -236
  30. alma/graph/backends/neo4j.py +417 -417
  31. alma/graph/base.py +159 -159
  32. alma/graph/extraction.py +198 -198
  33. alma/graph/store.py +860 -860
  34. alma/harness/__init__.py +35 -35
  35. alma/harness/base.py +386 -386
  36. alma/harness/domains.py +705 -705
  37. alma/initializer/__init__.py +37 -37
  38. alma/initializer/initializer.py +418 -418
  39. alma/initializer/types.py +250 -250
  40. alma/integration/__init__.py +62 -62
  41. alma/integration/claude_agents.py +444 -432
  42. alma/integration/helena.py +423 -423
  43. alma/integration/victor.py +471 -471
  44. alma/learning/__init__.py +101 -86
  45. alma/learning/decay.py +878 -0
  46. alma/learning/forgetting.py +1446 -1446
  47. alma/learning/heuristic_extractor.py +390 -390
  48. alma/learning/protocols.py +374 -374
  49. alma/learning/validation.py +346 -346
  50. alma/mcp/__init__.py +123 -45
  51. alma/mcp/__main__.py +156 -156
  52. alma/mcp/resources.py +122 -122
  53. alma/mcp/server.py +955 -591
  54. alma/mcp/tools.py +3254 -511
  55. alma/observability/__init__.py +91 -0
  56. alma/observability/config.py +302 -0
  57. alma/observability/guidelines.py +170 -0
  58. alma/observability/logging.py +424 -0
  59. alma/observability/metrics.py +583 -0
  60. alma/observability/tracing.py +440 -0
  61. alma/progress/__init__.py +21 -21
  62. alma/progress/tracker.py +607 -607
  63. alma/progress/types.py +250 -250
  64. alma/retrieval/__init__.py +134 -53
  65. alma/retrieval/budget.py +525 -0
  66. alma/retrieval/cache.py +1304 -1061
  67. alma/retrieval/embeddings.py +202 -202
  68. alma/retrieval/engine.py +850 -366
  69. alma/retrieval/modes.py +365 -0
  70. alma/retrieval/progressive.py +560 -0
  71. alma/retrieval/scoring.py +344 -344
  72. alma/retrieval/trust_scoring.py +637 -0
  73. alma/retrieval/verification.py +797 -0
  74. alma/session/__init__.py +19 -19
  75. alma/session/manager.py +442 -399
  76. alma/session/types.py +288 -288
  77. alma/storage/__init__.py +101 -61
  78. alma/storage/archive.py +233 -0
  79. alma/storage/azure_cosmos.py +1259 -1048
  80. alma/storage/base.py +1083 -525
  81. alma/storage/chroma.py +1443 -1443
  82. alma/storage/constants.py +103 -0
  83. alma/storage/file_based.py +614 -619
  84. alma/storage/migrations/__init__.py +21 -0
  85. alma/storage/migrations/base.py +321 -0
  86. alma/storage/migrations/runner.py +323 -0
  87. alma/storage/migrations/version_stores.py +337 -0
  88. alma/storage/migrations/versions/__init__.py +11 -0
  89. alma/storage/migrations/versions/v1_0_0.py +373 -0
  90. alma/storage/migrations/versions/v1_1_0_workflow_context.py +551 -0
  91. alma/storage/pinecone.py +1080 -1080
  92. alma/storage/postgresql.py +1948 -1452
  93. alma/storage/qdrant.py +1306 -1306
  94. alma/storage/sqlite_local.py +3041 -1358
  95. alma/testing/__init__.py +46 -0
  96. alma/testing/factories.py +301 -0
  97. alma/testing/mocks.py +389 -0
  98. alma/types.py +292 -264
  99. alma/utils/__init__.py +19 -0
  100. alma/utils/tokenizer.py +521 -0
  101. alma/workflow/__init__.py +83 -0
  102. alma/workflow/artifacts.py +170 -0
  103. alma/workflow/checkpoint.py +311 -0
  104. alma/workflow/context.py +228 -0
  105. alma/workflow/outcomes.py +189 -0
  106. alma/workflow/reducers.py +393 -0
  107. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/METADATA +244 -72
  108. alma_memory-0.7.0.dist-info/RECORD +112 -0
  109. alma_memory-0.5.0.dist-info/RECORD +0 -76
  110. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/WHEEL +0 -0
  111. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/top_level.txt +0 -0
alma/integration/victor.py
@@ -1,471 +1,471 @@
-"""
-ALMA Victor Integration.
-
-Victor-specific integration for backend/API QA testing with ALMA memory.
-
-Victor specializes in:
-- API endpoint testing
-- Database validation
-- Performance testing
-- Health check verification
-- Authentication/authorization testing
-
-This module provides Victor-specific memory categories, prompts, and utilities.
-"""
-
-import logging
-from dataclasses import dataclass, field
-from typing import Any, Dict, List, Optional
-
-from alma.core import ALMA
-from alma.harness.domains import CodingDomain
-from alma.integration.claude_agents import (
-    AgentType,
-    ClaudeAgentHooks,
-    TaskContext,
-    TaskOutcome,
-)
-from alma.types import MemorySlice
-
-logger = logging.getLogger(__name__)
-
-
-# Victor's learning categories
-VICTOR_CATEGORIES = [
-    "api_design_patterns",
-    "authentication_patterns",
-    "error_handling",
-    "performance_optimization",
-    "database_query_patterns",
-    "caching_strategies",
-]
-
-# Categories Victor should NOT learn
-VICTOR_FORBIDDEN = [
-    "frontend_styling",
-    "ui_testing",
-    "marketing_content",
-]
-
-
-@dataclass
-class APITestContext(TaskContext):
-    """
-    Victor-specific test context.
-
-    Extends TaskContext with API/backend testing-specific fields.
-    """
-
-    endpoint: Optional[str] = None
-    method: str = "GET"
-    expected_status: Optional[int] = None
-    request_body: Optional[Dict[str, Any]] = None
-    headers: Dict[str, str] = field(default_factory=dict)
-    is_auth_test: bool = False
-    is_performance_test: bool = False
-    is_database_test: bool = False
-
-    def __post_init__(self):
-        # Ensure agent name is victor
-        self.agent_name = "victor"
-        # Set default task type if not specified
-        if not self.task_type:
-            self.task_type = self._infer_task_type()
-
-    def _infer_task_type(self) -> str:
-        """Infer task type from context."""
-        if self.is_auth_test:
-            return "authentication_patterns"
-        if self.is_performance_test:
-            return "performance_optimization"
-        if self.is_database_test:
-            return "database_query_patterns"
-        if self.endpoint:
-            return "api_design_patterns"
-        return "api_testing"
-
-
-@dataclass
-class APITestOutcome(TaskOutcome):
-    """
-    Victor-specific test outcome.
-
-    Extends TaskOutcome with API/backend testing-specific results.
-    """
-
-    response_status: Optional[int] = None
-    response_time_ms: Optional[int] = None
-    response_body: Optional[Dict[str, Any]] = None
-    database_queries_count: int = 0
-    cache_hits: int = 0
-    cache_misses: int = 0
-    validation_errors: List[str] = field(default_factory=list)
-    performance_metrics: Dict[str, float] = field(default_factory=dict)
-
-    def __post_init__(self):
-        # Use response_time_ms as duration if not set
-        if self.duration_ms is None and self.response_time_ms is not None:
-            self.duration_ms = self.response_time_ms
-
-
-class VictorHooks(ClaudeAgentHooks):
-    """
-    Victor-specific integration hooks.
-
-    Extends ClaudeAgentHooks with backend/API testing-specific functionality.
-    """
-
-    def __init__(self, alma: ALMA, auto_learn: bool = True):
-        """
-        Initialize Victor hooks.
-
-        Args:
-            alma: ALMA instance
-            auto_learn: Whether to automatically learn from outcomes
-        """
-        harness = CodingDomain.create_victor(alma)
-        super().__init__(
-            alma=alma,
-            agent_type=AgentType.VICTOR,
-            harness=harness,
-            auto_learn=auto_learn,
-        )
-
-    def get_api_patterns(
-        self,
-        endpoint_type: str,
-        top_k: int = 5,
-    ) -> List[Dict[str, Any]]:
-        """
-        Get proven API patterns for an endpoint type.
-
-        Args:
-            endpoint_type: Type of endpoint (CRUD, auth, search, etc.)
-            top_k: Maximum patterns to return
-
-        Returns:
-            List of API patterns with success rates
-        """
-        memories = self.alma.retrieve(
-            task=f"API patterns for {endpoint_type} endpoints",
-            agent=self.agent_name,
-            top_k=top_k,
-        )
-
-        patterns = []
-        for h in memories.heuristics:
-            if "api" in h.condition.lower() or "endpoint" in h.condition.lower():
-                patterns.append(
-                    {
-                        "pattern": h.strategy,
-                        "condition": h.condition,
-                        "confidence": h.confidence,
-                        "occurrences": h.occurrence_count,
-                    }
-                )
-
-        return patterns
-
-    def get_error_handling_patterns(self, top_k: int = 5) -> List[Dict[str, Any]]:
-        """
-        Get proven error handling patterns.
-
-        Returns strategies for handling API errors, validation, and responses.
-        """
-        memories = self.alma.retrieve(
-            task="error handling patterns validation responses",
-            agent=self.agent_name,
-            top_k=top_k,
-        )
-
-        patterns = []
-        for h in memories.heuristics:
-            if any(
-                kw in h.condition.lower() for kw in ["error", "exception", "validation"]
-            ):
-                patterns.append(
-                    {
-                        "condition": h.condition,
-                        "strategy": h.strategy,
-                        "confidence": h.confidence,
-                    }
-                )
-
-        return patterns
-
-    def get_performance_strategies(self, top_k: int = 5) -> List[Dict[str, Any]]:
-        """
-        Get performance optimization strategies.
-
-        Returns strategies for caching, query optimization, etc.
-        """
-        memories = self.alma.retrieve(
-            task="performance optimization caching query database",
-            agent=self.agent_name,
-            top_k=top_k,
-        )
-
-        strategies = []
-        for h in memories.heuristics:
-            if any(
-                kw in h.condition.lower()
-                for kw in ["performance", "cache", "query", "slow"]
-            ):
-                strategies.append(
-                    {
-                        "condition": h.condition,
-                        "strategy": h.strategy,
-                        "confidence": h.confidence,
-                    }
-                )
-
-        for dk in memories.domain_knowledge:
-            if dk.domain in ["performance_optimization", "caching_strategies"]:
-                strategies.append(
-                    {
-                        "fact": dk.fact,
-                        "source": dk.source,
-                    }
-                )
-
-        return strategies
-
-    def get_auth_patterns(self, top_k: int = 5) -> List[Dict[str, Any]]:
-        """
-        Get authentication/authorization testing patterns.
-
-        Returns patterns for testing auth flows, tokens, permissions.
-        """
-        memories = self.alma.retrieve(
-            task="authentication authorization JWT token permission testing",
-            agent=self.agent_name,
-            top_k=top_k,
-        )
-
-        patterns = []
-        for h in memories.heuristics:
-            if any(
-                kw in h.condition.lower()
-                for kw in ["auth", "token", "permission", "jwt"]
-            ):
-                patterns.append(
-                    {
-                        "condition": h.condition,
-                        "strategy": h.strategy,
-                        "confidence": h.confidence,
-                    }
-                )
-
-        return patterns
-
-    def record_api_pattern(
-        self,
-        endpoint: str,
-        method: str,
-        pattern_type: str,
-        description: str,
-        success: bool,
-    ) -> bool:
-        """
-        Record an API pattern for learning.
-
-        Args:
-            endpoint: API endpoint tested
-            method: HTTP method
-            pattern_type: Type of pattern (error_handling, validation, etc.)
-            description: Pattern description
-            success: Whether the pattern worked
-
-        Returns:
-            True if recorded successfully
-        """
-        fact = f"[{method}] {endpoint}: {description}"
-        return self.add_knowledge(
-            domain=pattern_type,
-            fact=fact,
-            source=f"api_test:success={success}",
-        )
-
-    def record_performance_metric(
-        self,
-        endpoint: str,
-        response_time_ms: int,
-        query_count: int,
-        threshold_ms: int = 500,
-    ) -> bool:
-        """
-        Record a performance metric for learning.
-
-        Args:
-            endpoint: API endpoint tested
-            response_time_ms: Response time in milliseconds
-            query_count: Number of database queries
-            threshold_ms: Performance threshold
-
-        Returns:
-            True if recorded successfully
-        """
-        is_slow = response_time_ms > threshold_ms
-        status = "SLOW" if is_slow else "OK"
-        fact = f"{endpoint}: {response_time_ms}ms, {query_count} queries [{status}]"
-
-        return self.add_knowledge(
-            domain="performance_optimization",
-            fact=fact,
-            source=f"performance_test:threshold={threshold_ms}ms",
-        )
-
-    def format_api_test_prompt(
-        self,
-        memories: MemorySlice,
-        test_context: APITestContext,
-    ) -> str:
-        """
-        Format memories for Victor's API testing prompt.
-
-        Provides Victor-specific formatting with test context.
-        """
-        sections = []
-
-        # Base memory formatting
-        base_format = self.format_memories_for_prompt(memories)
-        if base_format:
-            sections.append(base_format)
-
-        # Add test context
-        sections.append("\n## Current Test Context")
-        sections.append(f"- **Task**: {test_context.task_description}")
-        sections.append(f"- **Task Type**: {test_context.task_type}")
-
-        if test_context.endpoint:
-            sections.append(
-                f"- **Endpoint**: {test_context.method} {test_context.endpoint}"
-            )
-        if test_context.expected_status:
-            sections.append(f"- **Expected Status**: {test_context.expected_status}")
-        if test_context.request_body:
-            sections.append(
-                f"- **Request Body**: {len(test_context.request_body)} fields"
-            )
-
-        if test_context.is_auth_test:
-            sections.append("- **Focus**: Authentication/Authorization")
-        if test_context.is_performance_test:
-            sections.append("- **Focus**: Performance testing")
-        if test_context.is_database_test:
-            sections.append("- **Focus**: Database validation")
-
-        return "\n".join(sections)
-
-
-def create_victor_hooks(alma: ALMA, auto_learn: bool = True) -> VictorHooks:
-    """
-    Convenience function to create Victor hooks.
-
-    Args:
-        alma: ALMA instance
-        auto_learn: Whether to automatically learn
-
-    Returns:
-        Configured VictorHooks
-    """
-    return VictorHooks(alma=alma, auto_learn=auto_learn)
-
-
-def victor_pre_task(
-    alma: ALMA,
-    task: str,
-    endpoint: Optional[str] = None,
-    method: str = "GET",
-    project_id: str = "default",
-    top_k: int = 5,
-) -> Dict[str, Any]:
-    """
-    Convenience function for Victor pre-task hook.
-
-    Quick integration without creating full hooks object.
-
-    Args:
-        alma: ALMA instance
-        task: Task description
-        endpoint: Optional API endpoint
-        method: HTTP method
-        project_id: Project ID
-        top_k: Max memories per type
-
-    Returns:
-        Dict with memories and formatted prompt
-    """
-    hooks = VictorHooks(alma=alma, auto_learn=False)
-
-    context = APITestContext(
-        task_description=task,
-        task_type="",  # Will be inferred
-        agent_name="victor",
-        project_id=project_id,
-        endpoint=endpoint,
-        method=method,
-    )
-
-    memories = hooks.pre_task(context, top_k=top_k)
-    prompt = hooks.format_api_test_prompt(memories, context)
-
-    return {
-        "memories": memories,
-        "prompt": prompt,
-        "context": context,
-    }
-
-
-def victor_post_task(
-    alma: ALMA,
-    task: str,
-    success: bool,
-    strategy_used: str,
-    endpoint: Optional[str] = None,
-    method: str = "GET",
-    response_status: Optional[int] = None,
-    response_time_ms: Optional[int] = None,
-    project_id: str = "default",
-    error_message: Optional[str] = None,
-) -> bool:
-    """
-    Convenience function for Victor post-task hook.
-
-    Quick integration without creating full hooks object.
-
-    Args:
-        alma: ALMA instance
-        task: Task description
-        success: Whether task succeeded
-        strategy_used: Strategy used
-        endpoint: API endpoint tested
-        method: HTTP method
-        response_status: HTTP response status
-        response_time_ms: Response time
-        project_id: Project ID
-        error_message: Error if failed
-
-    Returns:
-        True if learning was recorded
-    """
-    hooks = VictorHooks(alma=alma, auto_learn=True)

-    context = APITestContext(
-        task_description=task,
-        task_type="",  # Will be inferred
-        agent_name="victor",
-        project_id=project_id,
-        endpoint=endpoint,
-        method=method,
-    )
-
-    outcome = APITestOutcome(
-        success=success,
-        strategy_used=strategy_used,
-        response_status=response_status,
-        response_time_ms=response_time_ms,
-        error_message=error_message,
-    )
-
-    return hooks.post_task(context, outcome)
+"""
+ALMA Victor Integration.
+
+Victor-specific integration for backend/API QA testing with ALMA memory.
+
+Victor specializes in:
+- API endpoint testing
+- Database validation
+- Performance testing
+- Health check verification
+- Authentication/authorization testing
+
+This module provides Victor-specific memory categories, prompts, and utilities.
+"""
+
+import logging
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Optional
+
+from alma.core import ALMA
+from alma.harness.domains import CodingDomain
+from alma.integration.claude_agents import (
+    AgentType,
+    ClaudeAgentHooks,
+    TaskContext,
+    TaskOutcome,
+)
+from alma.types import MemorySlice
+
+logger = logging.getLogger(__name__)
+
+
+# Victor's learning categories
+VICTOR_CATEGORIES = [
+    "api_design_patterns",
+    "authentication_patterns",
+    "error_handling",
+    "performance_optimization",
+    "database_query_patterns",
+    "caching_strategies",
+]
+
+# Categories Victor should NOT learn
+VICTOR_FORBIDDEN = [
+    "frontend_styling",
+    "ui_testing",
+    "marketing_content",
+]
+
+
+@dataclass
+class APITestContext(TaskContext):
+    """
+    Victor-specific test context.
+
+    Extends TaskContext with API/backend testing-specific fields.
+    """
+
+    endpoint: Optional[str] = None
+    method: str = "GET"
+    expected_status: Optional[int] = None
+    request_body: Optional[Dict[str, Any]] = None
+    headers: Dict[str, str] = field(default_factory=dict)
+    is_auth_test: bool = False
+    is_performance_test: bool = False
+    is_database_test: bool = False
+
+    def __post_init__(self):
+        # Ensure agent name is victor
+        self.agent_name = "victor"
+        # Set default task type if not specified
+        if not self.task_type:
+            self.task_type = self._infer_task_type()
+
+    def _infer_task_type(self) -> str:
+        """Infer task type from context."""
+        if self.is_auth_test:
+            return "authentication_patterns"
+        if self.is_performance_test:
+            return "performance_optimization"
+        if self.is_database_test:
+            return "database_query_patterns"
+        if self.endpoint:
+            return "api_design_patterns"
+        return "api_testing"
+
+
+@dataclass
+class APITestOutcome(TaskOutcome):
+    """
+    Victor-specific test outcome.
+
+    Extends TaskOutcome with API/backend testing-specific results.
+    """
+
+    response_status: Optional[int] = None
+    response_time_ms: Optional[int] = None
+    response_body: Optional[Dict[str, Any]] = None
+    database_queries_count: int = 0
+    cache_hits: int = 0
+    cache_misses: int = 0
+    validation_errors: List[str] = field(default_factory=list)
+    performance_metrics: Dict[str, float] = field(default_factory=dict)
+
+    def __post_init__(self):
+        # Use response_time_ms as duration if not set
+        if self.duration_ms is None and self.response_time_ms is not None:
+            self.duration_ms = self.response_time_ms
+
+
+class VictorHooks(ClaudeAgentHooks):
+    """
+    Victor-specific integration hooks.
+
+    Extends ClaudeAgentHooks with backend/API testing-specific functionality.
+    """
+
+    def __init__(self, alma: ALMA, auto_learn: bool = True):
+        """
+        Initialize Victor hooks.
+
+        Args:
+            alma: ALMA instance
+            auto_learn: Whether to automatically learn from outcomes
+        """
+        harness = CodingDomain.create_victor(alma)
+        super().__init__(
+            alma=alma,
+            agent_type=AgentType.VICTOR,
+            harness=harness,
+            auto_learn=auto_learn,
+        )
+
+    def get_api_patterns(
+        self,
+        endpoint_type: str,
+        top_k: int = 5,
+    ) -> List[Dict[str, Any]]:
+        """
+        Get proven API patterns for an endpoint type.
+
+        Args:
+            endpoint_type: Type of endpoint (CRUD, auth, search, etc.)
+            top_k: Maximum patterns to return
+
+        Returns:
+            List of API patterns with success rates
+        """
+        memories = self.alma.retrieve(
+            task=f"API patterns for {endpoint_type} endpoints",
+            agent=self.agent_name,
+            top_k=top_k,
+        )
+
+        patterns = []
+        for h in memories.heuristics:
+            if "api" in h.condition.lower() or "endpoint" in h.condition.lower():
+                patterns.append(
+                    {
+                        "pattern": h.strategy,
+                        "condition": h.condition,
+                        "confidence": h.confidence,
+                        "occurrences": h.occurrence_count,
+                    }
+                )
+
+        return patterns
+
+    def get_error_handling_patterns(self, top_k: int = 5) -> List[Dict[str, Any]]:
+        """
+        Get proven error handling patterns.
+
+        Returns strategies for handling API errors, validation, and responses.
+        """
+        memories = self.alma.retrieve(
+            task="error handling patterns validation responses",
+            agent=self.agent_name,
+            top_k=top_k,
+        )
+
+        patterns = []
+        for h in memories.heuristics:
+            if any(
+                kw in h.condition.lower() for kw in ["error", "exception", "validation"]
+            ):
+                patterns.append(
+                    {
+                        "condition": h.condition,
+                        "strategy": h.strategy,
+                        "confidence": h.confidence,
+                    }
+                )
+
+        return patterns
+
+    def get_performance_strategies(self, top_k: int = 5) -> List[Dict[str, Any]]:
+        """
+        Get performance optimization strategies.
+
+        Returns strategies for caching, query optimization, etc.
+        """
+        memories = self.alma.retrieve(
+            task="performance optimization caching query database",
+            agent=self.agent_name,
+            top_k=top_k,
+        )
+
+        strategies = []
+        for h in memories.heuristics:
+            if any(
+                kw in h.condition.lower()
+                for kw in ["performance", "cache", "query", "slow"]
+            ):
+                strategies.append(
+                    {
+                        "condition": h.condition,
+                        "strategy": h.strategy,
+                        "confidence": h.confidence,
+                    }
+                )
+
+        for dk in memories.domain_knowledge:
+            if dk.domain in ["performance_optimization", "caching_strategies"]:
+                strategies.append(
+                    {
+                        "fact": dk.fact,
+                        "source": dk.source,
+                    }
+                )
+
+        return strategies
+
+    def get_auth_patterns(self, top_k: int = 5) -> List[Dict[str, Any]]:
+        """
+        Get authentication/authorization testing patterns.
+
+        Returns patterns for testing auth flows, tokens, permissions.
+        """
+        memories = self.alma.retrieve(
+            task="authentication authorization JWT token permission testing",
+            agent=self.agent_name,
+            top_k=top_k,
+        )
+
+        patterns = []
+        for h in memories.heuristics:
+            if any(
+                kw in h.condition.lower()
+                for kw in ["auth", "token", "permission", "jwt"]
+            ):
+                patterns.append(
+                    {
+                        "condition": h.condition,
+                        "strategy": h.strategy,
+                        "confidence": h.confidence,
+                    }
+                )
+
+        return patterns
+
+    def record_api_pattern(
+        self,
+        endpoint: str,
+        method: str,
+        pattern_type: str,
+        description: str,
+        success: bool,
+    ) -> bool:
+        """
+        Record an API pattern for learning.
+
+        Args:
+            endpoint: API endpoint tested
+            method: HTTP method
+            pattern_type: Type of pattern (error_handling, validation, etc.)
+            description: Pattern description
+            success: Whether the pattern worked
+
+        Returns:
+            True if recorded successfully
+        """
+        fact = f"[{method}] {endpoint}: {description}"
+        return self.add_knowledge(
+            domain=pattern_type,
+            fact=fact,
+            source=f"api_test:success={success}",
+        )
+
+    def record_performance_metric(
+        self,
+        endpoint: str,
+        response_time_ms: int,
+        query_count: int,
+        threshold_ms: int = 500,
+    ) -> bool:
+        """
+        Record a performance metric for learning.
+
+        Args:
+            endpoint: API endpoint tested
+            response_time_ms: Response time in milliseconds
+            query_count: Number of database queries
+            threshold_ms: Performance threshold
+
+        Returns:
+            True if recorded successfully
+        """
+        is_slow = response_time_ms > threshold_ms
+        status = "SLOW" if is_slow else "OK"
+        fact = f"{endpoint}: {response_time_ms}ms, {query_count} queries [{status}]"
+
+        return self.add_knowledge(
+            domain="performance_optimization",
+            fact=fact,
+            source=f"performance_test:threshold={threshold_ms}ms",
+        )
+
+    def format_api_test_prompt(
+        self,
+        memories: MemorySlice,
+        test_context: APITestContext,
+    ) -> str:
+        """
+        Format memories for Victor's API testing prompt.
+
+        Provides Victor-specific formatting with test context.
+        """
+        sections = []
+
+        # Base memory formatting
+        base_format = self.format_memories_for_prompt(memories)
+        if base_format:
+            sections.append(base_format)
+
+        # Add test context
+        sections.append("\n## Current Test Context")
+        sections.append(f"- **Task**: {test_context.task_description}")
+        sections.append(f"- **Task Type**: {test_context.task_type}")
+
+        if test_context.endpoint:
+            sections.append(
+                f"- **Endpoint**: {test_context.method} {test_context.endpoint}"
+            )
+        if test_context.expected_status:
+            sections.append(f"- **Expected Status**: {test_context.expected_status}")
+        if test_context.request_body:
+            sections.append(
+                f"- **Request Body**: {len(test_context.request_body)} fields"
+            )
+
+        if test_context.is_auth_test:
+            sections.append("- **Focus**: Authentication/Authorization")
+        if test_context.is_performance_test:
+            sections.append("- **Focus**: Performance testing")
+        if test_context.is_database_test:
+            sections.append("- **Focus**: Database validation")
+
+        return "\n".join(sections)
+
+
+def create_victor_hooks(alma: ALMA, auto_learn: bool = True) -> VictorHooks:
+    """
+    Convenience function to create Victor hooks.
+
+    Args:
+        alma: ALMA instance
+        auto_learn: Whether to automatically learn
+
+    Returns:
+        Configured VictorHooks
+    """
+    return VictorHooks(alma=alma, auto_learn=auto_learn)
+
+
+def victor_pre_task(
+    alma: ALMA,
+    task: str,
+    endpoint: Optional[str] = None,
+    method: str = "GET",
+    project_id: str = "default",
+    top_k: int = 5,
+) -> Dict[str, Any]:
+    """
+    Convenience function for Victor pre-task hook.
+
+    Quick integration without creating full hooks object.
+
+    Args:
+        alma: ALMA instance
+        task: Task description
+        endpoint: Optional API endpoint
+        method: HTTP method
+        project_id: Project ID
+        top_k: Max memories per type
+
+    Returns:
+        Dict with memories and formatted prompt
+    """
+    hooks = VictorHooks(alma=alma, auto_learn=False)
+
+    context = APITestContext(
+        task_description=task,
+        task_type="",  # Will be inferred
+        agent_name="victor",
+        project_id=project_id,
+        endpoint=endpoint,
+        method=method,
+    )
+
+    memories = hooks.pre_task(context, top_k=top_k)
+    prompt = hooks.format_api_test_prompt(memories, context)
+
+    return {
+        "memories": memories,
+        "prompt": prompt,
+        "context": context,
+    }
+
+
+def victor_post_task(
+    alma: ALMA,
+    task: str,
+    success: bool,
+    strategy_used: str,
+    endpoint: Optional[str] = None,
+    method: str = "GET",
+    response_status: Optional[int] = None,
+    response_time_ms: Optional[int] = None,
+    project_id: str = "default",
+    error_message: Optional[str] = None,
+) -> bool:
+    """
+    Convenience function for Victor post-task hook.
+
+    Quick integration without creating full hooks object.
+
+    Args:
+        alma: ALMA instance
+        task: Task description
+        success: Whether task succeeded
+        strategy_used: Strategy used
+        endpoint: API endpoint tested
+        method: HTTP method
+        response_status: HTTP response status
+        response_time_ms: Response time
+        project_id: Project ID
+        error_message: Error if failed
+
+    Returns:
+        True if learning was recorded
+    """
+    hooks = VictorHooks(alma=alma, auto_learn=True)
+
+    context = APITestContext(
+        task_description=task,
+        task_type="",  # Will be inferred
+        agent_name="victor",
+        project_id=project_id,
+        endpoint=endpoint,
+        method=method,
+    )
+
+    outcome = APITestOutcome(
+        success=success,
+        strategy_used=strategy_used,
+        response_status=response_status,
+        response_time_ms=response_time_ms,
+        error_message=error_message,
+    )
+
+    return hooks.post_task(context, outcome)
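
For orientation, here is a minimal sketch of how the two convenience functions shipped in this file are meant to wrap a test run. It uses only names defined in the diff above (victor_pre_task, victor_post_task, ALMA); the endpoint path, strategy label, result values, and the assumption of an already-configured ALMA instance are illustrative and not part of the package.

# Illustrative sketch, not part of the package: wiring Victor's convenience
# hooks around a single API test. Assumes `alma` is an already-configured
# ALMA instance; its construction is outside the scope of this diff.
from alma.core import ALMA
from alma.integration.victor import victor_pre_task, victor_post_task


def run_expired_token_test(alma: ALMA) -> None:
    task = "Verify login endpoint rejects expired tokens"

    # Before the test: retrieve relevant memories and a formatted prompt block.
    pre = victor_pre_task(
        alma,
        task=task,
        endpoint="/api/auth/login",  # hypothetical endpoint
        method="POST",
        top_k=5,
    )
    prompt_block = pre["prompt"]  # inject into the agent's prompt as needed

    # ... execute the actual HTTP request with your test client here ...
    success, status_code, elapsed_ms = True, 401, 87  # example outcome values

    # After the test: record the outcome so ALMA can learn from it.
    victor_post_task(
        alma,
        task=task,
        success=success,
        strategy_used="negative test with expired JWT",
        endpoint="/api/auth/login",
        method="POST",
        response_status=status_code,
        response_time_ms=elapsed_ms,
    )

Because victor_pre_task builds its hooks with auto_learn=False and victor_post_task with auto_learn=True, learning only happens at the post-task step; use create_victor_hooks directly if you want to reuse one VictorHooks instance across many tests.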