alma-memory 0.5.0__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. alma/__init__.py +296 -194
  2. alma/compression/__init__.py +33 -0
  3. alma/compression/pipeline.py +980 -0
  4. alma/confidence/__init__.py +47 -47
  5. alma/confidence/engine.py +540 -540
  6. alma/confidence/types.py +351 -351
  7. alma/config/loader.py +157 -157
  8. alma/consolidation/__init__.py +23 -23
  9. alma/consolidation/engine.py +678 -678
  10. alma/consolidation/prompts.py +84 -84
  11. alma/core.py +1189 -322
  12. alma/domains/__init__.py +30 -30
  13. alma/domains/factory.py +359 -359
  14. alma/domains/schemas.py +448 -448
  15. alma/domains/types.py +272 -272
  16. alma/events/__init__.py +75 -75
  17. alma/events/emitter.py +285 -284
  18. alma/events/storage_mixin.py +246 -246
  19. alma/events/types.py +126 -126
  20. alma/events/webhook.py +425 -425
  21. alma/exceptions.py +49 -49
  22. alma/extraction/__init__.py +31 -31
  23. alma/extraction/auto_learner.py +265 -264
  24. alma/extraction/extractor.py +420 -420
  25. alma/graph/__init__.py +106 -81
  26. alma/graph/backends/__init__.py +32 -18
  27. alma/graph/backends/kuzu.py +624 -0
  28. alma/graph/backends/memgraph.py +432 -0
  29. alma/graph/backends/memory.py +236 -236
  30. alma/graph/backends/neo4j.py +417 -417
  31. alma/graph/base.py +159 -159
  32. alma/graph/extraction.py +198 -198
  33. alma/graph/store.py +860 -860
  34. alma/harness/__init__.py +35 -35
  35. alma/harness/base.py +386 -386
  36. alma/harness/domains.py +705 -705
  37. alma/initializer/__init__.py +37 -37
  38. alma/initializer/initializer.py +418 -418
  39. alma/initializer/types.py +250 -250
  40. alma/integration/__init__.py +62 -62
  41. alma/integration/claude_agents.py +444 -432
  42. alma/integration/helena.py +423 -423
  43. alma/integration/victor.py +471 -471
  44. alma/learning/__init__.py +101 -86
  45. alma/learning/decay.py +878 -0
  46. alma/learning/forgetting.py +1446 -1446
  47. alma/learning/heuristic_extractor.py +390 -390
  48. alma/learning/protocols.py +374 -374
  49. alma/learning/validation.py +346 -346
  50. alma/mcp/__init__.py +123 -45
  51. alma/mcp/__main__.py +156 -156
  52. alma/mcp/resources.py +122 -122
  53. alma/mcp/server.py +955 -591
  54. alma/mcp/tools.py +3254 -511
  55. alma/observability/__init__.py +91 -0
  56. alma/observability/config.py +302 -0
  57. alma/observability/guidelines.py +170 -0
  58. alma/observability/logging.py +424 -0
  59. alma/observability/metrics.py +583 -0
  60. alma/observability/tracing.py +440 -0
  61. alma/progress/__init__.py +21 -21
  62. alma/progress/tracker.py +607 -607
  63. alma/progress/types.py +250 -250
  64. alma/retrieval/__init__.py +134 -53
  65. alma/retrieval/budget.py +525 -0
  66. alma/retrieval/cache.py +1304 -1061
  67. alma/retrieval/embeddings.py +202 -202
  68. alma/retrieval/engine.py +850 -366
  69. alma/retrieval/modes.py +365 -0
  70. alma/retrieval/progressive.py +560 -0
  71. alma/retrieval/scoring.py +344 -344
  72. alma/retrieval/trust_scoring.py +637 -0
  73. alma/retrieval/verification.py +797 -0
  74. alma/session/__init__.py +19 -19
  75. alma/session/manager.py +442 -399
  76. alma/session/types.py +288 -288
  77. alma/storage/__init__.py +101 -61
  78. alma/storage/archive.py +233 -0
  79. alma/storage/azure_cosmos.py +1259 -1048
  80. alma/storage/base.py +1083 -525
  81. alma/storage/chroma.py +1443 -1443
  82. alma/storage/constants.py +103 -0
  83. alma/storage/file_based.py +614 -619
  84. alma/storage/migrations/__init__.py +21 -0
  85. alma/storage/migrations/base.py +321 -0
  86. alma/storage/migrations/runner.py +323 -0
  87. alma/storage/migrations/version_stores.py +337 -0
  88. alma/storage/migrations/versions/__init__.py +11 -0
  89. alma/storage/migrations/versions/v1_0_0.py +373 -0
  90. alma/storage/migrations/versions/v1_1_0_workflow_context.py +551 -0
  91. alma/storage/pinecone.py +1080 -1080
  92. alma/storage/postgresql.py +1948 -1452
  93. alma/storage/qdrant.py +1306 -1306
  94. alma/storage/sqlite_local.py +3041 -1358
  95. alma/testing/__init__.py +46 -0
  96. alma/testing/factories.py +301 -0
  97. alma/testing/mocks.py +389 -0
  98. alma/types.py +292 -264
  99. alma/utils/__init__.py +19 -0
  100. alma/utils/tokenizer.py +521 -0
  101. alma/workflow/__init__.py +83 -0
  102. alma/workflow/artifacts.py +170 -0
  103. alma/workflow/checkpoint.py +311 -0
  104. alma/workflow/context.py +228 -0
  105. alma/workflow/outcomes.py +189 -0
  106. alma/workflow/reducers.py +393 -0
  107. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/METADATA +244 -72
  108. alma_memory-0.7.0.dist-info/RECORD +112 -0
  109. alma_memory-0.5.0.dist-info/RECORD +0 -76
  110. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/WHEEL +0 -0
  111. {alma_memory-0.5.0.dist-info → alma_memory-0.7.0.dist-info}/top_level.txt +0 -0
alma/integration/helena.py
@@ -1,423 +1,423 @@
- """
- ALMA Helena Integration.
-
- Helena-specific integration for frontend QA testing with ALMA memory.
-
- Helena specializes in:
- - Playwright automation
- - UI/UX testing
- - Accessibility validation
- - Visual regression testing
- - Form testing patterns
-
- This module provides Helena-specific memory categories, prompts, and utilities.
- """
-
- import logging
- from dataclasses import dataclass, field
- from typing import Any, Dict, List, Optional
-
- from alma.core import ALMA
- from alma.harness.domains import CodingDomain
- from alma.integration.claude_agents import (
-     AgentType,
-     ClaudeAgentHooks,
-     TaskContext,
-     TaskOutcome,
- )
- from alma.types import MemorySlice
-
- logger = logging.getLogger(__name__)
-
-
- # Helena's learning categories
- HELENA_CATEGORIES = [
-     "testing_strategies",
-     "selector_patterns",
-     "ui_component_patterns",
-     "form_testing",
-     "accessibility_testing",
- ]
-
- # Categories Helena should NOT learn
- HELENA_FORBIDDEN = [
-     "backend_logic",
-     "database_queries",
-     "api_design",
-     "infrastructure",
- ]
-
-
- @dataclass
- class UITestContext(TaskContext):
-     """
-     Helena-specific test context.
-
-     Extends TaskContext with UI testing-specific fields.
-     """
-
-     component_type: Optional[str] = None
-     page_url: Optional[str] = None
-     viewport: Optional[Dict[str, int]] = None
-     browser: str = "chromium"
-     is_accessibility_test: bool = False
-     is_visual_test: bool = False
-
-     def __post_init__(self):
-         # Ensure agent name is helena
-         self.agent_name = "helena"
-         # Set default task type if not specified
-         if not self.task_type:
-             self.task_type = self._infer_task_type()
-
-     def _infer_task_type(self) -> str:
-         """Infer task type from context."""
-         if self.is_accessibility_test:
-             return "accessibility_testing"
-         if self.is_visual_test:
-             return "visual_testing"
-         if self.component_type:
-             if "form" in self.component_type.lower():
-                 return "form_testing"
-             return "ui_component_patterns"
-         return "testing_strategies"
-
-
- @dataclass
- class UITestOutcome(TaskOutcome):
-     """
-     Helena-specific test outcome.
-
-     Extends TaskOutcome with UI testing-specific results.
-     """
-
-     selectors_used: List[str] = field(default_factory=list)
-     accessibility_issues: List[Dict[str, Any]] = field(default_factory=list)
-     visual_diffs: List[str] = field(default_factory=list)
-     flaky_elements: List[str] = field(default_factory=list)
-     screenshots: List[str] = field(default_factory=list)
-
-     def __post_init__(self):
-         # Add selectors to tools_used
-         if self.selectors_used and not self.tools_used:
-             self.tools_used = self.selectors_used
-
-
- class HelenaHooks(ClaudeAgentHooks):
-     """
-     Helena-specific integration hooks.
-
-     Extends ClaudeAgentHooks with UI testing-specific functionality.
-     """
-
-     def __init__(self, alma: ALMA, auto_learn: bool = True):
-         """
-         Initialize Helena hooks.
-
-         Args:
-             alma: ALMA instance
-             auto_learn: Whether to automatically learn from outcomes
-         """
-         harness = CodingDomain.create_helena(alma)
-         super().__init__(
-             alma=alma,
-             agent_type=AgentType.HELENA,
-             harness=harness,
-             auto_learn=auto_learn,
-         )
-
-     def get_selector_patterns(
-         self,
-         component_type: str,
-         top_k: int = 5,
-     ) -> List[Dict[str, Any]]:
-         """
-         Get proven selector patterns for a component type.
-
-         Args:
-             component_type: Type of UI component (button, input, modal, etc.)
-             top_k: Maximum patterns to return
-
-         Returns:
-             List of selector patterns with success rates
-         """
-         memories = self.alma.retrieve(
-             task=f"selector patterns for {component_type}",
-             agent=self.agent_name,
-             top_k=top_k,
-         )
-
-         patterns = []
-         for h in memories.heuristics:
-             if "selector" in h.condition.lower():
-                 patterns.append(
-                     {
-                         "pattern": h.strategy,
-                         "confidence": h.confidence,
-                         "occurrences": h.occurrence_count,
-                     }
-                 )
-
-         return patterns
-
-     def get_form_testing_strategies(self, top_k: int = 5) -> List[Dict[str, Any]]:
-         """
-         Get proven form testing strategies.
-
-         Returns strategies for testing forms, validation, and submissions.
-         """
-         memories = self.alma.retrieve(
-             task="form testing validation submit strategies",
-             agent=self.agent_name,
-             top_k=top_k,
-         )
-
-         strategies = []
-         for h in memories.heuristics:
-             if any(kw in h.condition.lower() for kw in ["form", "validation", "input"]):
-                 strategies.append(
-                     {
-                         "condition": h.condition,
-                         "strategy": h.strategy,
-                         "confidence": h.confidence,
-                     }
-                 )
-
-         return strategies
-
-     def get_accessibility_patterns(self, top_k: int = 5) -> List[Dict[str, Any]]:
-         """
-         Get accessibility testing patterns.
-
-         Returns patterns for ARIA, keyboard navigation, screen readers.
-         """
-         memories = self.alma.retrieve(
-             task="accessibility ARIA keyboard screen reader testing",
-             agent=self.agent_name,
-             top_k=top_k,
-         )
-
-         patterns = []
-         for h in memories.heuristics:
-             if any(kw in h.condition.lower() for kw in ["access", "aria", "keyboard"]):
-                 patterns.append(
-                     {
-                         "condition": h.condition,
-                         "strategy": h.strategy,
-                         "confidence": h.confidence,
-                     }
-                 )
-
-         for dk in memories.domain_knowledge:
-             if dk.domain == "accessibility_testing":
-                 patterns.append(
-                     {
-                         "fact": dk.fact,
-                         "source": dk.source,
-                     }
-                 )
-
-         return patterns
-
-     def record_selector_pattern(
-         self,
-         selector: str,
-         component_type: str,
-         success: bool,
-         stability_score: float = 1.0,
-     ) -> bool:
-         """
-         Record a selector pattern for learning.
-
-         Args:
-             selector: The CSS/XPath selector used
-             component_type: Type of component targeted
-             success: Whether the selector worked
-             stability_score: How stable the selector is (0-1)
-
-         Returns:
-             True if recorded successfully
-         """
-         # Add as domain knowledge with stability metadata
-         description = f"Selector '{selector}' for {component_type}"
-         if stability_score < 0.7:
-             description += " (may be flaky)"
-
-         return self.add_knowledge(
-             domain="selector_patterns",
-             fact=description,
-             source=f"test_run:stability={stability_score:.2f}",
-         )
-
-     def record_accessibility_issue(
-         self,
-         issue_type: str,
-         element: str,
-         fix_suggestion: str,
-     ) -> bool:
-         """
-         Record an accessibility issue found.
-
-         Args:
-             issue_type: Type of issue (ARIA, contrast, keyboard, etc.)
-             element: Element with the issue
-             fix_suggestion: How to fix it
-
-         Returns:
-             True if recorded successfully
-         """
-         fact = f"Issue: {issue_type} on {element}. Fix: {fix_suggestion}"
-         return self.add_knowledge(
-             domain="accessibility_testing",
-             fact=fact,
-             source="accessibility_audit",
-         )
-
-     def format_ui_test_prompt(
-         self,
-         memories: MemorySlice,
-         test_context: UITestContext,
-     ) -> str:
-         """
-         Format memories for Helena's UI testing prompt.
-
-         Provides Helena-specific formatting with test context.
-         """
-         sections = []
-
-         # Base memory formatting
-         base_format = self.format_memories_for_prompt(memories)
-         if base_format:
-             sections.append(base_format)
-
-         # Add test context
-         sections.append("\n## Current Test Context")
-         sections.append(f"- **Task**: {test_context.task_description}")
-         sections.append(f"- **Task Type**: {test_context.task_type}")
-
-         if test_context.component_type:
-             sections.append(f"- **Component**: {test_context.component_type}")
-         if test_context.page_url:
-             sections.append(f"- **Page**: {test_context.page_url}")
-         if test_context.viewport:
-             sections.append(
-                 f"- **Viewport**: {test_context.viewport.get('width', '?')}x"
-                 f"{test_context.viewport.get('height', '?')}"
-             )
-
-         if test_context.is_accessibility_test:
-             sections.append("- **Focus**: Accessibility validation")
-         if test_context.is_visual_test:
-             sections.append("- **Focus**: Visual regression testing")
-
-         return "\n".join(sections)
-
-
- def create_helena_hooks(alma: ALMA, auto_learn: bool = True) -> HelenaHooks:
-     """
-     Convenience function to create Helena hooks.
-
-     Args:
-         alma: ALMA instance
-         auto_learn: Whether to automatically learn
-
-     Returns:
-         Configured HelenaHooks
-     """
-     return HelenaHooks(alma=alma, auto_learn=auto_learn)
-
-
- def helena_pre_task(
-     alma: ALMA,
-     task: str,
-     component_type: Optional[str] = None,
-     page_url: Optional[str] = None,
-     project_id: str = "default",
-     top_k: int = 5,
- ) -> Dict[str, Any]:
-     """
-     Convenience function for Helena pre-task hook.
-
-     Quick integration without creating full hooks object.
-
-     Args:
-         alma: ALMA instance
-         task: Task description
-         component_type: Optional component type
-         page_url: Optional page URL
-         project_id: Project ID
-         top_k: Max memories per type
-
-     Returns:
-         Dict with memories and formatted prompt
-     """
-     hooks = HelenaHooks(alma=alma, auto_learn=False)
-
-     context = UITestContext(
-         task_description=task,
-         task_type="",  # Will be inferred
-         agent_name="helena",
-         project_id=project_id,
-         component_type=component_type,
-         page_url=page_url,
-     )
-
-     memories = hooks.pre_task(context, top_k=top_k)
-     prompt = hooks.format_ui_test_prompt(memories, context)
-
-     return {
-         "memories": memories,
-         "prompt": prompt,
-         "context": context,
-     }
-
-
- def helena_post_task(
-     alma: ALMA,
-     task: str,
-     success: bool,
-     strategy_used: str,
-     selectors_used: Optional[List[str]] = None,
-     accessibility_issues: Optional[List[Dict[str, Any]]] = None,
-     project_id: str = "default",
-     duration_ms: Optional[int] = None,
-     error_message: Optional[str] = None,
- ) -> bool:
-     """
-     Convenience function for Helena post-task hook.
-
-     Quick integration without creating full hooks object.
-
-     Args:
-         alma: ALMA instance
-         task: Task description
-         success: Whether task succeeded
-         strategy_used: Strategy used
-         selectors_used: Selectors used during test
-         accessibility_issues: Any accessibility issues found
-         project_id: Project ID
-         duration_ms: Task duration
-         error_message: Error if failed
-
-     Returns:
-         True if learning was recorded
-     """
-     hooks = HelenaHooks(alma=alma, auto_learn=True)
-
-     context = UITestContext(
-         task_description=task,
-         task_type="",  # Will be inferred
-         agent_name="helena",
-         project_id=project_id,
-     )
-
-     outcome = UITestOutcome(
-         success=success,
-         strategy_used=strategy_used,
-         selectors_used=selectors_used or [],
-         accessibility_issues=accessibility_issues or [],
-         duration_ms=duration_ms,
-         error_message=error_message,
-     )
-
-     return hooks.post_task(context, outcome)
+ """
+ ALMA Helena Integration.
+
+ Helena-specific integration for frontend QA testing with ALMA memory.
+
+ Helena specializes in:
+ - Playwright automation
+ - UI/UX testing
+ - Accessibility validation
+ - Visual regression testing
+ - Form testing patterns
+
+ This module provides Helena-specific memory categories, prompts, and utilities.
+ """
+
+ import logging
+ from dataclasses import dataclass, field
+ from typing import Any, Dict, List, Optional
+
+ from alma.core import ALMA
+ from alma.harness.domains import CodingDomain
+ from alma.integration.claude_agents import (
+     AgentType,
+     ClaudeAgentHooks,
+     TaskContext,
+     TaskOutcome,
+ )
+ from alma.types import MemorySlice
+
+ logger = logging.getLogger(__name__)
+
+
+ # Helena's learning categories
+ HELENA_CATEGORIES = [
+     "testing_strategies",
+     "selector_patterns",
+     "ui_component_patterns",
+     "form_testing",
+     "accessibility_testing",
+ ]
+
+ # Categories Helena should NOT learn
+ HELENA_FORBIDDEN = [
+     "backend_logic",
+     "database_queries",
+     "api_design",
+     "infrastructure",
+ ]
+
+
+ @dataclass
+ class UITestContext(TaskContext):
+     """
+     Helena-specific test context.
+
+     Extends TaskContext with UI testing-specific fields.
+     """
+
+     component_type: Optional[str] = None
+     page_url: Optional[str] = None
+     viewport: Optional[Dict[str, int]] = None
+     browser: str = "chromium"
+     is_accessibility_test: bool = False
+     is_visual_test: bool = False
+
+     def __post_init__(self):
+         # Ensure agent name is helena
+         self.agent_name = "helena"
+         # Set default task type if not specified
+         if not self.task_type:
+             self.task_type = self._infer_task_type()
+
+     def _infer_task_type(self) -> str:
+         """Infer task type from context."""
+         if self.is_accessibility_test:
+             return "accessibility_testing"
+         if self.is_visual_test:
+             return "visual_testing"
+         if self.component_type:
+             if "form" in self.component_type.lower():
+                 return "form_testing"
+             return "ui_component_patterns"
+         return "testing_strategies"
+
+
+ @dataclass
+ class UITestOutcome(TaskOutcome):
+     """
+     Helena-specific test outcome.
+
+     Extends TaskOutcome with UI testing-specific results.
+     """
+
+     selectors_used: List[str] = field(default_factory=list)
+     accessibility_issues: List[Dict[str, Any]] = field(default_factory=list)
+     visual_diffs: List[str] = field(default_factory=list)
+     flaky_elements: List[str] = field(default_factory=list)
+     screenshots: List[str] = field(default_factory=list)
+
+     def __post_init__(self):
+         # Add selectors to tools_used
+         if self.selectors_used and not self.tools_used:
+             self.tools_used = self.selectors_used
+
+
+ class HelenaHooks(ClaudeAgentHooks):
+     """
+     Helena-specific integration hooks.
+
+     Extends ClaudeAgentHooks with UI testing-specific functionality.
+     """
+
+     def __init__(self, alma: ALMA, auto_learn: bool = True):
+         """
+         Initialize Helena hooks.
+
+         Args:
+             alma: ALMA instance
+             auto_learn: Whether to automatically learn from outcomes
+         """
+         harness = CodingDomain.create_helena(alma)
+         super().__init__(
+             alma=alma,
+             agent_type=AgentType.HELENA,
+             harness=harness,
+             auto_learn=auto_learn,
+         )
+
+     def get_selector_patterns(
+         self,
+         component_type: str,
+         top_k: int = 5,
+     ) -> List[Dict[str, Any]]:
+         """
+         Get proven selector patterns for a component type.
+
+         Args:
+             component_type: Type of UI component (button, input, modal, etc.)
+             top_k: Maximum patterns to return
+
+         Returns:
+             List of selector patterns with success rates
+         """
+         memories = self.alma.retrieve(
+             task=f"selector patterns for {component_type}",
+             agent=self.agent_name,
+             top_k=top_k,
+         )
+
+         patterns = []
+         for h in memories.heuristics:
+             if "selector" in h.condition.lower():
+                 patterns.append(
+                     {
+                         "pattern": h.strategy,
+                         "confidence": h.confidence,
+                         "occurrences": h.occurrence_count,
+                     }
+                 )
+
+         return patterns
+
+     def get_form_testing_strategies(self, top_k: int = 5) -> List[Dict[str, Any]]:
+         """
+         Get proven form testing strategies.
+
+         Returns strategies for testing forms, validation, and submissions.
+         """
+         memories = self.alma.retrieve(
+             task="form testing validation submit strategies",
+             agent=self.agent_name,
+             top_k=top_k,
+         )
+
+         strategies = []
+         for h in memories.heuristics:
+             if any(kw in h.condition.lower() for kw in ["form", "validation", "input"]):
+                 strategies.append(
+                     {
+                         "condition": h.condition,
+                         "strategy": h.strategy,
+                         "confidence": h.confidence,
+                     }
+                 )
+
+         return strategies
+
+     def get_accessibility_patterns(self, top_k: int = 5) -> List[Dict[str, Any]]:
+         """
+         Get accessibility testing patterns.
+
+         Returns patterns for ARIA, keyboard navigation, screen readers.
+         """
+         memories = self.alma.retrieve(
+             task="accessibility ARIA keyboard screen reader testing",
+             agent=self.agent_name,
+             top_k=top_k,
+         )
+
+         patterns = []
+         for h in memories.heuristics:
+             if any(kw in h.condition.lower() for kw in ["access", "aria", "keyboard"]):
+                 patterns.append(
+                     {
+                         "condition": h.condition,
+                         "strategy": h.strategy,
+                         "confidence": h.confidence,
+                     }
+                 )
+
+         for dk in memories.domain_knowledge:
+             if dk.domain == "accessibility_testing":
+                 patterns.append(
+                     {
+                         "fact": dk.fact,
+                         "source": dk.source,
+                     }
+                 )
+
+         return patterns
+
+     def record_selector_pattern(
+         self,
+         selector: str,
+         component_type: str,
+         success: bool,
+         stability_score: float = 1.0,
+     ) -> bool:
+         """
+         Record a selector pattern for learning.
+
+         Args:
+             selector: The CSS/XPath selector used
+             component_type: Type of component targeted
+             success: Whether the selector worked
+             stability_score: How stable the selector is (0-1)
+
+         Returns:
+             True if recorded successfully
+         """
+         # Add as domain knowledge with stability metadata
+         description = f"Selector '{selector}' for {component_type}"
+         if stability_score < 0.7:
+             description += " (may be flaky)"
+
+         return self.add_knowledge(
+             domain="selector_patterns",
+             fact=description,
+             source=f"test_run:stability={stability_score:.2f}",
+         )
+
+     def record_accessibility_issue(
+         self,
+         issue_type: str,
+         element: str,
+         fix_suggestion: str,
+     ) -> bool:
+         """
+         Record an accessibility issue found.
+
+         Args:
+             issue_type: Type of issue (ARIA, contrast, keyboard, etc.)
+             element: Element with the issue
+             fix_suggestion: How to fix it
+
+         Returns:
+             True if recorded successfully
+         """
+         fact = f"Issue: {issue_type} on {element}. Fix: {fix_suggestion}"
+         return self.add_knowledge(
+             domain="accessibility_testing",
+             fact=fact,
+             source="accessibility_audit",
+         )
+
+     def format_ui_test_prompt(
+         self,
+         memories: MemorySlice,
+         test_context: UITestContext,
+     ) -> str:
+         """
+         Format memories for Helena's UI testing prompt.
+
+         Provides Helena-specific formatting with test context.
+         """
+         sections = []
+
+         # Base memory formatting
+         base_format = self.format_memories_for_prompt(memories)
+         if base_format:
+             sections.append(base_format)
+
+         # Add test context
+         sections.append("\n## Current Test Context")
+         sections.append(f"- **Task**: {test_context.task_description}")
+         sections.append(f"- **Task Type**: {test_context.task_type}")
+
+         if test_context.component_type:
+             sections.append(f"- **Component**: {test_context.component_type}")
+         if test_context.page_url:
+             sections.append(f"- **Page**: {test_context.page_url}")
+         if test_context.viewport:
+             sections.append(
+                 f"- **Viewport**: {test_context.viewport.get('width', '?')}x"
+                 f"{test_context.viewport.get('height', '?')}"
+             )
+
+         if test_context.is_accessibility_test:
+             sections.append("- **Focus**: Accessibility validation")
+         if test_context.is_visual_test:
+             sections.append("- **Focus**: Visual regression testing")
+
+         return "\n".join(sections)
+
+
+ def create_helena_hooks(alma: ALMA, auto_learn: bool = True) -> HelenaHooks:
+     """
+     Convenience function to create Helena hooks.
+
+     Args:
+         alma: ALMA instance
+         auto_learn: Whether to automatically learn
+
+     Returns:
+         Configured HelenaHooks
+     """
+     return HelenaHooks(alma=alma, auto_learn=auto_learn)
+
+
+ def helena_pre_task(
+     alma: ALMA,
+     task: str,
+     component_type: Optional[str] = None,
+     page_url: Optional[str] = None,
+     project_id: str = "default",
+     top_k: int = 5,
+ ) -> Dict[str, Any]:
+     """
+     Convenience function for Helena pre-task hook.
+
+     Quick integration without creating full hooks object.
+
+     Args:
+         alma: ALMA instance
+         task: Task description
+         component_type: Optional component type
+         page_url: Optional page URL
+         project_id: Project ID
+         top_k: Max memories per type
+
+     Returns:
+         Dict with memories and formatted prompt
+     """
+     hooks = HelenaHooks(alma=alma, auto_learn=False)
+
+     context = UITestContext(
+         task_description=task,
+         task_type="",  # Will be inferred
+         agent_name="helena",
+         project_id=project_id,
+         component_type=component_type,
+         page_url=page_url,
+     )
+
+     memories = hooks.pre_task(context, top_k=top_k)
+     prompt = hooks.format_ui_test_prompt(memories, context)
+
+     return {
+         "memories": memories,
+         "prompt": prompt,
+         "context": context,
+     }
+
+
+ def helena_post_task(
+     alma: ALMA,
+     task: str,
+     success: bool,
+     strategy_used: str,
+     selectors_used: Optional[List[str]] = None,
+     accessibility_issues: Optional[List[Dict[str, Any]]] = None,
+     project_id: str = "default",
+     duration_ms: Optional[int] = None,
+     error_message: Optional[str] = None,
+ ) -> bool:
+     """
+     Convenience function for Helena post-task hook.
+
+     Quick integration without creating full hooks object.
+
+     Args:
+         alma: ALMA instance
+         task: Task description
+         success: Whether task succeeded
+         strategy_used: Strategy used
+         selectors_used: Selectors used during test
+         accessibility_issues: Any accessibility issues found
+         project_id: Project ID
+         duration_ms: Task duration
+         error_message: Error if failed
+
+     Returns:
+         True if learning was recorded
+     """
+     hooks = HelenaHooks(alma=alma, auto_learn=True)
+
+     context = UITestContext(
+         task_description=task,
+         task_type="",  # Will be inferred
+         agent_name="helena",
+         project_id=project_id,
+     )
+
+     outcome = UITestOutcome(
+         success=success,
+         strategy_used=strategy_used,
+         selectors_used=selectors_used or [],
+         accessibility_issues=accessibility_issues or [],
+         duration_ms=duration_ms,
+         error_message=error_message,
+     )
+
+     return hooks.post_task(context, outcome)
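
For orientation, a minimal usage sketch of the two convenience functions shipped in this file, helena_pre_task and helena_post_task. It assumes an already-configured ALMA instance bound to the name alma (construction of ALMA is not shown in this diff); the task description, URL, selectors, and duration below are illustrative placeholders, not values taken from the package.

from alma.integration.helena import helena_pre_task, helena_post_task

# Before the test run: pull relevant memories and a formatted prompt section.
pre = helena_pre_task(
    alma,  # assumed: an ALMA instance configured elsewhere
    task="Test checkout form validation errors",
    component_type="form",
    page_url="https://app.example.test/checkout",
)
print(pre["prompt"])  # memory-augmented context for the UI test

# ... run the Playwright test here ...

# After the test run: record the outcome so future retrievals can learn from it.
recorded = helena_post_task(
    alma,
    task="Test checkout form validation errors",
    success=True,
    strategy_used="fill invalid inputs first, then assert inline error messages",
    selectors_used=["[data-testid='checkout-form']", "button[type='submit']"],
    duration_ms=4200,
)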