alma-memory 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,413 @@
1
+ """
2
+ ALMA Helena Integration.
3
+
4
+ Helena-specific integration for frontend QA testing with ALMA memory.
5
+
6
+ Helena specializes in:
7
+ - Playwright automation
8
+ - UI/UX testing
9
+ - Accessibility validation
10
+ - Visual regression testing
11
+ - Form testing patterns
12
+
13
+ This module provides Helena-specific memory categories, prompts, and utilities.
14
+ """
15
+
16
+ import logging
17
+ from typing import Optional, Dict, Any, List
18
+ from dataclasses import dataclass, field
19
+
20
+ from alma.core import ALMA
21
+ from alma.types import MemorySlice
22
+ from alma.harness.domains import CodingDomain
23
+ from alma.integration.claude_agents import (
24
+ ClaudeAgentHooks,
25
+ TaskContext,
26
+ TaskOutcome,
27
+ AgentType,
28
+ )
29
+
30
+ logger = logging.getLogger(__name__)
31
+
32
+
33
# Helena's learning categories: memory domains Helena is allowed to learn into.
# NOTE(review): consumers of these lists are outside this module — presumably the
# harness uses them to scope what gets stored; confirm against CodingDomain.
HELENA_CATEGORIES = [
    "testing_strategies",
    "selector_patterns",
    "ui_component_patterns",
    "form_testing",
    "accessibility_testing",
]

# Categories Helena should NOT learn (outside her frontend-QA specialty).
HELENA_FORBIDDEN = [
    "backend_logic",
    "database_queries",
    "api_design",
    "infrastructure",
]
49
+
50
+
51
@dataclass
class UITestContext(TaskContext):
    """Test context specialized for Helena's UI testing.

    Extends TaskContext with fields describing the component, page, viewport,
    and test focus (accessibility / visual) of a single UI test run.
    """
    component_type: Optional[str] = None
    page_url: Optional[str] = None
    viewport: Optional[Dict[str, int]] = None
    browser: str = "chromium"
    is_accessibility_test: bool = False
    is_visual_test: bool = False

    def __post_init__(self):
        # This context always belongs to Helena, regardless of what the caller set.
        self.agent_name = "helena"
        # Fill in a task type from the other fields when the caller left it blank.
        if not self.task_type:
            self.task_type = self._infer_task_type()

    def _infer_task_type(self) -> str:
        """Derive a task-type label from the flags and component info on this context."""
        if self.is_accessibility_test:
            return "accessibility_testing"
        if self.is_visual_test:
            return "visual_testing"
        if not self.component_type:
            return "testing_strategies"
        # Component known: distinguish form testing from general component work.
        if "form" in self.component_type.lower():
            return "form_testing"
        return "ui_component_patterns"
83
+
84
+
85
@dataclass
class UITestOutcome(TaskOutcome):
    """Test outcome specialized for Helena's UI testing.

    Extends TaskOutcome with UI-specific results: the selectors exercised,
    accessibility findings, visual diffs, flaky elements, and screenshots.
    """
    selectors_used: List[str] = field(default_factory=list)
    accessibility_issues: List[Dict[str, Any]] = field(default_factory=list)
    visual_diffs: List[str] = field(default_factory=list)
    flaky_elements: List[str] = field(default_factory=list)
    screenshots: List[str] = field(default_factory=list)

    def __post_init__(self):
        # Surface the selectors as tools_used when the caller did not set any.
        # Copy the list: the original code aliased the same list object, so a
        # later mutation of tools_used would silently mutate selectors_used too.
        if self.selectors_used and not self.tools_used:
            self.tools_used = list(self.selectors_used)
102
+
103
+
104
class HelenaHooks(ClaudeAgentHooks):
    """Helena-specific ALMA integration hooks.

    Builds on ClaudeAgentHooks with helpers for retrieving and recording
    UI-testing knowledge: selector patterns, form testing strategies, and
    accessibility findings.
    """

    def __init__(self, alma: ALMA, auto_learn: bool = True):
        """Bind the hooks to *alma* using Helena's coding-domain harness.

        Args:
            alma: ALMA instance
            auto_learn: Whether to automatically learn from outcomes
        """
        super().__init__(
            alma=alma,
            agent_type=AgentType.HELENA,
            harness=CodingDomain.create_helena(alma),
            auto_learn=auto_learn,
        )

    def get_selector_patterns(
        self,
        component_type: str,
        top_k: int = 5,
    ) -> List[Dict[str, Any]]:
        """Return proven selector patterns for a component type.

        Args:
            component_type: Type of UI component (button, input, modal, etc.)
            top_k: Maximum patterns to return

        Returns:
            List of selector patterns with success rates
        """
        memories = self.alma.retrieve(
            task=f"selector patterns for {component_type}",
            agent=self.agent_name,
            top_k=top_k,
        )
        # Keep only heuristics whose condition actually talks about selectors.
        return [
            {
                "pattern": heuristic.strategy,
                "confidence": heuristic.confidence,
                "occurrences": heuristic.occurrence_count,
            }
            for heuristic in memories.heuristics
            if "selector" in heuristic.condition.lower()
        ]

    def get_form_testing_strategies(self, top_k: int = 5) -> List[Dict[str, Any]]:
        """Return proven form testing strategies.

        Covers strategies for testing forms, validation, and submissions.
        """
        memories = self.alma.retrieve(
            task="form testing validation submit strategies",
            agent=self.agent_name,
            top_k=top_k,
        )
        form_keywords = ["form", "validation", "input"]
        return [
            {
                "condition": heuristic.condition,
                "strategy": heuristic.strategy,
                "confidence": heuristic.confidence,
            }
            for heuristic in memories.heuristics
            if any(kw in heuristic.condition.lower() for kw in form_keywords)
        ]

    def get_accessibility_patterns(self, top_k: int = 5) -> List[Dict[str, Any]]:
        """Return accessibility testing patterns.

        Covers ARIA, keyboard navigation, and screen-reader related patterns,
        combining matching heuristics with domain knowledge entries.
        """
        memories = self.alma.retrieve(
            task="accessibility ARIA keyboard screen reader testing",
            agent=self.agent_name,
            top_k=top_k,
        )
        a11y_keywords = ["access", "aria", "keyboard"]
        heuristic_hits = [
            {
                "condition": heuristic.condition,
                "strategy": heuristic.strategy,
                "confidence": heuristic.confidence,
            }
            for heuristic in memories.heuristics
            if any(kw in heuristic.condition.lower() for kw in a11y_keywords)
        ]
        knowledge_hits = [
            {
                "fact": entry.fact,
                "source": entry.source,
            }
            for entry in memories.domain_knowledge
            if entry.domain == "accessibility_testing"
        ]
        # Heuristics first, then domain knowledge, matching retrieval order.
        return heuristic_hits + knowledge_hits

    def record_selector_pattern(
        self,
        selector: str,
        component_type: str,
        success: bool,
        stability_score: float = 1.0,
    ) -> bool:
        """Record a selector pattern for learning.

        Args:
            selector: The CSS/XPath selector used
            component_type: Type of component targeted
            success: Whether the selector worked
            stability_score: How stable the selector is (0-1)

        Returns:
            True if recorded successfully

        NOTE(review): *success* is accepted but not used by this method in the
        current implementation — confirm whether it should affect the record.
        """
        # Store as domain knowledge, flagging low-stability selectors as flaky.
        flaky_note = " (may be flaky)" if stability_score < 0.7 else ""
        return self.add_knowledge(
            domain="selector_patterns",
            fact=f"Selector '{selector}' for {component_type}" + flaky_note,
            source=f"test_run:stability={stability_score:.2f}",
        )

    def record_accessibility_issue(
        self,
        issue_type: str,
        element: str,
        fix_suggestion: str,
    ) -> bool:
        """Record an accessibility issue found.

        Args:
            issue_type: Type of issue (ARIA, contrast, keyboard, etc.)
            element: Element with the issue
            fix_suggestion: How to fix it

        Returns:
            True if recorded successfully
        """
        return self.add_knowledge(
            domain="accessibility_testing",
            fact=f"Issue: {issue_type} on {element}. Fix: {fix_suggestion}",
            source="accessibility_audit",
        )

    def format_ui_test_prompt(
        self,
        memories: MemorySlice,
        test_context: UITestContext,
    ) -> str:
        """Format memories for Helena's UI testing prompt.

        Combines the base memory formatting with a Helena-specific section
        describing the current test context.
        """
        lines: List[str] = []

        # Base memory formatting (skipped when it produces nothing).
        base = self.format_memories_for_prompt(memories)
        if base:
            lines.append(base)

        # Current test context section.
        lines.append("\n## Current Test Context")
        lines.append(f"- **Task**: {test_context.task_description}")
        lines.append(f"- **Task Type**: {test_context.task_type}")

        if test_context.component_type:
            lines.append(f"- **Component**: {test_context.component_type}")
        if test_context.page_url:
            lines.append(f"- **Page**: {test_context.page_url}")
        if test_context.viewport:
            width = test_context.viewport.get('width', '?')
            height = test_context.viewport.get('height', '?')
            lines.append(f"- **Viewport**: {width}x{height}")

        if test_context.is_accessibility_test:
            lines.append("- **Focus**: Accessibility validation")
        if test_context.is_visual_test:
            lines.append("- **Focus**: Visual regression testing")

        return "\n".join(lines)
304
+
305
+
306
def create_helena_hooks(alma: ALMA, auto_learn: bool = True) -> HelenaHooks:
    """Build a ready-to-use HelenaHooks instance.

    Args:
        alma: ALMA instance
        auto_learn: Whether to automatically learn

    Returns:
        Configured HelenaHooks
    """
    hooks = HelenaHooks(alma=alma, auto_learn=auto_learn)
    return hooks
318
+
319
+
320
def helena_pre_task(
    alma: ALMA,
    task: str,
    component_type: Optional[str] = None,
    page_url: Optional[str] = None,
    project_id: str = "default",
    top_k: int = 5,
) -> Dict[str, Any]:
    """Run Helena's pre-task hook without managing a hooks object.

    One-shot convenience wrapper: builds throwaway hooks (learning disabled),
    retrieves memories for the task, and formats them into a prompt section.

    Args:
        alma: ALMA instance
        task: Task description
        component_type: Optional component type
        page_url: Optional page URL
        project_id: Project ID
        top_k: Max memories per type

    Returns:
        Dict with memories and formatted prompt
    """
    # Learning is off here; this helper only reads memory.
    helena = HelenaHooks(alma=alma, auto_learn=False)

    context = UITestContext(
        task_description=task,
        task_type="",  # left blank so __post_init__ infers it
        agent_name="helena",
        project_id=project_id,
        component_type=component_type,
        page_url=page_url,
    )

    memories = helena.pre_task(context, top_k=top_k)

    return {
        "memories": memories,
        "prompt": helena.format_ui_test_prompt(memories, context),
        "context": context,
    }
363
+
364
+
365
def helena_post_task(
    alma: ALMA,
    task: str,
    success: bool,
    strategy_used: str,
    selectors_used: Optional[List[str]] = None,
    accessibility_issues: Optional[List[Dict[str, Any]]] = None,
    project_id: str = "default",
    duration_ms: Optional[int] = None,
    error_message: Optional[str] = None,
) -> bool:
    """Run Helena's post-task hook without managing a hooks object.

    One-shot convenience wrapper: builds throwaway hooks (learning enabled)
    and records the outcome of a completed UI test.

    Args:
        alma: ALMA instance
        task: Task description
        success: Whether task succeeded
        strategy_used: Strategy used
        selectors_used: Selectors used during test
        accessibility_issues: Any accessibility issues found
        project_id: Project ID
        duration_ms: Task duration
        error_message: Error if failed

    Returns:
        True if learning was recorded
    """
    # Learning is on: the whole point of this helper is to record the outcome.
    helena = HelenaHooks(alma=alma, auto_learn=True)

    context = UITestContext(
        task_description=task,
        task_type="",  # left blank so __post_init__ infers it
        agent_name="helena",
        project_id=project_id,
    )

    outcome = UITestOutcome(
        success=success,
        strategy_used=strategy_used,
        selectors_used=selectors_used or [],
        accessibility_issues=accessibility_issues or [],
        duration_ms=duration_ms,
        error_message=error_message,
    )

    return helena.post_task(context, outcome)