alma-memory 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alma/__init__.py +75 -0
- alma/config/__init__.py +5 -0
- alma/config/loader.py +156 -0
- alma/core.py +322 -0
- alma/harness/__init__.py +35 -0
- alma/harness/base.py +377 -0
- alma/harness/domains.py +689 -0
- alma/integration/__init__.py +62 -0
- alma/integration/claude_agents.py +432 -0
- alma/integration/helena.py +413 -0
- alma/integration/victor.py +447 -0
- alma/learning/__init__.py +86 -0
- alma/learning/forgetting.py +1396 -0
- alma/learning/heuristic_extractor.py +374 -0
- alma/learning/protocols.py +326 -0
- alma/learning/validation.py +341 -0
- alma/mcp/__init__.py +45 -0
- alma/mcp/__main__.py +155 -0
- alma/mcp/resources.py +121 -0
- alma/mcp/server.py +533 -0
- alma/mcp/tools.py +374 -0
- alma/retrieval/__init__.py +53 -0
- alma/retrieval/cache.py +1062 -0
- alma/retrieval/embeddings.py +202 -0
- alma/retrieval/engine.py +287 -0
- alma/retrieval/scoring.py +334 -0
- alma/storage/__init__.py +20 -0
- alma/storage/azure_cosmos.py +972 -0
- alma/storage/base.py +372 -0
- alma/storage/file_based.py +583 -0
- alma/storage/sqlite_local.py +912 -0
- alma/types.py +216 -0
- alma_memory-0.2.0.dist-info/METADATA +327 -0
- alma_memory-0.2.0.dist-info/RECORD +36 -0
- alma_memory-0.2.0.dist-info/WHEEL +5 -0
- alma_memory-0.2.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ALMA Agent Integration.
|
|
3
|
+
|
|
4
|
+
Provides integration hooks for Claude Code agents (Helena, Victor, etc).
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from alma.integration.claude_agents import (
|
|
8
|
+
AgentType,
|
|
9
|
+
TaskContext,
|
|
10
|
+
TaskOutcome,
|
|
11
|
+
ClaudeAgentHooks,
|
|
12
|
+
AgentIntegration,
|
|
13
|
+
create_integration,
|
|
14
|
+
)
|
|
15
|
+
from alma.integration.helena import (
|
|
16
|
+
UITestContext,
|
|
17
|
+
UITestOutcome,
|
|
18
|
+
HelenaHooks,
|
|
19
|
+
create_helena_hooks,
|
|
20
|
+
helena_pre_task,
|
|
21
|
+
helena_post_task,
|
|
22
|
+
HELENA_CATEGORIES,
|
|
23
|
+
HELENA_FORBIDDEN,
|
|
24
|
+
)
|
|
25
|
+
from alma.integration.victor import (
|
|
26
|
+
APITestContext,
|
|
27
|
+
APITestOutcome,
|
|
28
|
+
VictorHooks,
|
|
29
|
+
create_victor_hooks,
|
|
30
|
+
victor_pre_task,
|
|
31
|
+
victor_post_task,
|
|
32
|
+
VICTOR_CATEGORIES,
|
|
33
|
+
VICTOR_FORBIDDEN,
|
|
34
|
+
)
|
|
35
|
+
|
|
36
|
+
__all__ = [
|
|
37
|
+
# Core Integration
|
|
38
|
+
"AgentType",
|
|
39
|
+
"TaskContext",
|
|
40
|
+
"TaskOutcome",
|
|
41
|
+
"ClaudeAgentHooks",
|
|
42
|
+
"AgentIntegration",
|
|
43
|
+
"create_integration",
|
|
44
|
+
# Helena Integration
|
|
45
|
+
"UITestContext",
|
|
46
|
+
"UITestOutcome",
|
|
47
|
+
"HelenaHooks",
|
|
48
|
+
"create_helena_hooks",
|
|
49
|
+
"helena_pre_task",
|
|
50
|
+
"helena_post_task",
|
|
51
|
+
"HELENA_CATEGORIES",
|
|
52
|
+
"HELENA_FORBIDDEN",
|
|
53
|
+
# Victor Integration
|
|
54
|
+
"APITestContext",
|
|
55
|
+
"APITestOutcome",
|
|
56
|
+
"VictorHooks",
|
|
57
|
+
"create_victor_hooks",
|
|
58
|
+
"victor_pre_task",
|
|
59
|
+
"victor_post_task",
|
|
60
|
+
"VICTOR_CATEGORIES",
|
|
61
|
+
"VICTOR_FORBIDDEN",
|
|
62
|
+
]
|
|
@@ -0,0 +1,432 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ALMA Claude Code Integration.
|
|
3
|
+
|
|
4
|
+
Provides hooks for integrating ALMA with Claude Code agents (Helena, Victor, etc).
|
|
5
|
+
These hooks enable agents to:
|
|
6
|
+
- Retrieve relevant memories before executing tasks
|
|
7
|
+
- Learn from task outcomes automatically
|
|
8
|
+
- Access domain-specific heuristics and patterns
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import logging
|
|
12
|
+
from typing import Optional, Dict, Any, List
|
|
13
|
+
from dataclasses import dataclass, field
|
|
14
|
+
from datetime import datetime, timezone
|
|
15
|
+
from enum import Enum
|
|
16
|
+
|
|
17
|
+
from alma.core import ALMA
|
|
18
|
+
from alma.types import MemorySlice
|
|
19
|
+
from alma.harness.base import Harness, Context, RunResult
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class AgentType(Enum):
    """Supported Claude Code agent types."""
    HELENA = "helena"  # UI-testing agent (see alma.integration.helena: UITestContext/UITestOutcome)
    VICTOR = "victor"  # API-testing agent (see alma.integration.victor: APITestContext/APITestOutcome)
    CLARA = "clara"
    ALEX = "alex"
    CUSTOM = "custom"  # catch-all for agents not covered by the presets above
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@dataclass
class TaskContext:
    """
    Context describing a single Claude Code agent task.

    Bundles everything needed for memory retrieval before the task runs
    and for learning once it finishes.
    """
    task_description: str
    task_type: str
    agent_name: str
    project_id: str
    user_id: Optional[str] = None
    session_id: Optional[str] = None
    inputs: Dict[str, Any] = field(default_factory=dict)
    constraints: List[str] = field(default_factory=list)
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_harness_context(self) -> Context:
        """Build the equivalent Harness ``Context`` from this task context."""
        # Assemble the keyword arguments first, then construct; the field
        # mapping (task_description -> task, rest one-to-one) stays explicit.
        ctx_kwargs = dict(
            task=self.task_description,
            user_id=self.user_id,
            project_id=self.project_id,
            session_id=self.session_id,
            inputs=self.inputs,
            constraints=self.constraints,
            timestamp=self.timestamp,
        )
        return Context(**ctx_kwargs)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@dataclass
class TaskOutcome:
    """
    Outcome of a completed Claude Code agent task.

    Feeds the learning step that runs after execution.
    """
    success: bool
    strategy_used: str
    output: Any = None
    tools_used: List[str] = field(default_factory=list)
    duration_ms: Optional[int] = None
    error_message: Optional[str] = None
    feedback: Optional[str] = None
    reflections: List[str] = field(default_factory=list)

    def to_run_result(self) -> RunResult:
        """Build the equivalent Harness ``RunResult`` from this outcome."""
        # Note the single rename: error_message here becomes `error` on the
        # harness side; every other field maps one-to-one.
        payload = {
            "success": self.success,
            "output": self.output,
            "reflections": self.reflections,
            "tools_used": self.tools_used,
            "duration_ms": self.duration_ms,
            "error": self.error_message,
        }
        return RunResult(**payload)
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
class ClaudeAgentHooks:
    """
    Integration hooks for Claude Code agents.

    Provides a consistent interface for agents to interact with ALMA.

    Usage:
        hooks = ClaudeAgentHooks(alma, agent_type=AgentType.HELENA)

        # Before task execution
        memories = hooks.pre_task(task_context)
        prompt_enhancement = hooks.format_memories_for_prompt(memories)

        # After task execution
        hooks.post_task(task_context, task_outcome)
    """

    def __init__(
        self,
        alma: ALMA,
        agent_type: AgentType,
        harness: Optional[Harness] = None,
        auto_learn: bool = True,
    ):
        """
        Initialize hooks for an agent.

        Args:
            alma: ALMA instance for memory operations
            agent_type: Type of Claude Code agent
            harness: Optional pre-configured harness
            auto_learn: Whether to automatically learn from outcomes
        """
        self.alma = alma
        self.agent_type = agent_type
        self.agent_name = agent_type.value
        self.harness = harness
        self.auto_learn = auto_learn
        # task key -> wall-clock start time; written by pre_task and always
        # reclaimed by post_task (used to derive duration_ms when the caller
        # does not supply one).
        self._task_start_times: Dict[str, datetime] = {}

    @staticmethod
    def _task_key(context: TaskContext) -> str:
        """Stable key identifying one task invocation (project id + timestamp)."""
        return f"{context.project_id}:{context.timestamp.isoformat()}"

    def pre_task(
        self,
        context: TaskContext,
        top_k: int = 5,
    ) -> MemorySlice:
        """
        Pre-task hook: Retrieve relevant memories.

        Called before the agent executes a task to get relevant
        heuristics, patterns, and domain knowledge.

        Args:
            context: Task context
            top_k: Maximum items per memory type

        Returns:
            MemorySlice with relevant memories
        """
        # Track start time for duration calculation in post_task.
        self._task_start_times[self._task_key(context)] = datetime.now(timezone.utc)

        logger.debug(
            f"[{self.agent_name}] Pre-task: Retrieving memories for '{context.task_description[:50]}...'"
        )

        if self.harness:
            # Use harness pre_run method
            harness_context = context.to_harness_context()
            return self.harness.pre_run(harness_context)
        else:
            # Direct ALMA retrieval
            return self.alma.retrieve(
                task=context.task_description,
                agent=self.agent_name,
                user_id=context.user_id,
                top_k=top_k,
            )

    def post_task(
        self,
        context: TaskContext,
        outcome: TaskOutcome,
    ) -> bool:
        """
        Post-task hook: Learn from the outcome.

        Called after the agent completes a task to record the outcome
        and potentially update heuristics.

        Args:
            context: Original task context
            outcome: Task outcome

        Returns:
            True if learning was recorded, False otherwise
        """
        # BUGFIX: always reclaim the start-time entry recorded by pre_task,
        # even when auto-learn is off or the caller supplied duration_ms.
        # Previously the entry was popped only when duration_ms was None (and
        # never on the early return below), so _task_start_times grew without
        # bound across tasks.
        start = self._task_start_times.pop(self._task_key(context), None)

        if not self.auto_learn:
            logger.debug(f"[{self.agent_name}] Auto-learn disabled, skipping")
            return False

        # Calculate duration if not provided
        if outcome.duration_ms is None and start is not None:
            outcome.duration_ms = int(
                (datetime.now(timezone.utc) - start).total_seconds() * 1000
            )

        logger.debug(
            f"[{self.agent_name}] Post-task: Recording {'success' if outcome.success else 'failure'} "
            f"for '{context.task_description[:50]}...'"
        )

        if self.harness:
            # Use harness post_run method
            harness_context = context.to_harness_context()
            run_result = outcome.to_run_result()
            self.harness.post_run(harness_context, run_result)
            return True
        else:
            # Direct ALMA learning; NOTE(review): assumes ALMA.learn returns a
            # bool as the annotation implies -- confirm against alma.core.
            return self.alma.learn(
                agent=self.agent_name,
                task=context.task_description,
                outcome="success" if outcome.success else "failure",
                strategy_used=outcome.strategy_used,
                task_type=context.task_type,
                duration_ms=outcome.duration_ms,
                error_message=outcome.error_message,
                feedback=outcome.feedback,
            )

    def format_memories_for_prompt(
        self,
        memories: MemorySlice,
        include_section_headers: bool = True,
    ) -> str:
        """
        Format memories for injection into agent prompt.

        Converts MemorySlice into a formatted string suitable for
        inclusion in the agent's system prompt.

        Args:
            memories: Retrieved memory slice
            include_section_headers: Whether to include markdown headers

        Returns:
            Formatted string for prompt injection (empty string when the
            slice holds no items)
        """
        if memories.total_items == 0:
            return ""

        sections = []

        if include_section_headers:
            sections.append("## Relevant Memory (from past runs)")

        # Heuristics
        if memories.heuristics:
            if include_section_headers:
                sections.append("\n### Proven Strategies:")
            for h in memories.heuristics:
                confidence_pct = int(h.confidence * 100)
                sections.append(
                    f"- **{h.condition}**: {h.strategy} (confidence: {confidence_pct}%)"
                )

        # Anti-patterns
        if memories.anti_patterns:
            if include_section_headers:
                sections.append("\n### Avoid These:")
            for ap in memories.anti_patterns:
                sections.append(f"- ⚠️ {ap.pattern}: {ap.consequence}")

        # Domain knowledge
        if memories.domain_knowledge:
            if include_section_headers:
                sections.append("\n### Domain Knowledge:")
            for dk in memories.domain_knowledge:
                sections.append(f"- [{dk.domain}] {dk.fact}")

        # User preferences
        if memories.user_preferences:
            if include_section_headers:
                sections.append("\n### User Preferences:")
            for up in memories.user_preferences:
                sections.append(f"- [{up.category}] {up.preference}")

        # Recent outcomes
        if memories.recent_outcomes:
            if include_section_headers:
                sections.append("\n### Recent Outcomes:")
            for o in memories.recent_outcomes[:3]:  # Limit to 3 most recent
                status = "✓" if o.success else "✗"
                sections.append(
                    f"- {status} {o.task_type}: {o.task_description[:50]}..."
                )

        return "\n".join(sections)

    def get_agent_stats(self) -> Dict[str, Any]:
        """Get memory statistics for this agent."""
        return self.alma.get_stats(agent=self.agent_name)

    def add_knowledge(
        self,
        domain: str,
        fact: str,
        source: str = "agent_discovered",
    ) -> bool:
        """
        Add domain knowledge discovered by the agent.

        Args:
            domain: Knowledge domain (must be within agent's scope)
            fact: The fact to remember
            source: How this was discovered

        Returns:
            True if knowledge was added, False if scope violation
        """
        result = self.alma.add_domain_knowledge(
            agent=self.agent_name,
            domain=domain,
            fact=fact,
            source=source,
        )
        return result is not None
|
|
321
|
+
|
|
322
|
+
|
|
323
|
+
class AgentIntegration:
    """
    High-level integration manager for multiple Claude Code agents.

    Manages hooks for all registered agents and provides a unified
    interface for the Claude Code runtime.

    Usage:
        integration = AgentIntegration(alma)
        integration.register_agent(AgentType.HELENA, helena_harness)
        integration.register_agent(AgentType.VICTOR, victor_harness)

        # Get hooks for a specific agent
        helena_hooks = integration.get_hooks("helena")
    """

    def __init__(self, alma: ALMA):
        """
        Initialize the integration manager.

        Args:
            alma: ALMA instance for memory operations
        """
        self.alma = alma
        # Registry keyed by the agent's name (AgentType.value).
        self._agents: Dict[str, ClaudeAgentHooks] = {}

    def register_agent(
        self,
        agent_type: AgentType,
        harness: Optional[Harness] = None,
        auto_learn: bool = True,
    ) -> ClaudeAgentHooks:
        """
        Register an agent for integration.

        Re-registering the same agent type replaces the previous hooks.

        Args:
            agent_type: Type of agent
            harness: Optional pre-configured harness
            auto_learn: Whether to automatically learn from outcomes

        Returns:
            ClaudeAgentHooks for the agent
        """
        name = agent_type.value
        self._agents[name] = ClaudeAgentHooks(
            alma=self.alma,
            agent_type=agent_type,
            harness=harness,
            auto_learn=auto_learn,
        )
        logger.info(f"Registered agent: {agent_type.value}")
        return self._agents[name]

    def get_hooks(self, agent_name: str) -> Optional[ClaudeAgentHooks]:
        """
        Get hooks for a specific agent.

        Args:
            agent_name: Name of the agent

        Returns:
            ClaudeAgentHooks or None if not registered
        """
        return self._agents.get(agent_name)

    def list_agents(self) -> List[str]:
        """List all registered agents."""
        return [*self._agents]

    def get_all_stats(self) -> Dict[str, Dict[str, Any]]:
        """Get memory statistics for all registered agents."""
        stats: Dict[str, Dict[str, Any]] = {}
        for name, hooks in self._agents.items():
            stats[name] = hooks.get_agent_stats()
        return stats
|
|
398
|
+
|
|
399
|
+
|
|
400
|
+
def create_integration(
    alma: ALMA,
    agents: Optional[List[AgentType]] = None,
) -> AgentIntegration:
    """
    Convenience function to create an integration with default agents.

    Args:
        alma: ALMA instance
        agents: List of agents to register, or None for defaults (Helena, Victor)

    Returns:
        Configured AgentIntegration
    """
    from alma.harness.domains import CodingDomain

    integration = AgentIntegration(alma)

    selected = [AgentType.HELENA, AgentType.VICTOR] if agents is None else agents

    # Agent types that ship with a pre-built harness, mapped to their factory.
    harness_factories = {
        AgentType.HELENA: CodingDomain.create_helena,
        AgentType.VICTOR: CodingDomain.create_victor,
    }

    for agent_type in selected:
        factory = harness_factories.get(agent_type)
        if factory is not None:
            integration.register_agent(agent_type, factory(alma))
        else:
            # Register without harness for custom agents
            integration.register_agent(agent_type)

    return integration
|