alma-memory 0.5.1__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. alma/__init__.py +296 -226
  2. alma/compression/__init__.py +33 -0
  3. alma/compression/pipeline.py +980 -0
  4. alma/confidence/__init__.py +47 -47
  5. alma/confidence/engine.py +540 -540
  6. alma/confidence/types.py +351 -351
  7. alma/config/loader.py +157 -157
  8. alma/consolidation/__init__.py +23 -23
  9. alma/consolidation/engine.py +678 -678
  10. alma/consolidation/prompts.py +84 -84
  11. alma/core.py +1189 -430
  12. alma/domains/__init__.py +30 -30
  13. alma/domains/factory.py +359 -359
  14. alma/domains/schemas.py +448 -448
  15. alma/domains/types.py +272 -272
  16. alma/events/__init__.py +75 -75
  17. alma/events/emitter.py +285 -284
  18. alma/events/storage_mixin.py +246 -246
  19. alma/events/types.py +126 -126
  20. alma/events/webhook.py +425 -425
  21. alma/exceptions.py +49 -49
  22. alma/extraction/__init__.py +31 -31
  23. alma/extraction/auto_learner.py +265 -265
  24. alma/extraction/extractor.py +420 -420
  25. alma/graph/__init__.py +106 -106
  26. alma/graph/backends/__init__.py +32 -32
  27. alma/graph/backends/kuzu.py +624 -624
  28. alma/graph/backends/memgraph.py +432 -432
  29. alma/graph/backends/memory.py +236 -236
  30. alma/graph/backends/neo4j.py +417 -417
  31. alma/graph/base.py +159 -159
  32. alma/graph/extraction.py +198 -198
  33. alma/graph/store.py +860 -860
  34. alma/harness/__init__.py +35 -35
  35. alma/harness/base.py +386 -386
  36. alma/harness/domains.py +705 -705
  37. alma/initializer/__init__.py +37 -37
  38. alma/initializer/initializer.py +418 -418
  39. alma/initializer/types.py +250 -250
  40. alma/integration/__init__.py +62 -62
  41. alma/integration/claude_agents.py +444 -444
  42. alma/integration/helena.py +423 -423
  43. alma/integration/victor.py +471 -471
  44. alma/learning/__init__.py +101 -86
  45. alma/learning/decay.py +878 -0
  46. alma/learning/forgetting.py +1446 -1446
  47. alma/learning/heuristic_extractor.py +390 -390
  48. alma/learning/protocols.py +374 -374
  49. alma/learning/validation.py +346 -346
  50. alma/mcp/__init__.py +123 -45
  51. alma/mcp/__main__.py +156 -156
  52. alma/mcp/resources.py +122 -122
  53. alma/mcp/server.py +955 -591
  54. alma/mcp/tools.py +3254 -509
  55. alma/observability/__init__.py +91 -84
  56. alma/observability/config.py +302 -302
  57. alma/observability/guidelines.py +170 -0
  58. alma/observability/logging.py +424 -424
  59. alma/observability/metrics.py +583 -583
  60. alma/observability/tracing.py +440 -440
  61. alma/progress/__init__.py +21 -21
  62. alma/progress/tracker.py +607 -607
  63. alma/progress/types.py +250 -250
  64. alma/retrieval/__init__.py +134 -53
  65. alma/retrieval/budget.py +525 -0
  66. alma/retrieval/cache.py +1304 -1061
  67. alma/retrieval/embeddings.py +202 -202
  68. alma/retrieval/engine.py +850 -427
  69. alma/retrieval/modes.py +365 -0
  70. alma/retrieval/progressive.py +560 -0
  71. alma/retrieval/scoring.py +344 -344
  72. alma/retrieval/trust_scoring.py +637 -0
  73. alma/retrieval/verification.py +797 -0
  74. alma/session/__init__.py +19 -19
  75. alma/session/manager.py +442 -399
  76. alma/session/types.py +288 -288
  77. alma/storage/__init__.py +101 -90
  78. alma/storage/archive.py +233 -0
  79. alma/storage/azure_cosmos.py +1259 -1259
  80. alma/storage/base.py +1083 -583
  81. alma/storage/chroma.py +1443 -1443
  82. alma/storage/constants.py +103 -103
  83. alma/storage/file_based.py +614 -614
  84. alma/storage/migrations/__init__.py +21 -21
  85. alma/storage/migrations/base.py +321 -321
  86. alma/storage/migrations/runner.py +323 -323
  87. alma/storage/migrations/version_stores.py +337 -337
  88. alma/storage/migrations/versions/__init__.py +11 -11
  89. alma/storage/migrations/versions/v1_0_0.py +373 -373
  90. alma/storage/migrations/versions/v1_1_0_workflow_context.py +551 -0
  91. alma/storage/pinecone.py +1080 -1080
  92. alma/storage/postgresql.py +1948 -1559
  93. alma/storage/qdrant.py +1306 -1306
  94. alma/storage/sqlite_local.py +3041 -1457
  95. alma/testing/__init__.py +46 -46
  96. alma/testing/factories.py +301 -301
  97. alma/testing/mocks.py +389 -389
  98. alma/types.py +292 -264
  99. alma/utils/__init__.py +19 -0
  100. alma/utils/tokenizer.py +521 -0
  101. alma/workflow/__init__.py +83 -0
  102. alma/workflow/artifacts.py +170 -0
  103. alma/workflow/checkpoint.py +311 -0
  104. alma/workflow/context.py +228 -0
  105. alma/workflow/outcomes.py +189 -0
  106. alma/workflow/reducers.py +393 -0
  107. {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/METADATA +210 -72
  108. alma_memory-0.7.0.dist-info/RECORD +112 -0
  109. alma_memory-0.5.1.dist-info/RECORD +0 -93
  110. {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/WHEEL +0 -0
  111. {alma_memory-0.5.1.dist-info → alma_memory-0.7.0.dist-info}/top_level.txt +0 -0
alma/harness/base.py CHANGED
@@ -1,386 +1,386 @@
"""
ALMA Harness Pattern - Base Classes.

The harness pattern decouples agent capabilities from domain, making any
tool-using workflow able to "learn" over time through memory injections.

Pattern Components:
    1. Setting - Fixed environment (tools, constraints)
    2. Context - Ephemeral per-run inputs
    3. Agent - The executor that acts within setting+context
    4. Memory Schema - Domain-specific structure for logging/retrieving learnings

Flow:
    Pre-run -> Inject relevant memory slices
    Run -> Agent acts, uses tools, logs reflections
    Post-run -> Update memory schema
    Repeat -> Agent appears to "learn" without weight changes
"""

from dataclasses import dataclass, field
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Callable, Dict, List, Optional

from alma.types import MemoryScope, MemorySlice


class ToolType(Enum):
    """Categories of tools available to agents."""

    SEARCH = "search"  # Web search, semantic search
    DATA_ACCESS = "data_access"  # APIs, databases
    EXECUTION = "execution"  # Code execution, automation
    COMMUNICATION = "communication"  # Email, messaging
    ANALYSIS = "analysis"  # Data processing, synthesis
    CREATION = "creation"  # Content generation, design


@dataclass
class Tool:
    """
    A tool available in the agent's setting.

    Tools are the building blocks agents use to accomplish tasks.
    """

    name: str
    description: str
    tool_type: ToolType
    parameters: Dict[str, Any] = field(default_factory=dict)
    constraints: List[str] = field(default_factory=list)
    examples: List[str] = field(default_factory=list)

    def to_prompt(self) -> str:
        """Format tool for inclusion in agent prompt."""
        prompt = f"**{self.name}** ({self.tool_type.value}): {self.description}"
        if self.constraints:
            prompt += f"\n Constraints: {', '.join(self.constraints)}"
        return prompt


@dataclass
class Setting:
    """
    The fixed environment in which an agent operates.

    Includes available tools and immutable constraints that don't change
    between runs. The setting defines WHAT the agent CAN do.
    """

    name: str
    description: str
    tools: List[Tool] = field(default_factory=list)
    global_constraints: List[str] = field(default_factory=list)
    environment_vars: Dict[str, str] = field(default_factory=dict)

    def to_prompt(self) -> str:
        """Format setting for inclusion in agent prompt."""
        lines = [
            f"## Setting: {self.name}",
            self.description,
            "",
            "### Available Tools:",
        ]
        for tool in self.tools:
            lines.append(f"- {tool.to_prompt()}")

        if self.global_constraints:
            lines.append("")
            lines.append("### Constraints:")
            for c in self.global_constraints:
                lines.append(f"- {c}")

        return "\n".join(lines)


@dataclass
class Context:
    """
    Ephemeral inputs for a single run.

    This is injected fresh each time and contains task-specific information.
    The context defines WHAT the agent should do THIS run.
    """

    task: str
    user_id: Optional[str] = None
    project_id: Optional[str] = None
    session_id: Optional[str] = None
    inputs: Dict[str, Any] = field(default_factory=dict)
    constraints: List[str] = field(default_factory=list)
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_prompt(self) -> str:
        """Format context for inclusion in agent prompt."""
        lines = [
            "## Current Task",
            self.task,
        ]
        if self.inputs:
            lines.append("")
            lines.append("### Inputs:")
            for k, v in self.inputs.items():
                lines.append(f"- {k}: {v}")

        if self.constraints:
            lines.append("")
            lines.append("### Task Constraints:")
            for c in self.constraints:
                lines.append(f"- {c}")

        return "\n".join(lines)


@dataclass
class MemorySchema:
    """
    Domain-specific structure for logging and retrieving learnings.

    This defines WHAT gets remembered and HOW, ensuring relevance
    and preventing scope creep. Each domain has its own schema.
    """

    domain: str
    description: str

    # What this domain can learn
    learnable_categories: List[str] = field(default_factory=list)

    # What this domain should NOT learn (prevents over-scoping)
    forbidden_categories: List[str] = field(default_factory=list)

    # Heuristic templates for this domain
    heuristic_templates: List[str] = field(default_factory=list)

    # Outcome fields to track
    outcome_fields: List[str] = field(default_factory=list)

    # Minimum occurrences before creating a heuristic
    min_occurrences: int = 3

    # Custom metadata schema
    metadata_schema: Dict[str, str] = field(default_factory=dict)

    def to_scope(self, agent_name: str) -> MemoryScope:
        """Convert to MemoryScope for ALMA integration."""
        return MemoryScope(
            agent_name=agent_name,
            can_learn=self.learnable_categories,
            cannot_learn=self.forbidden_categories,
            min_occurrences_for_heuristic=self.min_occurrences,
        )

    def to_prompt(self) -> str:
        """Format schema for documentation."""
        lines = [
            f"## Memory Schema: {self.domain}",
            self.description,
            "",
            "### Learnable Categories:",
        ]
        for cat in self.learnable_categories:
            lines.append(f"- {cat}")

        if self.heuristic_templates:
            lines.append("")
            lines.append("### Heuristic Templates:")
            for t in self.heuristic_templates:
                lines.append(f"- {t}")

        return "\n".join(lines)


@dataclass
class Agent:
    """
    The executor that operates within a setting, given a context.

    Agents start "dumb" but get smarter via memory injections.
    They use tools to accomplish tasks and log reflections post-run.
    """

    name: str
    role: str
    description: str
    memory_schema: MemorySchema

    # Personality/style traits
    traits: List[str] = field(default_factory=list)

    # Default behaviors
    default_actions: List[str] = field(default_factory=list)

    def to_prompt(self) -> str:
        """Format agent identity for system prompt."""
        lines = [
            f"## You are {self.name}",
            f"**Role**: {self.role}",
            "",
            self.description,
        ]

        if self.traits:
            lines.append("")
            lines.append("### Traits:")
            for t in self.traits:
                lines.append(f"- {t}")

        return "\n".join(lines)


@dataclass
class RunResult:
    """Result of a harness run."""

    success: bool
    output: Any
    reflections: List[str] = field(default_factory=list)
    tools_used: List[str] = field(default_factory=list)
    duration_ms: Optional[int] = None
    error: Optional[str] = None


class Harness:
    """
    The complete harness that orchestrates Setting + Context + Agent + Memory.

    This is the main interface for running learning agents across any domain.

    Usage:
        harness = Harness(setting, agent, alma)
        result = harness.run(context)
    """

    def __init__(
        self,
        setting: Setting,
        agent: Agent,
        alma: Any,  # ALMA instance
    ):
        """
        Initialize harness.

        Args:
            setting: The fixed environment
            agent: The executor
            alma: ALMA instance for memory management
        """
        self.setting = setting
        self.agent = agent
        self.alma = alma

    def build_prompt(
        self,
        context: Context,
        memory_slice: Optional[MemorySlice] = None,
    ) -> str:
        """
        Build the complete prompt for a run.

        Combines: Agent identity + Setting + Memory + Context
        """
        sections = [
            self.agent.to_prompt(),
            "",
            self.setting.to_prompt(),
        ]

        # Inject memory if available
        if memory_slice and memory_slice.total_items > 0:
            sections.append("")
            sections.append("## Relevant Memory (from past runs)")
            sections.append(memory_slice.to_prompt())

        sections.append("")
        sections.append(context.to_prompt())

        return "\n".join(sections)

    def pre_run(self, context: Context) -> MemorySlice:
        """
        Pre-run: Retrieve relevant memory for this task.

        Returns memory slice to inject into prompt.
        """
        return self.alma.retrieve(
            task=context.task,
            agent=self.agent.name,
            user_id=context.user_id,
            top_k=5,
        )

    def post_run(
        self,
        context: Context,
        result: RunResult,
    ):
        """
        Post-run: Update memory based on outcome.

        Logs the outcome and potentially creates new heuristics.
        """
        # Learn from the outcome
        self.alma.learn(
            agent=self.agent.name,
            task=context.task,
            outcome="success" if result.success else "failure",
            strategy_used=(
                ", ".join(result.tools_used) if result.tools_used else "direct"
            ),
            duration_ms=result.duration_ms,
            error_message=result.error,
            feedback="; ".join(result.reflections) if result.reflections else None,
        )

    def run(
        self,
        context: Context,
        executor: Optional[Callable[[str], RunResult]] = None,
    ) -> RunResult:
        """
        Execute the full harness flow.

        1. Pre-run: Retrieve relevant memories
        2. Build prompt with injected memory
        3. Execute (via provided executor or return prompt for external use)
        4. Post-run: Update memory with outcome

        Args:
            context: The task context for this run
            executor: Optional function that takes prompt and returns RunResult.
                If not provided, returns a RunResult with the built prompt.

        Returns:
            RunResult with output or prompt
        """
        import time

        start_time = time.time()

        # 1. Pre-run: Get relevant memories
        memory_slice = self.pre_run(context)

        # 2. Build prompt
        prompt = self.build_prompt(context, memory_slice)

        # 3. Execute
        if executor:
            result = executor(prompt)
            result.duration_ms = int((time.time() - start_time) * 1000)

            # 4. Post-run: Update memory
            self.post_run(context, result)

            return result
        else:
            # Return prompt for external execution
            return RunResult(
                success=True,
                output=prompt,
                duration_ms=int((time.time() - start_time) * 1000),
            )

    def get_stats(self) -> Dict[str, Any]:
        """Get memory statistics for this agent."""
        return self.alma.get_stats(agent=self.agent.name)
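
The file above defines the full harness flow but ships no end-to-end example. The sketch below is a minimal, hypothetical use of these classes: the tool, agent, and schema values are invented for illustration, and _StubAlma merely stands in for a real ALMA instance (which must provide the retrieve(), learn(), and get_stats() calls that Harness invokes); it is not part of the package.

from alma.harness.base import (
    Agent,
    Context,
    Harness,
    MemorySchema,
    RunResult,
    Setting,
    Tool,
    ToolType,
)


class _StubAlma:
    """Illustrative stand-in for an ALMA instance (not part of the package)."""

    def retrieve(self, task, agent, user_id=None, top_k=5):
        # Returning None makes build_prompt() skip the memory section.
        return None

    def learn(self, **outcome):
        print("learned:", outcome["outcome"], "for task:", outcome["task"])

    def get_stats(self, agent):
        return {"agent": agent, "memories": 0}


# Fixed environment: one search tool plus a global constraint.
setting = Setting(
    name="research",
    description="Read-only research environment.",
    tools=[
        Tool(
            name="web_search",
            description="Search the web for documentation",
            tool_type=ToolType.SEARCH,
            constraints=["max 3 queries per run"],
        )
    ],
    global_constraints=["Cite every source used"],
)

# Domain schema: what this agent is allowed (and forbidden) to learn.
schema = MemorySchema(
    domain="research",
    description="Learnings about effective search strategies.",
    learnable_categories=["query_phrasing", "source_quality"],
    forbidden_categories=["user_personal_data"],
)

agent = Agent(
    name="researcher",
    role="Research assistant",
    description="Finds and summarizes relevant sources.",
    memory_schema=schema,
)

harness = Harness(setting=setting, agent=agent, alma=_StubAlma())


def executor(prompt: str) -> RunResult:
    # In practice this would send the built prompt to an LLM; here it is stubbed.
    return RunResult(
        success=True,
        output="summary of findings",
        tools_used=["web_search"],
        reflections=["Narrow queries surfaced better sources"],
    )


result = harness.run(Context(task="Summarize recent wheel format changes"), executor=executor)
print(result.success, harness.get_stats())

If no executor is passed, run() simply returns a RunResult whose output is the assembled prompt, so the prompt can be executed externally and post_run() called manually. With a real ALMA backend, repeated runs make pre_run() return progressively richer memory slices, so the same executor sees better prompts over time without any weight changes, which is the point of the harness pattern described in the module docstring.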