alma-memory 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
alma/harness/base.py ADDED
@@ -0,0 +1,377 @@
1
+ """
2
+ ALMA Harness Pattern - Base Classes.
3
+
4
+ The harness pattern decouples agent capabilities from domain, making any
5
+ tool-using workflow able to "learn" over time through memory injections.
6
+
7
+ Pattern Components:
8
+ 1. Setting - Fixed environment (tools, constraints)
9
+ 2. Context - Ephemeral per-run inputs
10
+ 3. Agent - The executor that acts within setting+context
11
+ 4. Memory Schema - Domain-specific structure for logging/retrieving learnings
12
+
13
+ Flow:
14
+ Pre-run -> Inject relevant memory slices
15
+ Run -> Agent acts, uses tools, logs reflections
16
+ Post-run -> Update memory schema
17
+ Repeat -> Agent appears to "learn" without weight changes
18
+ """
19
+
20
+ from abc import ABC, abstractmethod
21
+ from dataclasses import dataclass, field
22
+ from datetime import datetime, timezone
23
+ from typing import Optional, List, Dict, Any, Callable
24
+ from enum import Enum
25
+
26
+ from alma.types import MemorySlice, MemoryScope
27
+
28
+
29
class ToolType(Enum):
    """Broad categories used to classify the tools an agent may use."""

    SEARCH = "search"                # web search, semantic search
    DATA_ACCESS = "data_access"      # APIs, databases
    EXECUTION = "execution"          # code execution, automation
    COMMUNICATION = "communication"  # email, messaging
    ANALYSIS = "analysis"            # data processing, synthesis
    CREATION = "creation"            # content generation, design
37
+
38
+
39
@dataclass
class Tool:
    """A tool available in the agent's setting.

    Tools are the building blocks agents use to accomplish tasks.
    """

    name: str
    description: str
    tool_type: ToolType
    parameters: Dict[str, Any] = field(default_factory=dict)
    constraints: List[str] = field(default_factory=list)
    examples: List[str] = field(default_factory=list)

    def to_prompt(self) -> str:
        """Render this tool as a single line for inclusion in an agent prompt."""
        line = f"**{self.name}** ({self.tool_type.value}): {self.description}"
        if not self.constraints:
            return line
        return line + f"\n Constraints: {', '.join(self.constraints)}"
59
+
60
+
61
@dataclass
class Setting:
    """The fixed environment in which an agent operates.

    Holds the tool inventory and immutable constraints that persist
    across runs; the setting defines WHAT the agent CAN do.
    """

    name: str
    description: str
    tools: List[Tool] = field(default_factory=list)
    global_constraints: List[str] = field(default_factory=list)
    environment_vars: Dict[str, str] = field(default_factory=dict)

    def to_prompt(self) -> str:
        """Render the setting (tools + constraints) as a prompt section."""
        parts = [
            f"## Setting: {self.name}",
            self.description,
            "",
            "### Available Tools:",
        ]
        parts.extend(f"- {tool.to_prompt()}" for tool in self.tools)
        if self.global_constraints:
            parts += ["", "### Constraints:"]
            parts.extend(f"- {rule}" for rule in self.global_constraints)
        return "\n".join(parts)
93
+
94
+
95
@dataclass
class Context:
    """Ephemeral inputs for a single run.

    Injected fresh each time with task-specific information; the
    context defines WHAT the agent should do THIS run.
    """

    task: str
    user_id: Optional[str] = None
    project_id: Optional[str] = None
    session_id: Optional[str] = None
    inputs: Dict[str, Any] = field(default_factory=dict)
    constraints: List[str] = field(default_factory=list)
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_prompt(self) -> str:
        """Render the task, its inputs, and constraints as a prompt section."""
        parts = ["## Current Task", self.task]
        if self.inputs:
            parts += ["", "### Inputs:"]
            parts.extend(f"- {key}: {value}" for key, value in self.inputs.items())
        if self.constraints:
            parts += ["", "### Task Constraints:"]
            parts.extend(f"- {item}" for item in self.constraints)
        return "\n".join(parts)
130
+
131
+
132
@dataclass
class MemorySchema:
    """Domain-specific structure for logging and retrieving learnings.

    Defines WHAT gets remembered and HOW, keeping memory relevant and
    preventing scope creep. Each domain declares its own schema.
    """

    domain: str
    description: str
    # Categories this domain is allowed to learn.
    learnable_categories: List[str] = field(default_factory=list)
    # Categories this domain must NOT learn (prevents over-scoping).
    forbidden_categories: List[str] = field(default_factory=list)
    # Heuristic templates for this domain.
    heuristic_templates: List[str] = field(default_factory=list)
    # Outcome fields to track.
    outcome_fields: List[str] = field(default_factory=list)
    # Minimum occurrences before creating a heuristic.
    min_occurrences: int = 3
    # Custom metadata schema.
    metadata_schema: Dict[str, str] = field(default_factory=dict)

    def to_scope(self, agent_name: str) -> MemoryScope:
        """Translate this schema into a MemoryScope for ALMA integration."""
        return MemoryScope(
            agent_name=agent_name,
            can_learn=self.learnable_categories,
            cannot_learn=self.forbidden_categories,
            min_occurrences_for_heuristic=self.min_occurrences,
        )

    def to_prompt(self) -> str:
        """Render the schema as human-readable documentation."""
        parts = [
            f"## Memory Schema: {self.domain}",
            self.description,
            "",
            "### Learnable Categories:",
        ]
        parts.extend(f"- {category}" for category in self.learnable_categories)
        if self.heuristic_templates:
            parts += ["", "### Heuristic Templates:"]
            parts.extend(f"- {template}" for template in self.heuristic_templates)
        return "\n".join(parts)
188
+
189
+
190
@dataclass
class Agent:
    """The executor that operates within a setting, given a context.

    Agents start "dumb" but get smarter via memory injections.
    They use tools to accomplish tasks and log reflections post-run.
    """

    name: str
    role: str
    description: str
    memory_schema: MemorySchema
    # Personality/style traits.
    traits: List[str] = field(default_factory=list)
    # Default behaviors.
    default_actions: List[str] = field(default_factory=list)

    def to_prompt(self) -> str:
        """Render the agent's identity for the system prompt."""
        parts = [
            f"## You are {self.name}",
            f"**Role**: {self.role}",
            "",
            self.description,
        ]
        if self.traits:
            parts += ["", "### Traits:"]
            parts.extend(f"- {trait}" for trait in self.traits)
        return "\n".join(parts)
225
+
226
+
227
@dataclass
class RunResult:
    """Outcome of a single harness run."""

    success: bool                    # whether the run completed successfully
    output: Any                      # primary result (or the built prompt)
    # Agent self-observations recorded during the run.
    reflections: List[str] = field(default_factory=list)
    # Names of the tools invoked during the run.
    tools_used: List[str] = field(default_factory=list)
    duration_ms: Optional[int] = None  # wall-clock duration of the run
    error: Optional[str] = None        # error message when the run failed
236
+
237
+
238
class Harness:
    """
    The complete harness that orchestrates Setting + Context + Agent + Memory.

    This is the main interface for running learning agents across any domain.

    Usage:
        harness = Harness(setting, agent, alma)
        result = harness.run(context)
    """

    def __init__(
        self,
        setting: Setting,
        agent: Agent,
        alma: Any,  # ALMA instance (kept untyped to avoid a hard dependency)
    ):
        """
        Initialize harness.

        Args:
            setting: The fixed environment
            agent: The executor
            alma: ALMA instance for memory management
        """
        self.setting = setting
        self.agent = agent
        self.alma = alma

    def build_prompt(
        self,
        context: Context,
        memory_slice: Optional[MemorySlice] = None,
    ) -> str:
        """
        Build the complete prompt for a run.

        Combines: Agent identity + Setting + Memory + Context

        Args:
            context: Task context for this run
            memory_slice: Optional memories retrieved by pre_run()

        Returns:
            The assembled prompt string.
        """
        sections = [
            self.agent.to_prompt(),
            "",
            self.setting.to_prompt(),
        ]

        # Inject memory only when the slice actually carries content;
        # an empty slice would just add noise to the prompt.
        if memory_slice and memory_slice.total_items > 0:
            sections.append("")
            sections.append("## Relevant Memory (from past runs)")
            sections.append(memory_slice.to_prompt())

        sections.append("")
        sections.append(context.to_prompt())

        return "\n".join(sections)

    def pre_run(self, context: Context) -> MemorySlice:
        """
        Pre-run: Retrieve relevant memory for this task.

        Returns:
            Memory slice to inject into the prompt.
        """
        return self.alma.retrieve(
            task=context.task,
            agent=self.agent.name,
            user_id=context.user_id,
            top_k=5,
        )

    def post_run(
        self,
        context: Context,
        result: RunResult,
    ):
        """
        Post-run: Update memory based on outcome.

        Logs the outcome and potentially creates new heuristics.

        Args:
            context: The context the run was executed with
            result: The outcome of the run
        """
        # Learn from the outcome; reflections and tools are flattened into
        # the string fields ALMA's learn() API expects.
        self.alma.learn(
            agent=self.agent.name,
            task=context.task,
            outcome="success" if result.success else "failure",
            strategy_used=", ".join(result.tools_used) if result.tools_used else "direct",
            duration_ms=result.duration_ms,
            error_message=result.error,
            feedback="; ".join(result.reflections) if result.reflections else None,
        )

    def run(
        self,
        context: Context,
        executor: Optional[Callable[[str], RunResult]] = None,
    ) -> RunResult:
        """
        Execute the full harness flow.

        1. Pre-run: Retrieve relevant memories
        2. Build prompt with injected memory
        3. Execute (via provided executor or return prompt for external use)
        4. Post-run: Update memory with outcome

        Args:
            context: The task context for this run
            executor: Optional function that takes prompt and returns RunResult.
                If not provided, returns a RunResult with the built prompt.

        Returns:
            RunResult with output or prompt
        """
        import time

        # perf_counter() is monotonic; time.time() is wall-clock and can
        # jump under NTP/clock adjustments, corrupting duration_ms.
        start_time = time.perf_counter()

        # 1. Pre-run: Get relevant memories
        memory_slice = self.pre_run(context)

        # 2. Build prompt
        prompt = self.build_prompt(context, memory_slice)

        # 3. Execute (executor exceptions propagate to the caller; no
        # memory update is recorded for a run that never produced a result)
        if executor:
            result = executor(prompt)
            result.duration_ms = int((time.perf_counter() - start_time) * 1000)

            # 4. Post-run: Update memory
            self.post_run(context, result)

            return result
        else:
            # No executor: hand the fully-built prompt back for external
            # execution; memory is not updated since there is no outcome yet.
            return RunResult(
                success=True,
                output=prompt,
                duration_ms=int((time.perf_counter() - start_time) * 1000),
            )

    def get_stats(self) -> Dict[str, Any]:
        """Get memory statistics for this agent."""
        return self.alma.get_stats(agent=self.agent.name)