loom-agent 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of loom-agent may be problematic.

Files changed (51)
  1. loom/__init__.py +1 -0
  2. loom/adapters/converters.py +77 -0
  3. loom/adapters/registry.py +43 -0
  4. loom/api/factory.py +77 -0
  5. loom/api/main.py +201 -0
  6. loom/builtin/__init__.py +3 -0
  7. loom/builtin/memory/__init__.py +3 -0
  8. loom/builtin/memory/metabolic.py +96 -0
  9. loom/builtin/memory/pso.py +41 -0
  10. loom/builtin/memory/sanitizers.py +39 -0
  11. loom/builtin/memory/validators.py +55 -0
  12. loom/config/tool.py +63 -0
  13. loom/infra/__init__.py +0 -0
  14. loom/infra/llm.py +43 -0
  15. loom/infra/logging.py +42 -0
  16. loom/infra/store.py +39 -0
  17. loom/infra/transport/memory.py +85 -0
  18. loom/infra/transport/nats.py +141 -0
  19. loom/infra/transport/redis.py +140 -0
  20. loom/interfaces/llm.py +44 -0
  21. loom/interfaces/memory.py +50 -0
  22. loom/interfaces/store.py +29 -0
  23. loom/interfaces/transport.py +35 -0
  24. loom/kernel/__init__.py +0 -0
  25. loom/kernel/base_interceptor.py +97 -0
  26. loom/kernel/bus.py +76 -0
  27. loom/kernel/dispatcher.py +58 -0
  28. loom/kernel/interceptors/__init__.py +14 -0
  29. loom/kernel/interceptors/budget.py +60 -0
  30. loom/kernel/interceptors/depth.py +45 -0
  31. loom/kernel/interceptors/hitl.py +51 -0
  32. loom/kernel/interceptors/studio.py +137 -0
  33. loom/kernel/interceptors/timeout.py +27 -0
  34. loom/kernel/state.py +71 -0
  35. loom/memory/hierarchical.py +94 -0
  36. loom/node/__init__.py +0 -0
  37. loom/node/agent.py +133 -0
  38. loom/node/base.py +121 -0
  39. loom/node/crew.py +103 -0
  40. loom/node/router.py +68 -0
  41. loom/node/tool.py +50 -0
  42. loom/protocol/__init__.py +0 -0
  43. loom/protocol/cloudevents.py +73 -0
  44. loom/protocol/interfaces.py +110 -0
  45. loom/protocol/mcp.py +97 -0
  46. loom/protocol/memory_operations.py +51 -0
  47. loom/protocol/patch.py +93 -0
  48. loom_agent-0.3.2.dist-info/LICENSE +204 -0
  49. loom_agent-0.3.2.dist-info/METADATA +139 -0
  50. loom_agent-0.3.2.dist-info/RECORD +51 -0
  51. loom_agent-0.3.2.dist-info/WHEEL +4 -0
loom/__init__.py ADDED
@@ -0,0 +1 @@
+ __version__ = "0.3.0"
loom/adapters/converters.py ADDED
@@ -0,0 +1,77 @@
+ """
+ Tool Converters (M4)
+ """
+
+ import inspect
+ import json
+ from typing import Any, Callable, Dict, get_type_hints, Type
+
+ from loom.protocol.mcp import MCPToolDefinition
+
+ class FunctionToMCP:
+     """
+     Converts Python functions to MCP Tool Definitions.
+     """
+
+     @staticmethod
+     def convert(func: Callable[..., Any], name: str = None) -> MCPToolDefinition:
+         """
+         Introspects a python function and returns an MCP Tool Definition.
+         """
+         func_name = name or func.__name__
+         doc = inspect.getdoc(func) or "No description provided."
+
+         # Parse arguments
+         sig = inspect.signature(func)
+         type_hints = get_type_hints(func)
+
+         properties = {}
+         required = []
+
+         for param_name, param in sig.parameters.items():
+             if param_name == "self" or param_name == "cls":
+                 continue
+
+             # Get type
+             py_type = type_hints.get(param_name, Any)
+             json_type = FunctionToMCP._map_type(py_type)
+
+             prop_def = {"type": json_type}
+
+             # TODO: Description from docstring parsing? (Google-style/NumPy-style)
+             # For now, just basic type.
+
+             properties[param_name] = prop_def
+
+             if param.default == inspect.Parameter.empty:
+                 required.append(param_name)
+
+         input_schema = {
+             "type": "object",
+             "properties": properties,
+             "required": required
+         }
+
+         return MCPToolDefinition(
+             name=func_name,
+             description=doc,
+             input_schema=input_schema
+         )
+
+     @staticmethod
+     def _map_type(py_type: Type) -> str:
+         """Map Python type to JSON Schema type."""
+         if py_type == str:
+             return "string"
+         elif py_type == int:
+             return "integer"
+         elif py_type == float:
+             return "number"
+         elif py_type == bool:
+             return "boolean"
+         elif py_type == list or getattr(py_type, "__origin__", None) == list:
+             return "array"
+         elif py_type == dict or getattr(py_type, "__origin__", None) == dict:
+             return "object"
+         else:
+             return "string" # Default fallback
loom/adapters/registry.py ADDED
@@ -0,0 +1,43 @@
+ """
+ Tool Registry (M4)
+ """
+
+ from typing import Dict, Any, Callable, List, Optional
+ from loom.protocol.mcp import MCPToolDefinition
+ from loom.adapters.converters import FunctionToMCP
+ # ToolNode is in loom.node.tool, but avoid circular import if possible.
+ # Ideally Registry produces definitions + execution callables.
+ # Factory creates Nodes.
+
+ class ToolRegistry:
+     """
+     Central repository for tools available to Agents.
+     """
+
+     def __init__(self):
+         self._tools: Dict[str, Callable] = {}
+         self._definitions: Dict[str, MCPToolDefinition] = {}
+
+     def register_function(self, func: Callable, name: str = None) -> MCPToolDefinition:
+         """Register a python function as a tool."""
+         # Clean name
+         tool_name = name or func.__name__
+
+         # Convert to MCP
+         definition = FunctionToMCP.convert(func, name=tool_name)
+
+         # Store
+         self._tools[tool_name] = func
+         self._definitions[tool_name] = definition
+
+         return definition
+
+     def get_definition(self, name: str) -> Optional[MCPToolDefinition]:
+         return self._definitions.get(name)
+
+     def get_callable(self, name: str) -> Optional[Callable]:
+         return self._tools.get(name)
+
+     @property
+     def definitions(self) -> List[MCPToolDefinition]:
+         return list(self._definitions.values())
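A short usage sketch for ToolRegistry (the add function and tool names here are illustrative):

from loom.adapters.registry import ToolRegistry

def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

registry = ToolRegistry()
registry.register_function(add)              # stored under the function name "add"
registry.register_function(add, name="sum")  # or under an explicit name

# .name access assumes the MCPToolDefinition fields used by FunctionToMCP.convert.
print([d.name for d in registry.definitions])  # ['add', 'sum']
print(registry.get_callable("sum")(2, 3))      # 5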
loom/api/factory.py ADDED
@@ -0,0 +1,77 @@
+ """
+ Loom SDK: Factory Helpers
+ """
+
+ from typing import List, Optional, Callable, Dict, Any
+
+ from loom.api.main import LoomApp
+ from loom.node.agent import AgentNode
+ from loom.node.tool import ToolNode
+ from loom.node.crew import CrewNode
+ from loom.protocol.mcp import MCPToolDefinition
+ from loom.interfaces.llm import LLMProvider
+
+ from loom.interfaces.memory import MemoryInterface
+
+ def Agent(
+     app: LoomApp,
+     name: str,
+     role: str = "Assistant",
+     tools: Optional[List[ToolNode]] = None,
+     provider: Optional[LLMProvider] = None,
+     memory: Optional[MemoryInterface] = None
+ ) -> AgentNode:
+     """Helper to create an AgentNode."""
+     return AgentNode(
+         node_id=name,
+         dispatcher=app.dispatcher,
+         role=role,
+         tools=tools,
+         provider=provider,
+         memory=memory
+     )
+
+ from loom.adapters.converters import FunctionToMCP
+
+ def Tool(
+     app: LoomApp,
+     name: str,
+     func: Callable[..., Any],
+     description: Optional[str] = None,
+     parameters: Optional[Dict[str, Any]] = None
+ ) -> ToolNode:
+     """
+     Helper to create a ToolNode.
+     Auto-generates schema from function signature if parameters not provided.
+     """
+
+     # Auto-generate definition/schema if not provided
+     auto_def = FunctionToMCP.convert(func, name=name)
+
+     final_desc = description or auto_def.description
+     final_input_schema = parameters or auto_def.input_schema
+
+     tool_def = MCPToolDefinition(
+         name=name,
+         description=final_desc,
+         input_schema=final_input_schema
+     )
+
+     return ToolNode(
+         node_id=name,
+         dispatcher=app.dispatcher,
+         tool_def=tool_def,
+         func=func
+     )
+
+ def Crew(
+     app: LoomApp,
+     name: str,
+     agents: List[AgentNode]
+ ) -> CrewNode:
+     """Helper to create a CrewNode."""
+     return CrewNode(
+         node_id=name,
+         dispatcher=app.dispatcher,
+         agents=agents
+     )
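A rough sketch of wiring these helpers together (the echo tool, the names, and the omitted provider are illustrative; AgentNode, ToolNode, and CrewNode internals live under loom/node/ later in this diff):

from loom.api.main import LoomApp
from loom.api.factory import Agent, Tool, Crew

app = LoomApp(control_config={"depth": 5})

def echo(text: str) -> str:
    """Echo the input back."""
    return text

# Tool() auto-generates the MCP schema from echo's signature.
echo_tool = Tool(app, name="echo", func=echo)

# Agent() binds the node to this app's dispatcher; provider/memory are optional here.
assistant = Agent(app, name="assistant", role="Helpful assistant", tools=[echo_tool])

crew = Crew(app, name="support_crew", agents=[assistant])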
loom/api/main.py ADDED
@@ -0,0 +1,201 @@
+ """
+ Loom SDK: Main Application
+ """
+
+ import asyncio
+ from typing import Callable, Any, Optional, Dict
+ from uuid import uuid4
+
+ from loom.kernel.bus import UniversalEventBus
+ from loom.kernel.state import StateStore
+ from loom.kernel.dispatcher import Dispatcher
+ from loom.kernel.interceptors import TracingInterceptor
+ from loom.kernel.interceptors.budget import BudgetInterceptor
+ from loom.kernel.interceptors.depth import DepthInterceptor
+ from loom.kernel.interceptors.hitl import HITLInterceptor
+ from loom.kernel.interceptors.studio import StudioInterceptor
+ from loom.protocol.cloudevents import CloudEvent
+ from loom.interfaces.store import EventStore
+ from loom.node.base import Node
+
+ from loom.interfaces.transport import Transport
+
+ class LoomApp:
+     """
+     The High-Level Application Object.
+
+     Usage:
+         app = LoomApp(control_config={"budget": 5000})
+         app.add_node(agent)
+         app.run("Do something", target="agent_1")
+     """
+
+     def __init__(self,
+                  store: Optional[EventStore] = None,
+                  transport: Optional[Transport] = None,
+                  control_config: Optional[Dict[str, Any]] = None):
+
+         control_config = control_config or {}
+
+         if "transport" in control_config and isinstance(control_config["transport"], dict):
+             # Config dict provided, maybe future extensibility
+             pass
+
+         # Transport Selection
+         # 1. Transport object passed directly
+         self.transport = transport
+
+         if not self.transport:
+             # Config from control_config or Env
+             transport_cfg = {}
+             if "transport" in control_config and isinstance(control_config["transport"], dict):
+                 transport_cfg = control_config["transport"]
+
+             import os
+             # Priority: Config > Env > Default
+             transport_type = transport_cfg.get("type") or os.getenv("LOOM_TRANSPORT", "memory").lower()
+
+             if transport_type == "redis":
+                 from loom.infra.transport.redis import RedisTransport
+                 redis_url = transport_cfg.get("redis_url") or os.getenv("REDIS_URL", "redis://localhost:6379")
+                 self.transport = RedisTransport(redis_url=redis_url)
+             elif transport_type == "nats":
+                 from loom.infra.transport.nats import NATSTransport
+                 nats_servers_cfg = transport_cfg.get("nats_servers")
+                 if nats_servers_cfg:
+                     nats_servers = nats_servers_cfg if isinstance(nats_servers_cfg, list) else [nats_servers_cfg]
+                 else:
+                     nats_servers = os.getenv("NATS_SERVERS", "nats://localhost:4222").split(",")
+                 self.transport = NATSTransport(servers=nats_servers)
+             else:
+                 from loom.infra.transport.memory import InMemoryTransport
+                 self.transport = InMemoryTransport()
+
+         self.bus = UniversalEventBus(store=store, transport=self.transport)
+         self.state_store = StateStore()
+         self.dispatcher = Dispatcher(self.bus)
+
+         # Default Interceptors
+         self.dispatcher.add_interceptor(TracingInterceptor())
+
+         # Configured Controls
+         control_config = control_config or {}
+
+         if "budget" in control_config:
+             cfg = control_config["budget"]
+             max_tokens = cfg["max_tokens"] if isinstance(cfg, dict) else cfg
+             self.dispatcher.add_interceptor(BudgetInterceptor(max_tokens=max_tokens))
+
+         if "depth" in control_config:
+             cfg = control_config["depth"]
+             max_depth = cfg["max_depth"] if isinstance(cfg, dict) else cfg
+             self.dispatcher.add_interceptor(DepthInterceptor(max_depth=max_depth))
+
+         if "hitl" in control_config:
+             # hitl expects a list of patterns
+             patterns = control_config["hitl"]
+             patterns = control_config["hitl"]
+             if isinstance(patterns, list):
+                 self.dispatcher.add_interceptor(HITLInterceptor(patterns=patterns))
+
+         # Studio Support
+         # Check env var or control_config
+         studio_enabled = False
+         studio_url = "ws://localhost:8765"
+
+         if "studio" in control_config:
+             studio_cfg = control_config["studio"]
+             if isinstance(studio_cfg, dict):
+                 studio_enabled = studio_cfg.get("enabled", False)
+                 studio_url = studio_cfg.get("url", studio_url)
+             elif isinstance(studio_cfg, bool):
+                 studio_enabled = studio_cfg
+         else:
+             import os
+             if os.getenv("LOOM_STUDIO_ENABLED", "false").lower() == "true":
+                 studio_enabled = True
+                 studio_url = os.getenv("LOOM_STUDIO_URL", studio_url)
+
+         if studio_enabled:
+             self.dispatcher.add_interceptor(StudioInterceptor(studio_url=studio_url, enabled=True))
+
+         self._started = False
+
+     async def start(self):
+         """Initialize async components."""
+         if self._started:
+             return
+
+         await self.bus.connect()
+         await self.bus.subscribe("state.patch/*", self.state_store.apply_event)
+         self._started = True
+
+     def add_node(self, node: Node):
+         """Register a node with the app."""
+         # Nodes auto-subscribe in their __init__ using the dispatcher.
+         # We assume the node has already been initialized with THIS app's dispatcher.
+         # Or we can provide a helper here if Node wasn't initialized?
+         # Better: The Factory helper uses app.dispatcher.
+         pass
+
+     async def run(self, task: str, target: str) -> Any:
+         """
+         Run a single task targeting a specific node and return the result.
+         """
+         await self.start()
+
+         request_id = str(uuid4())
+         event = CloudEvent.create(
+             source="/user/sdk",
+             type="node.request",
+             data={"task": task},
+             subject=target
+         )
+         event.id = request_id
+
+         # Subscribe to response
+         response_future = asyncio.Future()
+
+         async def handle_response(event: CloudEvent):
+             if event.data and event.data.get("request_id") == request_id:
+                 if not response_future.done():
+                     if event.type == "node.error":
+                         response_future.set_exception(Exception(event.data.get("error", "Unknown Error")))
+                     else:
+                         response_future.set_result(event.data.get("result"))
+
+         target_topic = f"node.response/{target.strip('/')}"
+
+         # We need to subscribe to the response
+         await self.bus.subscribe(target_topic, handle_response)
+
+         try:
+             await self.dispatcher.dispatch(event)
+
+             # Use timeout from event if set (injected by interceptor)
+             timeout = 30.0
+             if event.extensions and "timeout" in event.extensions:
+                 try:
+                     timeout = float(event.extensions["timeout"])
+                 except (ValueError, TypeError):
+                     pass
+
+             return await asyncio.wait_for(response_future, timeout=timeout)
+         except asyncio.TimeoutError:
+             raise TimeoutError(f"Task targeting {target} timed out after {timeout}s")
+
+     def on(self, event_type: str, handler: Callable[[CloudEvent], Any]):
+         """
+         Add an observability hook.
+         """
+         async def _wrapper(event: CloudEvent):
+             if event_type == "*" or event.type == event_type:
+                 res = handler(event)
+                 if asyncio.iscoroutine(res):
+                     await res
+
+         # We subscribe to the bus
+         # This requires an async context to call 'await bus.subscribe'.
+         # We can schedule it.
+         asyncio.create_task(self.bus.subscribe(f"{event_type}/*" if event_type != "*" else "*", _wrapper))
+
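A minimal end-to-end sketch of LoomApp.run (assumptions: the in-memory transport default, an agent_1 node created through the factory helper, and no LLM provider, so a real run would need one wired in):

import asyncio

from loom.api.main import LoomApp
from loom.api.factory import Agent

async def main():
    # Defaults to the in-memory transport unless LOOM_TRANSPORT or control_config says otherwise.
    app = LoomApp(control_config={"budget": {"max_tokens": 5000}})

    # The factory helper already binds the node to app.dispatcher.
    Agent(app, name="agent_1", role="Assistant")

    # Publishes a node.request event and awaits the matching node.response (30s default timeout).
    result = await app.run("Summarize the project status", target="agent_1")
    print(result)

asyncio.run(main())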
loom/builtin/__init__.py ADDED
@@ -0,0 +1,3 @@
+ """
+ Built-in standard implementations for Loom components.
+ """
loom/builtin/memory/__init__.py ADDED
@@ -0,0 +1,3 @@
+ """
+ Built-in memory implementations (Metabolic Memory components).
+ """
loom/builtin/memory/metabolic.py ADDED
@@ -0,0 +1,96 @@
+ """
+ Metabolic Memory Core Implementation.
+ """
+
+ import asyncio
+ from typing import Any, Dict, List, Optional
+ from loom.interfaces.memory import MemoryInterface, MemoryEntry
+ from loom.protocol.memory_operations import MemoryValidator, ProjectStateObject, ContextSanitizer
+ from loom.builtin.memory.validators import HeuristicValueAssessor
+ from loom.builtin.memory.pso import SimplePSO
+ from loom.builtin.memory.sanitizers import CompressiveSanitizer
+
+ class MetabolicMemory(MemoryInterface):
+     """
+     Advanced Memory System that 'metabolizes' information.
+     1. Perceives (Validates importance)
+     2. Maintains State (PSO)
+     3. Consolidates (Compresses/Sanitizes)
+     """
+
+     def __init__(
+         self,
+         validator: Optional[MemoryValidator] = None,
+         pso: Optional[ProjectStateObject] = None,
+         sanitizer: Optional[ContextSanitizer] = None
+     ):
+         self.validator = validator or HeuristicValueAssessor()
+         self.pso = pso or SimplePSO()
+         self.sanitizer = sanitizer or CompressiveSanitizer()
+
+         self.short_term: List[MemoryEntry] = []
+         self.limit = 10 # Short term limit before consolidation triggers
+
+     async def add(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
+         """
+         Add entry with metabolic processing.
+         """
+         # 1. Perception / Validation
+         importance = await self.validator.validate(content)
+
+         # 2. Add to Short Term
+         entry = MemoryEntry(
+             role=role,
+             content=content,
+             metadata={**(metadata or {}), "importance": importance, "tier": "ephemeral"}
+         )
+         self.short_term.append(entry)
+
+         # 3. Trigger Metabolism (Consolidation) if limit reached
+         if len(self.short_term) > self.limit:
+             await self.consolidate()
+
+     async def get_context(self, task: str = "") -> str:
+         """
+         Construct context from PSO + Short Term.
+         """
+         pso_context = self.pso.to_markdown()
+
+         # Get high importance short term or just recent
+         recent_context = "\n".join([f"{e.role}: {e.content}" for e in self.short_term])
+
+         return f"{pso_context}\n\n### Recent Activity\n{recent_context}"
+
+     async def get_recent(self, limit: int = 10) -> List[Dict[str, Any]]:
+         return [e.model_dump() for e in self.short_term[-limit:]]
+
+     async def clear(self) -> None:
+         self.short_term = []
+         # PSO persists? Or clears? Usually PSO persists for the project lifetime.
+         # But for 'clear memory' command, maybe we reset session.
+         pass
+
+     async def consolidate(self) -> None:
+         """
+         Metabolic Cycle:
+         1. Update PSO with recent events.
+         2. Compress short_term -> long_term (not impl here) or just drop low value.
+         3. Keep only high value in short_term?
+         """
+         # 1. Update PSO
+         # Convert entries to dicts for PSO
+         events = [e.model_dump() for e in self.short_term]
+         await self.pso.update(events)
+
+         # 2. Prune Short Term
+         # Keep only last N/2, or keep high score?
+         # Simple FIFO for now, but in real metabolic, we'd keep high importance ones active longer.
+         # Let's keep last 5.
+
+         # If we wanted to "Sanitize" / Compress:
+         # text_block = ...
+         # compressed = await self.sanitizer.sanitize(text_block)
+         # We might move compressed summary to a 'middle term' tier.
+
+         keep_count = self.limit // 2
+         self.short_term = self.short_term[-keep_count:]
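A short sketch of the metabolic cycle in isolation (the message contents are made up; with the default components, consolidation fires once more than 10 entries accumulate):

import asyncio

from loom.builtin.memory.metabolic import MetabolicMemory

async def main():
    # Defaults: HeuristicValueAssessor + SimplePSO + CompressiveSanitizer.
    memory = MetabolicMemory()

    # Each add() scores importance, appends to short_term, and consolidates
    # into the PSO once the limit of 10 entries is exceeded.
    await memory.add("user", "Goal: ship the 0.3 release notes")
    for i in range(12):
        await memory.add("tool", f"result: step {i} completed")

    # Context = PSO markdown snapshot + the surviving recent entries.
    print(await memory.get_context())

asyncio.run(main())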
loom/builtin/memory/pso.py ADDED
@@ -0,0 +1,41 @@
+ """
+ Project State Object (PSO) Implementation.
+ """
+
+ from typing import Any, Dict, List
+ from loom.protocol.memory_operations import ProjectStateObject
+
+ class SimplePSO(ProjectStateObject):
+     def __init__(self):
+         self.state: Dict[str, Any] = {
+             "goals": [],
+             "completed_tasks": [],
+             "current_context": "",
+             "variables": {}
+         }
+
+     async def update(self, events: List[Dict[str, Any]]) -> None:
+         """
+         Update state based on heuristic interpretation of events.
+         """
+         for event in events:
+             role = event.get("role")
+             content = event.get("content", "")
+
+             if role == "user" and ("task" in content.lower() or "goal" in content.lower()):
+                 self.state["goals"].append(content)
+             elif role == "tool" and "result" in str(content).lower():
+                 self.state["completed_tasks"].append(str(content)[:100])
+
+     async def snapshot(self) -> Dict[str, Any]:
+         return self.state.copy()
+
+     def to_markdown(self) -> str:
+         md = "## Project State\n"
+         md += "### Goals\n"
+         for g in self.state["goals"]:
+             md += f"- [ ] {g}\n"
+         md += "\n### Completed\n"
+         for t in self.state["completed_tasks"]:
+             md += f"- [x] {t}\n"
+         return md
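In isolation, SimplePSO's heuristic update and markdown rendering behave roughly like this (the event dicts are illustrative):

import asyncio

from loom.builtin.memory.pso import SimplePSO

async def main():
    pso = SimplePSO()
    await pso.update([
        {"role": "user", "content": "Goal: add Redis transport support"},
        {"role": "tool", "content": "Result: tests passed"},
    ])
    # Renders "## Project State" with a "- [ ]" goal line and a "- [x]" completed line.
    print(pso.to_markdown())

asyncio.run(main())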
loom/builtin/memory/sanitizers.py ADDED
@@ -0,0 +1,39 @@
+ """
+ Context Sanitizers Implementation.
+ """
+
+ from loom.protocol.memory_operations import ContextSanitizer
+
+ class BubbleUpSanitizer(ContextSanitizer):
+     """
+     Sanitizes child context for parent consumption.
+     Extracts high-level signals.
+     """
+     async def sanitize(self, context: str, target_token_limit: int) -> str:
+         # 1. Identify "Goal"
+         # 2. Identify "Result"
+         # 3. Identify "Blockers"
+
+         # Simple string processing for prototype
+         lines = context.split('\n')
+         important_lines = [line for line in lines if "Result:" in line or "Error:" in line or "Goal:" in line]
+
+         result = "\n".join(important_lines)
+         if len(result) > target_token_limit * 4: # rough char approx
+             return result[:target_token_limit * 4] + "..."
+         return result
+
+ class CompressiveSanitizer(ContextSanitizer):
+     """
+     Compresses older conversation turns.
+     """
+     async def sanitize(self, context: str, target_token_limit: int) -> str:
+         # In a real impl, calls LLM to summarize.
+         # Here we just truncate the middle.
+
+         if len(context) < target_token_limit * 4:
+             return context
+
+         head = context[:(target_token_limit * 2)]
+         tail = context[-(target_token_limit * 2):]
+         return f"{head}\n... [Compressed {len(context) - len(head) - len(tail)} chars] ...\n{tail}"
loom/builtin/memory/validators.py ADDED
@@ -0,0 +1,55 @@
+ """
+ Memory Validators Implementation.
+ """
+
+ from typing import Any
+ from loom.protocol.memory_operations import MemoryValidator
+
+ # Assumed LLM interface access - in real implementation, this would be injected
+ # For now we'll accept an LLMProvider in init
+
+ class HeuristicValueAssessor(MemoryValidator):
+     def __init__(self, key_terms: list[str] = None):
+         self.key_terms = key_terms or ["goal", "error", "result", "important", "decision"]
+
+     async def validate(self, content: Any) -> float:
+         """
+         Simple heuristic: active if contains key terms or is short and punchy.
+         """
+         text = str(content).lower()
+         score = 0.0
+
+         # Length bias: too short might be noise, too long might be noise
+         if 10 < len(text) < 500:
+             score += 0.3
+
+         # Term bias
+         for term in self.key_terms:
+             if term in text:
+                 score += 0.2
+
+         return min(1.0, score)
+
+ class LLMValueAssessor(MemoryValidator):
+     def __init__(self, llm_provider: Any): # using Any to avoid circ dep for now
+         self.llm = llm_provider
+
+     async def validate(self, content: Any) -> float:
+         """
+         Ask LLM to score the importance.
+         """
+         prompt = f"""
+         Rate the importance of the following memory entry for a long-term project on a scale of 0.0 to 1.0.
+         Return ONLY the number.
+
+         Entry:
+         {content}
+         """
+         try:
+             # Assumed simple LLM call not needing message structure for simplicity in this prototype
+             # But in reality, we'd use the proper Chat interface
+             # response = await self.llm.chat([{"role": "user", "content": prompt}])
+             # For now, let's mock/assume simple wrapper or return a high default
+             return 0.8 # Placeholder for actual LLM call
+         except:
+             return 0.5
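Finally, a sketch of how the heuristic scorer behaves on sample strings (the inputs are illustrative; the scores follow from the length bonus and key-term bonus above):

import asyncio

from loom.builtin.memory.validators import HeuristicValueAssessor

async def main():
    assessor = HeuristicValueAssessor()

    # Within the 10-500 char window (+0.3) and contains the key term "error" (+0.2) -> 0.5
    print(await assessor.validate("Error: connection to Redis refused"))

    # Too short for the length bonus and no key terms -> 0.0
    print(await assessor.validate("ok"))

asyncio.run(main())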