@elizaos/python 2.0.0-alpha.11 → 2.0.0-alpha.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/elizaos/advanced_capabilities/__init__.py +6 -41
  2. package/elizaos/advanced_capabilities/actions/__init__.py +1 -21
  3. package/elizaos/advanced_capabilities/actions/add_contact.py +21 -11
  4. package/elizaos/advanced_capabilities/actions/follow_room.py +28 -28
  5. package/elizaos/advanced_capabilities/actions/image_generation.py +13 -26
  6. package/elizaos/advanced_capabilities/actions/mute_room.py +13 -26
  7. package/elizaos/advanced_capabilities/actions/remove_contact.py +16 -2
  8. package/elizaos/advanced_capabilities/actions/roles.py +13 -27
  9. package/elizaos/advanced_capabilities/actions/search_contacts.py +17 -3
  10. package/elizaos/advanced_capabilities/actions/send_message.py +317 -9
  11. package/elizaos/advanced_capabilities/actions/settings.py +16 -2
  12. package/elizaos/advanced_capabilities/actions/unfollow_room.py +13 -26
  13. package/elizaos/advanced_capabilities/actions/unmute_room.py +13 -26
  14. package/elizaos/advanced_capabilities/actions/update_contact.py +16 -2
  15. package/elizaos/advanced_capabilities/actions/update_entity.py +16 -2
  16. package/elizaos/advanced_capabilities/evaluators/__init__.py +2 -9
  17. package/elizaos/advanced_capabilities/evaluators/reflection.py +3 -132
  18. package/elizaos/advanced_capabilities/evaluators/relationship_extraction.py +5 -201
  19. package/elizaos/advanced_capabilities/providers/__init__.py +1 -12
  20. package/elizaos/advanced_capabilities/providers/knowledge.py +24 -3
  21. package/elizaos/advanced_capabilities/services/__init__.py +2 -9
  22. package/elizaos/advanced_memory/actions/reset_session.py +11 -0
  23. package/elizaos/advanced_memory/evaluators/reflection.py +134 -0
  24. package/elizaos/advanced_memory/evaluators/relationship_extraction.py +203 -0
  25. package/elizaos/advanced_memory/test_advanced_memory.py +357 -0
  26. package/elizaos/advanced_planning/actions/schedule_follow_up.py +222 -0
  27. package/elizaos/basic_capabilities/__init__.py +0 -2
  28. package/elizaos/basic_capabilities/providers/__init__.py +0 -3
  29. package/elizaos/basic_capabilities/providers/agent_settings.py +64 -0
  30. package/elizaos/basic_capabilities/providers/contacts.py +79 -0
  31. package/elizaos/basic_capabilities/providers/facts.py +87 -0
  32. package/elizaos/basic_capabilities/providers/follow_ups.py +117 -0
  33. package/elizaos/basic_capabilities/providers/knowledge.py +97 -0
  34. package/elizaos/basic_capabilities/providers/relationships.py +107 -0
  35. package/elizaos/basic_capabilities/providers/roles.py +96 -0
  36. package/elizaos/basic_capabilities/providers/settings.py +56 -0
  37. package/elizaos/bootstrap/autonomy/__init__.py +5 -1
  38. package/elizaos/bootstrap/autonomy/action.py +161 -0
  39. package/elizaos/bootstrap/autonomy/evaluators.py +217 -0
  40. package/elizaos/bootstrap/autonomy/service.py +8 -0
  41. package/elizaos/bootstrap/plugin.py +7 -0
  42. package/elizaos/bootstrap/providers/knowledge.py +26 -3
  43. package/elizaos/bootstrap/services/embedding.py +156 -1
  44. package/elizaos/runtime.py +63 -18
  45. package/elizaos/services/message_service.py +173 -23
  46. package/elizaos/types/generated/eliza/v1/agent_pb2.py +16 -16
  47. package/elizaos/types/generated/eliza/v1/agent_pb2.pyi +2 -4
  48. package/elizaos/types/model.py +27 -0
  49. package/elizaos/types/runtime.py +5 -1
  50. package/elizaos/utils/validation.py +76 -0
  51. package/package.json +2 -2
  52. package/tests/test_actions_provider_examples.py +58 -1
  53. package/tests/test_async_embedding.py +124 -0
  54. package/tests/test_autonomy.py +13 -2
  55. package/tests/test_validation.py +141 -0
  56. package/tests/verify_memory_architecture.py +192 -0
  57. package/elizaos/basic_capabilities/providers/capabilities.py +0 -62
@@ -195,3 +195,164 @@ send_to_admin_action = Action(
195
195
  validate=_validate_send_to_admin,
196
196
  handler=_handle_send_to_admin,
197
197
  )
198
+
199
+
200
+ # ── ENABLE_AUTONOMY / DISABLE_AUTONOMY actions ─────────────────────────
201
+
202
+
203
def _is_autonomy_running(runtime: IAgentRuntime) -> bool:
    """Report whether the autonomous loop is active right now.

    Prefers the live ``AutonomyService`` state when the service is
    registered; otherwise falls back to the runtime's ``enable_autonomy``
    attribute (treated as False when absent).
    """
    service = runtime.get_service(AUTONOMY_SERVICE_TYPE)
    if isinstance(service, AutonomyService) and service:
        return service.is_loop_running()
    # No usable service instance: consult the runtime-level flag.
    return getattr(runtime, "enable_autonomy", False)
209
+
210
+
211
async def _validate_enable_autonomy(
    runtime: IAgentRuntime,
    message: Memory,
    _state: State | None = None,
) -> bool:
    """ENABLE_AUTONOMY is only offered while autonomy is paused/disabled."""
    currently_running = _is_autonomy_running(runtime)
    return not currently_running
218
+
219
+
220
async def _validate_disable_autonomy(
    runtime: IAgentRuntime,
    message: Memory,
    _state: State | None = None,
) -> bool:
    """DISABLE_AUTONOMY is only offered while autonomy is running/enabled."""
    currently_running = _is_autonomy_running(runtime)
    return currently_running
227
+
228
+
229
async def _handle_enable_autonomy(
    runtime: IAgentRuntime,
    message: Memory,
    state: State | None = None,
    options: HandlerOptions | None = None,
    callback: Callable[[Content], Awaitable[None]] | None = None,
    responses: list[Memory] | None = None,
) -> ActionResult:
    """Turn the autonomous loop on.

    Delegates to the registered ``AutonomyService`` when available;
    otherwise records intent by setting ``runtime.enable_autonomy``.
    The outcome is reported through *callback* (when given) and in the
    returned ``ActionResult``.
    """
    service = runtime.get_service(AUTONOMY_SERVICE_TYPE)

    if isinstance(service, AutonomyService) and service:
        await service.enable_autonomy()
        outcome_text = "Autonomy has been enabled."
    else:
        # No live service: fall back to the runtime-level flag.
        runtime.enable_autonomy = True
        outcome_text = "Autonomy enabled (runtime flag). The autonomy service is not running."

    if callback:
        await callback(Content(text=outcome_text))
    return ActionResult(success=True, text=outcome_text, data={"enabled": True})
252
+
253
+
254
async def _handle_disable_autonomy(
    runtime: IAgentRuntime,
    message: Memory,
    state: State | None = None,
    options: HandlerOptions | None = None,
    callback: Callable[[Content], Awaitable[None]] | None = None,
    responses: list[Memory] | None = None,
) -> ActionResult:
    """Turn the autonomous loop off.

    Delegates to the registered ``AutonomyService`` when available;
    otherwise clears the ``runtime.enable_autonomy`` flag directly.
    The outcome is reported through *callback* (when given) and in the
    returned ``ActionResult``.
    """
    service = runtime.get_service(AUTONOMY_SERVICE_TYPE)

    if isinstance(service, AutonomyService) and service:
        await service.disable_autonomy()
        outcome_text = "Autonomy has been disabled."
    else:
        # No live service: fall back to the runtime-level flag.
        runtime.enable_autonomy = False
        outcome_text = "Autonomy disabled (runtime flag)."

    if callback:
        await callback(Content(text=outcome_text))
    return ActionResult(success=True, text=outcome_text, data={"enabled": False})
277
+
278
+
279
# Action that switches the agent's autonomous loop on.
# Gated by _validate_enable_autonomy (only offered while paused) and
# executed by _handle_enable_autonomy.
enable_autonomy_action = Action(
    name="ENABLE_AUTONOMY",
    description=(
        "Enable the agent's autonomous operation. "
        "Use this when asked to start autonomy, go autonomous, or activate autonomous behavior. "
        "Only available when autonomy is currently paused."
    ),
    # Alternate action names the model may emit for the same intent.
    similes=["START_AUTONOMY", "ACTIVATE_AUTONOMY", "GO_AUTONOMOUS"],
    # Example dialogues demonstrating when this action applies.
    examples=[
        [
            {
                "name": "User",
                "content": {"text": "Enable autonomy"},
            },
            {
                "name": "Agent",
                "content": {
                    "text": "Autonomy has been enabled.",
                    "action": "ENABLE_AUTONOMY",
                },
            },
        ],
        [
            {
                "name": "User",
                "content": {"text": "Go autonomous"},
            },
            {
                "name": "Agent",
                "content": {
                    "text": "Autonomy has been enabled.",
                    "action": "ENABLE_AUTONOMY",
                },
            },
        ],
    ],
    validate=_validate_enable_autonomy,
    handler=_handle_enable_autonomy,
)
318
+
319
+
320
# Action that switches the agent's autonomous loop off.
# Gated by _validate_disable_autonomy (only offered while running) and
# executed by _handle_disable_autonomy.
disable_autonomy_action = Action(
    name="DISABLE_AUTONOMY",
    description=(
        "Disable the agent's autonomous operation. "
        "Use this when asked to stop, pause, or deactivate autonomous behavior. "
        "Only available when autonomy is currently running."
    ),
    # Alternate action names the model may emit for the same intent.
    similes=["STOP_AUTONOMY", "PAUSE_AUTONOMY", "DEACTIVATE_AUTONOMY"],
    # Example dialogues demonstrating when this action applies.
    examples=[
        [
            {
                "name": "User",
                "content": {"text": "Disable autonomy"},
            },
            {
                "name": "Agent",
                "content": {
                    "text": "Autonomy has been disabled.",
                    "action": "DISABLE_AUTONOMY",
                },
            },
        ],
        [
            {
                "name": "User",
                "content": {"text": "Stop being autonomous"},
            },
            {
                "name": "Agent",
                "content": {
                    "text": "Autonomy has been disabled.",
                    "action": "DISABLE_AUTONOMY",
                },
            },
        ],
    ],
    validate=_validate_disable_autonomy,
    handler=_handle_disable_autonomy,
)
@@ -0,0 +1,217 @@
1
+ """Post-action evaluator for the autonomous loop.
2
+
3
+ After actions complete during an autonomous cycle, this evaluator asks the LLM
4
+ whether the agent has satisfied its goal or should continue with more actions.
5
+ If the LLM says CONTINUE, the evaluator recursively triggers another full
6
+ message-handling cycle so the agent can pick and execute additional actions.
7
+ This repeats until the LLM says PAUSE or the safety limit is reached.
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import logging
13
+ from typing import TYPE_CHECKING, Any
14
+
15
+ from elizaos.types.components import ActionResult, Evaluator, HandlerOptions
16
+ from elizaos.types.memory import Memory
17
+ from elizaos.types.model import ModelType
18
+ from elizaos.types.primitives import Content
19
+
20
+ from .service import AUTONOMY_SERVICE_TYPE, AutonomyService
21
+
22
+ if TYPE_CHECKING:
23
+ from collections.abc import Awaitable, Callable
24
+
25
+ from elizaos.types.runtime import IAgentRuntime
26
+ from elizaos.types.state import State
27
+
28
# Module-level logger for LLM-call failures in this evaluator.
_logger = logging.getLogger(__name__)

# Safety limit – the evaluator will stop recursing after this many
# consecutive CONTINUE decisions within a single evaluation chain.
MAX_CONTINUATION_DEPTH = 10

# Prompt asking the model for a one-word CONTINUE/PAUSE verdict.
# Filled via str.format with `action_results` and `recent_context`.
POST_ACTION_EVALUATION_TEMPLATE = """\
You are evaluating whether an autonomous agent has completed its current objective.

Recent actions and their results:
{action_results}

Recent context:
{recent_context}

Based on the above, decide:
- If the agent has completed everything it set out to do, or there is nothing \
more it can meaningfully do right now, respond with exactly: PAUSE
- If there are remaining steps, errors that need retrying, or follow-up actions \
the agent should take immediately, respond with exactly: CONTINUE

Respond with a single word: CONTINUE or PAUSE
"""
+
52
+
53
+ async def _validate_post_action(
54
+ runtime: IAgentRuntime,
55
+ message: Memory,
56
+ _state: State | None = None,
57
+ ) -> bool:
58
+ """Run when the message originated from the autonomy service."""
59
+ if not message.content:
60
+ return False
61
+
62
+ # Check content.data for autonomy markers
63
+ data = message.content.data
64
+ if isinstance(data, dict):
65
+ if data.get("isAutonomous") is True or data.get("source") == "autonomy-service":
66
+ return True
67
+
68
+ # Check content.source field
69
+ source = getattr(message.content, "source", None)
70
+ return bool(isinstance(source, str) and source == "autonomy-service")
71
+
72
+
73
+ def _collect_action_results_text(runtime: IAgentRuntime, message: Memory) -> str:
74
+ """Build a human-readable summary of most recent action results."""
75
+ if not message.id:
76
+ return "(no action results available)"
77
+
78
+ results = runtime.get_action_results(message.id)
79
+ if not results:
80
+ return "(no action results available)"
81
+
82
+ lines: list[str] = []
83
+ for r in results:
84
+ name = ""
85
+ data = getattr(r, "data", None)
86
+ if isinstance(data, dict):
87
+ v = data.get("actionName")
88
+ if isinstance(v, str):
89
+ name = v
90
+ success = getattr(r, "success", True)
91
+ text = getattr(r, "text", "")
92
+ status = "success" if success else "failed"
93
+ lines.append(f"- {name} ({status}): {text}")
94
+
95
+ return "\n".join(lines) if lines else "(no action results available)"
96
+
97
+
98
+ async def _collect_recent_context(runtime: IAgentRuntime, room_id: Any) -> str:
99
+ """Gather a short snippet of recent memories for the evaluation prompt."""
100
+ try:
101
+ recent_memories = await runtime.get_memories(
102
+ {"roomId": room_id, "count": 5, "tableName": "memories"}
103
+ )
104
+ except Exception:
105
+ return "(no recent context)"
106
+
107
+ if not recent_memories:
108
+ return "(no recent context)"
109
+
110
+ ctx_lines: list[str] = []
111
+ for m in recent_memories:
112
+ if m.content and m.content.text:
113
+ ctx_lines.append(m.content.text[:200])
114
+
115
+ return "\n".join(ctx_lines[-3:]) if ctx_lines else "(no recent context)"
116
+
117
+
118
async def _handle_post_action(
    runtime: IAgentRuntime,
    message: Memory,
    state: State | None = None,
    options: HandlerOptions | None = None,
    callback: Callable[[Content], Awaitable[None]] | None = None,
    responses: list[Memory] | None = None,
) -> ActionResult | None:
    """Evaluate whether the agent should continue with more actions.

    Runs after ``processActions`` completes during an autonomy cycle.
    If the LLM says CONTINUE, it recursively triggers another autonomous
    think iteration. This repeats until the LLM says PAUSE or the
    safety depth limit is reached.

    Returns ``None`` when the evaluator does not apply (service missing,
    loop not running, or the LLM call failed); otherwise an
    ``ActionResult`` whose ``data["decision"]`` is CONTINUE or PAUSE.
    """
    autonomy_service = runtime.get_service(AUTONOMY_SERVICE_TYPE)
    if not autonomy_service or not isinstance(autonomy_service, AutonomyService):
        return None

    # Don't evaluate if autonomy is not actually running
    if not autonomy_service.is_loop_running():
        return None

    # Track recursion depth via an attribute on the service instance;
    # it persists across evaluator invocations within one chain and is
    # reset to 0 on PAUSE, LLM failure, or hitting the depth limit.
    depth: int = getattr(autonomy_service, "_eval_depth", 0)

    action_results_text = _collect_action_results_text(runtime, message)

    # Prefer the dedicated autonomous room; fall back to the message's room.
    room_id = autonomy_service.get_autonomous_room_id() or message.room_id
    recent_context = await _collect_recent_context(runtime, room_id)

    prompt = POST_ACTION_EVALUATION_TEMPLATE.format(
        action_results=action_results_text,
        recent_context=recent_context,
    )

    try:
        # Small, low-temperature call: only a one-word verdict is needed.
        result = await runtime.use_model(
            ModelType.TEXT_SMALL,
            {"prompt": prompt, "temperature": 0.1, "maxTokens": 10},
        )
        decision = str(result).strip().upper()
    except Exception as e:
        # NOTE(review): failures log via module _logger while the paths
        # below use runtime.logger — confirm which is intended.
        _logger.warning(f"Post-action evaluation LLM call failed: {e}")
        autonomy_service._eval_depth = 0  # type: ignore[attr-defined]
        return None

    # NOTE(review): substring match — any reply containing "CONTINUE"
    # (e.g. "DO NOT CONTINUE") is treated as CONTINUE; confirm acceptable.
    if "CONTINUE" in decision:
        depth += 1

        if depth >= MAX_CONTINUATION_DEPTH:
            runtime.logger.warning(
                f"[post-action-evaluator] Safety limit reached ({MAX_CONTINUATION_DEPTH} "
                "consecutive actions). Pausing."
            )
            autonomy_service._eval_depth = 0  # type: ignore[attr-defined]
            return ActionResult(
                success=True,
                text=f"Paused after {MAX_CONTINUATION_DEPTH} consecutive actions",
                data={"decision": "PAUSE", "reason": "depth_limit", "depth": depth},
            )

        runtime.logger.info(
            f"[post-action-evaluator] CONTINUE (depth {depth}) – "
            "triggering another autonomous think cycle"
        )
        # Persist the incremented depth before recursing so the next
        # evaluator invocation in this chain sees it.
        autonomy_service._eval_depth = depth  # type: ignore[attr-defined]

        # Recurse: trigger another full think → actions → evaluate cycle
        await autonomy_service.perform_autonomous_think()

        return ActionResult(
            success=True,
            text=f"Continued with additional actions (depth {depth})",
            data={"decision": "CONTINUE", "depth": depth},
        )

    # PAUSE (or unrecognised → default to pause)
    runtime.logger.info(
        f"[post-action-evaluator] PAUSE after {depth} continuation(s) – agent is satisfied"
    )
    autonomy_service._eval_depth = 0  # type: ignore[attr-defined]

    return ActionResult(
        success=True,
        text="Agent is satisfied, pausing",
        data={"decision": "PAUSE", "depth": depth},
    )
+ )
206
+
207
+
208
# Evaluator wired into the autonomy capability: after each autonomous
# action batch it decides (via _handle_post_action) whether to recurse
# into another think cycle or pause.
post_action_evaluator = Evaluator(
    name="POST_ACTION_EVALUATOR",
    description=(
        "Evaluates after autonomous actions complete to determine if the agent "
        "should recursively continue with more actions or pause."
    ),
    validate=_validate_post_action,
    handler=_handle_post_action,
    # NOTE(review): presumably forces the evaluator to run on every
    # qualifying message rather than being sampled — confirm against the
    # Evaluator type's semantics.
    always_run=True,
)
@@ -446,6 +446,14 @@ class AutonomyService(Service):
446
446
  {"roomId": self._autonomous_room_id, "count": per_room_limit, "tableName": "memories"}
447
447
  )
448
448
 
449
+ # ── Recent-context cutoff: ignore messages older than 1 hour ──
450
+ one_hour_ms = 3_600_000
451
+ now_ms = int(time.time() * 1000)
452
+ cutoff_ms = now_ms - one_hour_ms
453
+
454
+ fetched_messages = [m for m in fetched_messages if (m.created_at or 0) >= cutoff_ms]
455
+ autonomy_memories = [m for m in autonomy_memories if (m.created_at or 0) >= cutoff_ms]
456
+
449
457
  external_messages = [
450
458
  m
451
459
  for m in fetched_messages
@@ -21,6 +21,9 @@ from .autonomy import (
21
21
  AutonomyService,
22
22
  admin_chat_provider,
23
23
  autonomy_status_provider,
24
+ disable_autonomy_action,
25
+ enable_autonomy_action,
26
+ post_action_evaluator,
24
27
  send_to_admin_action,
25
28
  )
26
29
  from .types import CapabilityConfig
@@ -63,6 +66,8 @@ def _get_actions(config: CapabilityConfig) -> list:
63
66
  result.extend(EXTENDED_ACTIONS)
64
67
  if config.enable_autonomy:
65
68
  result.append(send_to_admin_action)
69
+ result.append(enable_autonomy_action)
70
+ result.append(disable_autonomy_action)
66
71
  return result
67
72
 
68
73
 
@@ -73,6 +78,8 @@ def _get_evaluators(config: CapabilityConfig) -> list:
73
78
  result.extend(BASIC_EVALUATORS)
74
79
  if config.enable_extended:
75
80
  result.extend(EXTENDED_EVALUATORS)
81
+ if config.enable_autonomy:
82
+ result.append(post_action_evaluator)
76
83
  return result
77
84
 
78
85
 
@@ -4,6 +4,7 @@ from typing import TYPE_CHECKING
4
4
 
5
5
  from elizaos.generated.spec_helpers import require_provider_spec
6
6
  from elizaos.types import Provider, ProviderResult
7
+ from elizaos.types.database import MemorySearchOptions
7
8
 
8
9
  if TYPE_CHECKING:
9
10
  from elizaos.types import IAgentRuntime, Memory, State
@@ -29,11 +30,33 @@ async def get_knowledge_context(
29
30
  text="", values={"knowledgeCount": 0, "hasKnowledge": False}, data={"entries": []}
30
31
  )
31
32
 
32
- relevant_knowledge = await runtime.search_knowledge(
33
- query=query_text,
34
- limit=5,
33
+ # 1. Fetch recent messages to get embeddings
34
+ recent_messages = await runtime.get_memories(
35
+ room_id=message.room_id, limit=5, table_name="messages"
35
36
  )
36
37
 
38
+ # 2. Extract valid embeddings
39
+ embeddings = [m.embedding for m in recent_messages if m and m.embedding]
40
+
41
+ relevant_knowledge = []
42
+ # 3. Search using the most recent embedding if available
43
+ if embeddings:
44
+ primary_embedding = embeddings[0]
45
+ params = MemorySearchOptions(
46
+ table_name="knowledge",
47
+ room_id=message.room_id,
48
+ embedding=primary_embedding,
49
+ match_threshold=0.75,
50
+ match_count=5,
51
+ unique=True,
52
+ )
53
+ relevant_knowledge = await runtime.search_memories(params)
54
+ elif query_text:
55
+ # No message embeddings available: mirror the TS implementation,
57
+ # which skips the knowledge search entirely in this case
58
+ # (intentionally no fallback to search_knowledge here).
58
+ pass
59
+
37
60
  for entry in relevant_knowledge:
38
61
  if entry.content and entry.content.text:
39
62
  knowledge_text = entry.content.text
@@ -1,8 +1,11 @@
1
1
  from __future__ import annotations
2
2
 
3
- from typing import TYPE_CHECKING
3
+ import asyncio
4
+ import contextlib
5
+ from typing import TYPE_CHECKING, Any
4
6
 
5
7
  from elizaos.types import ModelType, Service, ServiceType
8
+ from elizaos.types.events import EventType
6
9
 
7
10
  if TYPE_CHECKING:
8
11
  from elizaos.types import IAgentRuntime
@@ -21,11 +24,21 @@ class EmbeddingService(Service):
21
24
  self._cache: dict[str, list[float]] = {}
22
25
  self._cache_enabled: bool = True
23
26
  self._max_cache_size: int = 1000
27
+ self._queue: asyncio.Queue = asyncio.Queue()
28
+ self._worker_task: asyncio.Task | None = None
24
29
 
25
30
  @classmethod
26
31
  async def start(cls, runtime: IAgentRuntime) -> EmbeddingService:
27
32
  service = cls()
28
33
  service._runtime = runtime
34
+
35
+ # Register event handler
36
+ event_name = EventType.Name(EventType.EVENT_TYPE_EMBEDDING_GENERATION_REQUESTED)
37
+ runtime.register_event(event_name, service._handle_embedding_request)
38
+
39
+ # Start worker
40
+ service._worker_task = asyncio.create_task(service._worker())
41
+
29
42
  runtime.logger.info(
30
43
  "Embedding service started",
31
44
  src="service:embedding",
@@ -34,6 +47,12 @@ class EmbeddingService(Service):
34
47
  return service
35
48
 
36
49
  async def stop(self) -> None:
50
+ if self._worker_task:
51
+ self._worker_task.cancel()
52
+ with contextlib.suppress(asyncio.CancelledError):
53
+ await self._worker_task
54
+ self._worker_task = None
55
+
37
56
  if self._runtime:
38
57
  self._runtime.logger.info(
39
58
  "Embedding service stopped",
@@ -120,3 +139,139 @@ class EmbeddingService(Service):
120
139
  return 0.0
121
140
 
122
141
  return dot_product / (magnitude1 * magnitude2)
142
+
143
+ async def _handle_embedding_request(self, payload: Any) -> None:
144
+ """Handle embedding generation request event."""
145
+ await self._queue.put(payload)
146
+
147
+ async def _worker(self) -> None:
148
+ """Background worker for processing embedding requests."""
149
+ while True:
150
+ try:
151
+ payload = await self._queue.get()
152
+ except asyncio.CancelledError:
153
+ break
154
+
155
+ try:
156
+ await self._process_embedding_request(payload)
157
+ except Exception as e:
158
+ if self._runtime:
159
+ self._runtime.logger.error(f"Error in embedding worker: {e}", exc_info=True)
160
+ finally:
161
+ self._queue.task_done()
162
+
163
+ async def _process_embedding_request(self, payload: Any) -> None:
164
+ from elizaos.types.events import EventType
165
+ from elizaos.types.memory import Memory
166
+
167
+ # Extract memory from payload
168
+ # Handle both protobuf object and dict/wrapper
169
+ memory_data = None
170
+ if hasattr(payload, "memory"): # specific payload
171
+ memory_data = payload.memory
172
+ elif hasattr(payload, "extra") and hasattr(
173
+ payload.extra, "__getitem__"
174
+ ): # generic event payload
175
+ try:
176
+ # Check if 'memory' is in extra
177
+ # payload.extra might be a Struct or dict
178
+ if "memory" in payload.extra:
179
+ from elizaos.runtime import _struct_value_to_python
180
+
181
+ mem_val = payload.extra["memory"]
182
+ if hasattr(mem_val, "struct_value"):
183
+ if mem_val.HasField("struct_value"):
184
+ memory_data = _struct_value_to_python(mem_val)
185
+ else:
186
+ memory_data = mem_val
187
+ else:
188
+ memory_data = mem_val
189
+ except Exception:
190
+ pass
191
+
192
+ if not memory_data:
193
+ return
194
+
195
+ # Convert to Memory object if needed
196
+ if isinstance(memory_data, dict):
197
+ memory = Memory(
198
+ id=memory_data.get("id"),
199
+ content=memory_data.get("content"),
200
+ room_id=memory_data.get("roomId") or memory_data.get("room_id"),
201
+ entity_id=memory_data.get("entityId")
202
+ or memory_data.get("entity_id")
203
+ or memory_data.get("userId")
204
+ or memory_data.get("user_id"),
205
+ agent_id=memory_data.get("agentId") or memory_data.get("agent_id"),
206
+ )
207
+ if "embedding" in memory_data:
208
+ memory.embedding = memory_data["embedding"]
209
+ if "metadata" in memory_data:
210
+ memory.metadata = memory_data["metadata"]
211
+ else:
212
+ memory = memory_data
213
+
214
+ if not memory.id:
215
+ return
216
+
217
+ if memory.embedding and len(memory.embedding) > 0:
218
+ return
219
+
220
+ text = (
221
+ memory.content.text
222
+ if hasattr(memory.content, "text")
223
+ else getattr(memory.content, "text", "")
224
+ )
225
+ if not text:
226
+ return
227
+
228
+ embedding_source_text = text
229
+
230
+ # Intent generation logic
231
+ if len(text) > 20:
232
+ has_intent = False
233
+ if memory.metadata and isinstance(memory.metadata, dict):
234
+ has_intent = "intent" in memory.metadata
235
+
236
+ if not has_intent:
237
+ prompt = (
238
+ "Analyze the following message and extract the core user intent or a summary "
239
+ "of what they are asking/saying. Return ONLY the intent text.\n"
240
+ f'Message:\n"{text}"\n\nIntent:'
241
+ )
242
+
243
+ try:
244
+ output = await self._runtime.use_model(ModelType.TEXT_SMALL, prompt=prompt)
245
+
246
+ intent = str(output).strip()
247
+ if intent:
248
+ embedding_source_text = intent
249
+ # Update metadata
250
+ # Use custom metadata for intent
251
+ memory.metadata.custom.custom_data["intent"] = intent
252
+ except Exception as e:
253
+ self._runtime.logger.warning(f"Failed to generate intent: {e}")
254
+
255
+ # Generate embedding
256
+ try:
257
+ embedding = await self.embed(embedding_source_text)
258
+ # Protobuf repeated field assignment must extend or use slice
259
+ if hasattr(memory.embedding, "extend"): # It's a repeated field
260
+ del memory.embedding[:]
261
+ memory.embedding.extend(embedding)
262
+ else:
263
+ # If it's a list (unlikely based on error)
264
+ memory.embedding = embedding
265
+
266
+ # Update in DB
267
+ if getattr(self._runtime, "_adapter", None):
268
+ await self._runtime._adapter.update_memory(memory)
269
+
270
+ # Emit completion
271
+ await self._runtime.emit_event(
272
+ EventType.Name(EventType.EVENT_TYPE_EMBEDDING_GENERATION_COMPLETED),
273
+ {"source": "embedding_service", "memory_id": str(memory.id)},
274
+ )
275
+
276
+ except Exception as e:
277
+ self._runtime.logger.error(f"Failed to generate embedding: {e}")