@elizaos/python 2.0.0-alpha.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +26 -0
- package/README.md +239 -0
- package/elizaos/__init__.py +280 -0
- package/elizaos/action_docs.py +149 -0
- package/elizaos/advanced_capabilities/__init__.py +85 -0
- package/elizaos/advanced_capabilities/actions/__init__.py +54 -0
- package/elizaos/advanced_capabilities/actions/add_contact.py +139 -0
- package/elizaos/advanced_capabilities/actions/follow_room.py +151 -0
- package/elizaos/advanced_capabilities/actions/image_generation.py +148 -0
- package/elizaos/advanced_capabilities/actions/mute_room.py +164 -0
- package/elizaos/advanced_capabilities/actions/remove_contact.py +145 -0
- package/elizaos/advanced_capabilities/actions/roles.py +207 -0
- package/elizaos/advanced_capabilities/actions/schedule_follow_up.py +154 -0
- package/elizaos/advanced_capabilities/actions/search_contacts.py +145 -0
- package/elizaos/advanced_capabilities/actions/send_message.py +187 -0
- package/elizaos/advanced_capabilities/actions/settings.py +151 -0
- package/elizaos/advanced_capabilities/actions/unfollow_room.py +164 -0
- package/elizaos/advanced_capabilities/actions/unmute_room.py +164 -0
- package/elizaos/advanced_capabilities/actions/update_contact.py +164 -0
- package/elizaos/advanced_capabilities/actions/update_entity.py +161 -0
- package/elizaos/advanced_capabilities/evaluators/__init__.py +18 -0
- package/elizaos/advanced_capabilities/evaluators/reflection.py +134 -0
- package/elizaos/advanced_capabilities/evaluators/relationship_extraction.py +203 -0
- package/elizaos/advanced_capabilities/providers/__init__.py +36 -0
- package/elizaos/advanced_capabilities/providers/agent_settings.py +60 -0
- package/elizaos/advanced_capabilities/providers/contacts.py +77 -0
- package/elizaos/advanced_capabilities/providers/facts.py +82 -0
- package/elizaos/advanced_capabilities/providers/follow_ups.py +113 -0
- package/elizaos/advanced_capabilities/providers/knowledge.py +83 -0
- package/elizaos/advanced_capabilities/providers/relationships.py +112 -0
- package/elizaos/advanced_capabilities/providers/roles.py +97 -0
- package/elizaos/advanced_capabilities/providers/settings.py +51 -0
- package/elizaos/advanced_capabilities/services/__init__.py +18 -0
- package/elizaos/advanced_capabilities/services/follow_up.py +138 -0
- package/elizaos/advanced_capabilities/services/rolodex.py +244 -0
- package/elizaos/advanced_memory/__init__.py +3 -0
- package/elizaos/advanced_memory/evaluators.py +97 -0
- package/elizaos/advanced_memory/memory_service.py +556 -0
- package/elizaos/advanced_memory/plugin.py +30 -0
- package/elizaos/advanced_memory/prompts.py +12 -0
- package/elizaos/advanced_memory/providers.py +90 -0
- package/elizaos/advanced_memory/types.py +65 -0
- package/elizaos/advanced_planning/__init__.py +10 -0
- package/elizaos/advanced_planning/actions.py +145 -0
- package/elizaos/advanced_planning/message_classifier.py +127 -0
- package/elizaos/advanced_planning/planning_service.py +712 -0
- package/elizaos/advanced_planning/plugin.py +40 -0
- package/elizaos/advanced_planning/prompts.py +4 -0
- package/elizaos/basic_capabilities/__init__.py +66 -0
- package/elizaos/basic_capabilities/actions/__init__.py +24 -0
- package/elizaos/basic_capabilities/actions/choice.py +140 -0
- package/elizaos/basic_capabilities/actions/ignore.py +66 -0
- package/elizaos/basic_capabilities/actions/none.py +56 -0
- package/elizaos/basic_capabilities/actions/reply.py +120 -0
- package/elizaos/basic_capabilities/providers/__init__.py +54 -0
- package/elizaos/basic_capabilities/providers/action_state.py +113 -0
- package/elizaos/basic_capabilities/providers/actions.py +263 -0
- package/elizaos/basic_capabilities/providers/attachments.py +76 -0
- package/elizaos/basic_capabilities/providers/capabilities.py +62 -0
- package/elizaos/basic_capabilities/providers/character.py +113 -0
- package/elizaos/basic_capabilities/providers/choice.py +73 -0
- package/elizaos/basic_capabilities/providers/context_bench.py +44 -0
- package/elizaos/basic_capabilities/providers/current_time.py +58 -0
- package/elizaos/basic_capabilities/providers/entities.py +99 -0
- package/elizaos/basic_capabilities/providers/evaluators.py +54 -0
- package/elizaos/basic_capabilities/providers/providers_list.py +55 -0
- package/elizaos/basic_capabilities/providers/recent_messages.py +85 -0
- package/elizaos/basic_capabilities/providers/time.py +45 -0
- package/elizaos/basic_capabilities/providers/world.py +93 -0
- package/elizaos/basic_capabilities/services/__init__.py +18 -0
- package/elizaos/basic_capabilities/services/embedding.py +122 -0
- package/elizaos/basic_capabilities/services/task.py +178 -0
- package/elizaos/bootstrap/__init__.py +12 -0
- package/elizaos/bootstrap/actions/__init__.py +68 -0
- package/elizaos/bootstrap/actions/add_contact.py +149 -0
- package/elizaos/bootstrap/actions/choice.py +147 -0
- package/elizaos/bootstrap/actions/follow_room.py +151 -0
- package/elizaos/bootstrap/actions/ignore.py +80 -0
- package/elizaos/bootstrap/actions/image_generation.py +135 -0
- package/elizaos/bootstrap/actions/mute_room.py +151 -0
- package/elizaos/bootstrap/actions/none.py +71 -0
- package/elizaos/bootstrap/actions/remove_contact.py +159 -0
- package/elizaos/bootstrap/actions/reply.py +140 -0
- package/elizaos/bootstrap/actions/roles.py +193 -0
- package/elizaos/bootstrap/actions/schedule_follow_up.py +164 -0
- package/elizaos/bootstrap/actions/search_contacts.py +159 -0
- package/elizaos/bootstrap/actions/send_message.py +173 -0
- package/elizaos/bootstrap/actions/settings.py +165 -0
- package/elizaos/bootstrap/actions/unfollow_room.py +151 -0
- package/elizaos/bootstrap/actions/unmute_room.py +151 -0
- package/elizaos/bootstrap/actions/update_contact.py +178 -0
- package/elizaos/bootstrap/actions/update_entity.py +175 -0
- package/elizaos/bootstrap/autonomy/__init__.py +18 -0
- package/elizaos/bootstrap/autonomy/action.py +197 -0
- package/elizaos/bootstrap/autonomy/providers.py +165 -0
- package/elizaos/bootstrap/autonomy/routes.py +171 -0
- package/elizaos/bootstrap/autonomy/service.py +562 -0
- package/elizaos/bootstrap/autonomy/types.py +18 -0
- package/elizaos/bootstrap/evaluators/__init__.py +19 -0
- package/elizaos/bootstrap/evaluators/reflection.py +118 -0
- package/elizaos/bootstrap/evaluators/relationship_extraction.py +192 -0
- package/elizaos/bootstrap/plugin.py +140 -0
- package/elizaos/bootstrap/providers/__init__.py +80 -0
- package/elizaos/bootstrap/providers/action_state.py +71 -0
- package/elizaos/bootstrap/providers/actions.py +256 -0
- package/elizaos/bootstrap/providers/agent_settings.py +63 -0
- package/elizaos/bootstrap/providers/attachments.py +76 -0
- package/elizaos/bootstrap/providers/capabilities.py +66 -0
- package/elizaos/bootstrap/providers/character.py +128 -0
- package/elizaos/bootstrap/providers/choice.py +77 -0
- package/elizaos/bootstrap/providers/contacts.py +78 -0
- package/elizaos/bootstrap/providers/context_bench.py +49 -0
- package/elizaos/bootstrap/providers/current_time.py +56 -0
- package/elizaos/bootstrap/providers/entities.py +99 -0
- package/elizaos/bootstrap/providers/evaluators.py +58 -0
- package/elizaos/bootstrap/providers/facts.py +86 -0
- package/elizaos/bootstrap/providers/follow_ups.py +116 -0
- package/elizaos/bootstrap/providers/knowledge.py +73 -0
- package/elizaos/bootstrap/providers/providers_list.py +59 -0
- package/elizaos/bootstrap/providers/recent_messages.py +85 -0
- package/elizaos/bootstrap/providers/relationships.py +106 -0
- package/elizaos/bootstrap/providers/roles.py +95 -0
- package/elizaos/bootstrap/providers/settings.py +55 -0
- package/elizaos/bootstrap/providers/time.py +45 -0
- package/elizaos/bootstrap/providers/world.py +97 -0
- package/elizaos/bootstrap/services/__init__.py +26 -0
- package/elizaos/bootstrap/services/embedding.py +122 -0
- package/elizaos/bootstrap/services/follow_up.py +138 -0
- package/elizaos/bootstrap/services/rolodex.py +244 -0
- package/elizaos/bootstrap/services/task.py +585 -0
- package/elizaos/bootstrap/types.py +54 -0
- package/elizaos/bootstrap/utils/__init__.py +7 -0
- package/elizaos/bootstrap/utils/xml.py +69 -0
- package/elizaos/character.py +149 -0
- package/elizaos/logger.py +179 -0
- package/elizaos/media/__init__.py +45 -0
- package/elizaos/media/mime.py +315 -0
- package/elizaos/media/search.py +161 -0
- package/elizaos/media/tests/__init__.py +1 -0
- package/elizaos/media/tests/test_mime.py +117 -0
- package/elizaos/media/tests/test_search.py +156 -0
- package/elizaos/plugin.py +191 -0
- package/elizaos/prompts.py +1071 -0
- package/elizaos/py.typed +0 -0
- package/elizaos/runtime.py +2572 -0
- package/elizaos/services/__init__.py +49 -0
- package/elizaos/services/hook_service.py +511 -0
- package/elizaos/services/message_service.py +1248 -0
- package/elizaos/settings.py +182 -0
- package/elizaos/streaming_context.py +159 -0
- package/elizaos/trajectory_context.py +18 -0
- package/elizaos/types/__init__.py +512 -0
- package/elizaos/types/agent.py +31 -0
- package/elizaos/types/components.py +208 -0
- package/elizaos/types/database.py +64 -0
- package/elizaos/types/environment.py +46 -0
- package/elizaos/types/events.py +47 -0
- package/elizaos/types/memory.py +45 -0
- package/elizaos/types/model.py +393 -0
- package/elizaos/types/plugin.py +188 -0
- package/elizaos/types/primitives.py +100 -0
- package/elizaos/types/runtime.py +460 -0
- package/elizaos/types/service.py +113 -0
- package/elizaos/types/service_interfaces.py +244 -0
- package/elizaos/types/state.py +188 -0
- package/elizaos/types/task.py +29 -0
- package/elizaos/utils/__init__.py +108 -0
- package/elizaos/utils/spec_examples.py +48 -0
- package/elizaos/utils/streaming.py +426 -0
- package/elizaos_atropos_shared/__init__.py +1 -0
- package/elizaos_atropos_shared/canonical_eliza.py +282 -0
- package/package.json +19 -0
- package/pyproject.toml +143 -0
- package/requirements-dev.in +11 -0
- package/requirements-dev.lock +134 -0
- package/requirements.in +9 -0
- package/requirements.lock +64 -0
- package/tests/__init__.py +0 -0
- package/tests/test_action_parameters.py +154 -0
- package/tests/test_actions_provider_examples.py +39 -0
- package/tests/test_advanced_memory_behavior.py +96 -0
- package/tests/test_advanced_memory_flag.py +30 -0
- package/tests/test_advanced_planning_behavior.py +225 -0
- package/tests/test_advanced_planning_flag.py +26 -0
- package/tests/test_autonomy.py +445 -0
- package/tests/test_bootstrap_initialize.py +37 -0
- package/tests/test_character.py +163 -0
- package/tests/test_character_provider.py +231 -0
- package/tests/test_dynamic_prompt_exec.py +561 -0
- package/tests/test_logger_redaction.py +43 -0
- package/tests/test_plugin.py +117 -0
- package/tests/test_runtime.py +422 -0
- package/tests/test_salt_production_enforcement.py +22 -0
- package/tests/test_settings_crypto.py +118 -0
- package/tests/test_streaming.py +295 -0
- package/tests/test_types.py +221 -0
- package/tests/test_uuid_parity.py +46 -0
|
@@ -0,0 +1,1248 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import time
|
|
4
|
+
import uuid
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from collections.abc import AsyncIterator, Callable, Coroutine
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from typing import TYPE_CHECKING, Any
|
|
9
|
+
|
|
10
|
+
from google.protobuf.struct_pb2 import Struct
|
|
11
|
+
|
|
12
|
+
from elizaos.types.memory import Memory
|
|
13
|
+
from elizaos.types.model import ModelType
|
|
14
|
+
from elizaos.types.primitives import Content, as_uuid
|
|
15
|
+
from elizaos.types.state import SchemaRow, State
|
|
16
|
+
|
|
17
|
+
if TYPE_CHECKING:
|
|
18
|
+
from elizaos.types.runtime import IAgentRuntime
|
|
19
|
+
|
|
20
|
+
HandlerCallback = Callable[[Content], Coroutine[Any, Any, list[Memory]]]
|
|
21
|
+
StreamChunkCallback = Callable[[str], Coroutine[Any, Any, None]]
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class MessageProcessingResult:
    """Outcome of one pass through the message-handling pipeline."""

    # Whether the agent produced a response for the incoming message.
    did_respond: bool
    # Final response content, or None when no response was generated.
    response_content: Content | None
    # Memories created for the response (may be empty).
    response_messages: list[Memory] = field(default_factory=list)
    # Provider state composed while handling the message, if any.
    state: State | None = None
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass
class StreamingMessageResult:
    """Result metadata for streaming message processing."""

    # Memory record of the streamed response.
    response_memory: Memory
    # Provider state composed while handling the message, if any.
    state: State | None = None
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class IMessageService(ABC):
    """Abstract interface for services that run the agent message loop."""

    @abstractmethod
    async def handle_message(
        self,
        runtime: IAgentRuntime,
        message: Memory,
        callback: HandlerCallback | None = None,
    ) -> MessageProcessingResult: ...

    @abstractmethod
    def handle_message_stream(
        self,
        runtime: IAgentRuntime,
        message: Memory,
    ) -> AsyncIterator[str | StreamingMessageResult]:
        """
        Process a message and stream the response token by token.

        Yields:
            str: Text chunks as they are generated
            StreamingMessageResult: Final result with metadata (yielded last)
        """
        ...
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def _parse_actions_from_xml(xml_response: str) -> list[str]:
|
|
66
|
+
"""Parse actions from XML response."""
|
|
67
|
+
import re
|
|
68
|
+
|
|
69
|
+
# Try to find <actions> tag
|
|
70
|
+
match = re.search(r"<actions>\s*([^<]+)\s*</actions>", xml_response, re.IGNORECASE)
|
|
71
|
+
if match:
|
|
72
|
+
actions_text = match.group(1).strip()
|
|
73
|
+
if actions_text:
|
|
74
|
+
# Split by comma and clean up
|
|
75
|
+
actions = [a.strip().upper() for a in actions_text.split(",") if a.strip()]
|
|
76
|
+
return actions
|
|
77
|
+
return []
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def _parse_providers_from_xml(xml_response: str) -> list[str]:
|
|
81
|
+
"""Parse providers from XML response."""
|
|
82
|
+
import re
|
|
83
|
+
|
|
84
|
+
match = re.search(r"<providers>\s*([^<]+)\s*</providers>", xml_response, re.IGNORECASE)
|
|
85
|
+
if match:
|
|
86
|
+
providers_text = match.group(1).strip()
|
|
87
|
+
if providers_text:
|
|
88
|
+
providers = [p.strip() for p in providers_text.split(",") if p.strip()]
|
|
89
|
+
return providers
|
|
90
|
+
return []
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def _parse_tag(xml: str, tag: str) -> str | None:
|
|
94
|
+
open_tag = f"<{tag}>"
|
|
95
|
+
close_tag = f"</{tag}>"
|
|
96
|
+
start = xml.find(open_tag)
|
|
97
|
+
if start == -1:
|
|
98
|
+
return None
|
|
99
|
+
inner_start = start + len(open_tag)
|
|
100
|
+
end = xml.find(close_tag, inner_start)
|
|
101
|
+
if end == -1:
|
|
102
|
+
return None
|
|
103
|
+
return xml[inner_start:end].strip()
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def _parse_bool(value: object) -> bool:
|
|
107
|
+
if isinstance(value, bool):
|
|
108
|
+
return value
|
|
109
|
+
if isinstance(value, str):
|
|
110
|
+
return value.strip().lower() in ("true", "yes", "1", "on")
|
|
111
|
+
return False
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def _parse_int(value: object, *, default: int) -> int:
|
|
115
|
+
if isinstance(value, int):
|
|
116
|
+
return value
|
|
117
|
+
if isinstance(value, str):
|
|
118
|
+
try:
|
|
119
|
+
return int(value.strip())
|
|
120
|
+
except Exception:
|
|
121
|
+
return default
|
|
122
|
+
return default
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def _format_action_results(results: list[object]) -> str:
|
|
126
|
+
# Avoid importing ActionResult at module import time.
|
|
127
|
+
if not results:
|
|
128
|
+
return ""
|
|
129
|
+
lines: list[str] = []
|
|
130
|
+
for r in results:
|
|
131
|
+
# ActionResult has fields: success, text, data
|
|
132
|
+
name = ""
|
|
133
|
+
success = True
|
|
134
|
+
text = ""
|
|
135
|
+
data = getattr(r, "data", None)
|
|
136
|
+
if isinstance(data, dict):
|
|
137
|
+
v = data.get("actionName")
|
|
138
|
+
if isinstance(v, str):
|
|
139
|
+
name = v
|
|
140
|
+
s = getattr(r, "success", None)
|
|
141
|
+
if isinstance(s, bool):
|
|
142
|
+
success = s
|
|
143
|
+
t = getattr(r, "text", None)
|
|
144
|
+
if isinstance(t, str):
|
|
145
|
+
text = t
|
|
146
|
+
status = "success" if success else "failed"
|
|
147
|
+
lines.append(f"- {name} ({status}): {text}".strip())
|
|
148
|
+
return "\n".join(lines)
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def _parse_text_from_xml(xml_response: str) -> str:
|
|
152
|
+
"""Parse text content from XML response."""
|
|
153
|
+
import re
|
|
154
|
+
|
|
155
|
+
# Try <text> tag first
|
|
156
|
+
match = re.search(r"<text>\s*(.*?)\s*</text>", xml_response, re.DOTALL | re.IGNORECASE)
|
|
157
|
+
if match:
|
|
158
|
+
return match.group(1).strip()
|
|
159
|
+
return ""
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def _parse_thought_from_xml(xml_response: str) -> str:
|
|
163
|
+
"""Parse thought from XML response."""
|
|
164
|
+
import re
|
|
165
|
+
|
|
166
|
+
match = re.search(r"<thought>\s*(.*?)\s*</thought>", xml_response, re.DOTALL | re.IGNORECASE)
|
|
167
|
+
if match:
|
|
168
|
+
return match.group(1).strip()
|
|
169
|
+
return ""
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def _parse_params_from_xml(xml_response: str) -> dict[str, list[dict[str, str]]]:
|
|
173
|
+
"""Parse action parameters from an XML response.
|
|
174
|
+
|
|
175
|
+
The canonical template recommends nested XML:
|
|
176
|
+
<params><ACTION><param>value</param></ACTION></params>
|
|
177
|
+
|
|
178
|
+
In practice, some models return JSON inside <params>:
|
|
179
|
+
<params>{"ACTION":{"param":"value"}}</params>
|
|
180
|
+
|
|
181
|
+
This parser supports both.
|
|
182
|
+
"""
|
|
183
|
+
import json
|
|
184
|
+
import re
|
|
185
|
+
import xml.etree.ElementTree as ET
|
|
186
|
+
|
|
187
|
+
result: dict[str, list[dict[str, str]]] = {}
|
|
188
|
+
|
|
189
|
+
params_match = re.search(
|
|
190
|
+
r"<params>\s*(.*?)\s*</params>", xml_response, re.DOTALL | re.IGNORECASE
|
|
191
|
+
)
|
|
192
|
+
|
|
193
|
+
# If there's no <params> wrapper, some models return JSON directly (optionally in fences
|
|
194
|
+
# or nested inside other tags). Attempt to recover a JSON object.
|
|
195
|
+
if not params_match:
|
|
196
|
+
stripped = xml_response.strip()
|
|
197
|
+
# Try fenced JSON (```json ...``` or ``` ... ```)
|
|
198
|
+
fenced = re.search(r"```(?:json)?\s*(\{[\s\S]*?\})\s*```", stripped, re.IGNORECASE)
|
|
199
|
+
if fenced:
|
|
200
|
+
stripped = fenced.group(1).strip()
|
|
201
|
+
|
|
202
|
+
# Try extracting a JSON object substring (first {...} block)
|
|
203
|
+
if not (stripped.startswith("{") and stripped.endswith("}")):
|
|
204
|
+
obj_match = re.search(r"(\{[\s\S]*\})", stripped)
|
|
205
|
+
if obj_match:
|
|
206
|
+
stripped = obj_match.group(1).strip()
|
|
207
|
+
|
|
208
|
+
if stripped.startswith("[") and stripped.endswith("]"):
|
|
209
|
+
try:
|
|
210
|
+
loaded_any = json.loads(stripped)
|
|
211
|
+
except json.JSONDecodeError:
|
|
212
|
+
return result
|
|
213
|
+
if isinstance(loaded_any, list):
|
|
214
|
+
for item in loaded_any:
|
|
215
|
+
if not isinstance(item, dict):
|
|
216
|
+
continue
|
|
217
|
+
for action_name, action_params_raw in item.items():
|
|
218
|
+
if not isinstance(action_params_raw, dict):
|
|
219
|
+
continue
|
|
220
|
+
params_out: dict[str, str] = {}
|
|
221
|
+
for k, v in action_params_raw.items():
|
|
222
|
+
params_out[str(k)] = str(v)
|
|
223
|
+
if params_out:
|
|
224
|
+
result.setdefault(str(action_name).upper(), []).append(params_out)
|
|
225
|
+
return result
|
|
226
|
+
|
|
227
|
+
if stripped.startswith("{") and stripped.endswith("}"):
|
|
228
|
+
try:
|
|
229
|
+
loaded_any = json.loads(stripped)
|
|
230
|
+
except json.JSONDecodeError:
|
|
231
|
+
# Some models emit a sequence of JSON objects separated by commas:
|
|
232
|
+
# {...},{...},{...}
|
|
233
|
+
# which is not valid JSON unless wrapped in an array. Try that.
|
|
234
|
+
try:
|
|
235
|
+
loaded_any = json.loads(f"[{stripped}]")
|
|
236
|
+
except json.JSONDecodeError:
|
|
237
|
+
return result
|
|
238
|
+
if isinstance(loaded_any, dict):
|
|
239
|
+
for action_name, action_params_raw in loaded_any.items():
|
|
240
|
+
if not isinstance(action_params_raw, dict):
|
|
241
|
+
continue
|
|
242
|
+
params_out: dict[str, str] = {}
|
|
243
|
+
for k, v in action_params_raw.items():
|
|
244
|
+
params_out[str(k)] = str(v)
|
|
245
|
+
if params_out:
|
|
246
|
+
result.setdefault(str(action_name).upper(), []).append(params_out)
|
|
247
|
+
elif isinstance(loaded_any, list):
|
|
248
|
+
for item in loaded_any:
|
|
249
|
+
if not isinstance(item, dict):
|
|
250
|
+
continue
|
|
251
|
+
for action_name, action_params_raw in item.items():
|
|
252
|
+
if not isinstance(action_params_raw, dict):
|
|
253
|
+
continue
|
|
254
|
+
params_out: dict[str, str] = {}
|
|
255
|
+
for k, v in action_params_raw.items():
|
|
256
|
+
params_out[str(k)] = str(v)
|
|
257
|
+
if params_out:
|
|
258
|
+
result.setdefault(str(action_name).upper(), []).append(params_out)
|
|
259
|
+
return result
|
|
260
|
+
|
|
261
|
+
params_content = params_match.group(1).strip()
|
|
262
|
+
if not params_content:
|
|
263
|
+
return result
|
|
264
|
+
|
|
265
|
+
# First try XML parsing of the inner params content
|
|
266
|
+
try:
|
|
267
|
+
root = ET.fromstring(f"<params>{params_content}</params>")
|
|
268
|
+
for action_elem in list(root):
|
|
269
|
+
action_name = action_elem.tag.upper()
|
|
270
|
+
action_params: dict[str, str] = {}
|
|
271
|
+
|
|
272
|
+
for param_elem in list(action_elem):
|
|
273
|
+
value_text = (param_elem.text or "").strip()
|
|
274
|
+
action_params[param_elem.tag] = value_text
|
|
275
|
+
|
|
276
|
+
# If the action block contains text but no nested tags, try JSON-in-action.
|
|
277
|
+
if not action_params:
|
|
278
|
+
action_text = (action_elem.text or "").strip()
|
|
279
|
+
if action_text.startswith("{"):
|
|
280
|
+
try:
|
|
281
|
+
loaded = json.loads(action_text)
|
|
282
|
+
if isinstance(loaded, dict):
|
|
283
|
+
for k, v in loaded.items():
|
|
284
|
+
action_params[str(k)] = str(v)
|
|
285
|
+
except json.JSONDecodeError:
|
|
286
|
+
return result
|
|
287
|
+
|
|
288
|
+
if action_params:
|
|
289
|
+
result.setdefault(action_name, []).append(action_params)
|
|
290
|
+
|
|
291
|
+
return result
|
|
292
|
+
except ET.ParseError:
|
|
293
|
+
return result
|
|
294
|
+
|
|
295
|
+
# Fall back to JSON inside <params>...</params>
|
|
296
|
+
if not params_content.startswith("{"):
|
|
297
|
+
return result
|
|
298
|
+
|
|
299
|
+
try:
|
|
300
|
+
loaded = json.loads(params_content)
|
|
301
|
+
except json.JSONDecodeError:
|
|
302
|
+
return result
|
|
303
|
+
|
|
304
|
+
if not isinstance(loaded, dict):
|
|
305
|
+
return result
|
|
306
|
+
|
|
307
|
+
for action_name, action_params_raw in loaded.items():
|
|
308
|
+
if not isinstance(action_params_raw, dict):
|
|
309
|
+
continue
|
|
310
|
+
params: dict[str, str] = {}
|
|
311
|
+
for k, v in action_params_raw.items():
|
|
312
|
+
params[str(k)] = str(v)
|
|
313
|
+
if params:
|
|
314
|
+
result.setdefault(str(action_name).upper(), []).append(params)
|
|
315
|
+
|
|
316
|
+
return result
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
class DefaultMessageService(IMessageService):
|
|
320
|
+
"""Canonical message service that processes the full Eliza agent loop.
|
|
321
|
+
|
|
322
|
+
This service implements the canonical flow:
|
|
323
|
+
1. Save incoming message to memory
|
|
324
|
+
2. Compose state from providers
|
|
325
|
+
3. Generate response with MESSAGE_HANDLER_TEMPLATE (includes action selection)
|
|
326
|
+
4. Parse actions from XML response
|
|
327
|
+
5. Process actions via runtime.process_actions()
|
|
328
|
+
6. Run evaluators via runtime.evaluate()
|
|
329
|
+
7. Return result
|
|
330
|
+
|
|
331
|
+
"""
|
|
332
|
+
|
|
333
|
+
async def handle_message(
|
|
334
|
+
self,
|
|
335
|
+
runtime: IAgentRuntime,
|
|
336
|
+
message: Memory,
|
|
337
|
+
callback: HandlerCallback | None = None,
|
|
338
|
+
) -> MessageProcessingResult:
|
|
339
|
+
"""Handle an incoming message through the full agent loop.
|
|
340
|
+
|
|
341
|
+
Args:
|
|
342
|
+
runtime: The Eliza runtime.
|
|
343
|
+
message: The incoming message.
|
|
344
|
+
callback: Optional callback for streaming responses.
|
|
345
|
+
|
|
346
|
+
Returns:
|
|
347
|
+
MessageProcessingResult with response content and state.
|
|
348
|
+
|
|
349
|
+
"""
|
|
350
|
+
from elizaos.prompts import (
|
|
351
|
+
MESSAGE_HANDLER_TEMPLATE,
|
|
352
|
+
MULTI_STEP_DECISION_TEMPLATE,
|
|
353
|
+
MULTI_STEP_SUMMARY_TEMPLATE,
|
|
354
|
+
)
|
|
355
|
+
from elizaos.utils import compose_prompt_from_state
|
|
356
|
+
|
|
357
|
+
_ = runtime.start_run(message.room_id)
|
|
358
|
+
|
|
359
|
+
# Check for custom message handler template from character
|
|
360
|
+
template = MESSAGE_HANDLER_TEMPLATE
|
|
361
|
+
if runtime.character.templates and "messageHandlerTemplate" in runtime.character.templates:
|
|
362
|
+
template = runtime.character.templates["messageHandlerTemplate"]
|
|
363
|
+
start_time = time.time()
|
|
364
|
+
|
|
365
|
+
# Optional trajectory logging (end-to-end capture)
|
|
366
|
+
traj_step_id: str | None = None
|
|
367
|
+
if message.metadata is not None:
|
|
368
|
+
maybe_step = getattr(message.metadata, "trajectoryStepId", None)
|
|
369
|
+
if isinstance(maybe_step, str) and maybe_step:
|
|
370
|
+
traj_step_id = maybe_step
|
|
371
|
+
|
|
372
|
+
from typing import Protocol, runtime_checkable
|
|
373
|
+
|
|
374
|
+
@runtime_checkable
|
|
375
|
+
class _TrajectoryLogger(Protocol):
|
|
376
|
+
def log_llm_call(
|
|
377
|
+
self,
|
|
378
|
+
*,
|
|
379
|
+
step_id: str,
|
|
380
|
+
model: str,
|
|
381
|
+
system_prompt: str,
|
|
382
|
+
user_prompt: str,
|
|
383
|
+
response: str,
|
|
384
|
+
purpose: str,
|
|
385
|
+
action_type: str | None = None,
|
|
386
|
+
model_version: str | None = None,
|
|
387
|
+
temperature: float = 0.7,
|
|
388
|
+
max_tokens: int = 2048,
|
|
389
|
+
top_p: float | None = None,
|
|
390
|
+
prompt_tokens: int | None = None,
|
|
391
|
+
completion_tokens: int | None = None,
|
|
392
|
+
latency_ms: int | None = None,
|
|
393
|
+
reasoning: str | None = None,
|
|
394
|
+
) -> str: ...
|
|
395
|
+
|
|
396
|
+
def log_provider_access(
|
|
397
|
+
self,
|
|
398
|
+
*,
|
|
399
|
+
step_id: str,
|
|
400
|
+
provider_name: str,
|
|
401
|
+
data: dict[str, str | int | float | bool | None],
|
|
402
|
+
purpose: str,
|
|
403
|
+
query: dict[str, str | int | float | bool | None] | None = None,
|
|
404
|
+
) -> None: ...
|
|
405
|
+
|
|
406
|
+
traj_svc = runtime.get_service("trajectory_logger")
|
|
407
|
+
traj_logger = traj_svc if isinstance(traj_svc, _TrajectoryLogger) else None
|
|
408
|
+
|
|
409
|
+
def _as_json_scalar(value: object) -> str | int | float | bool | None:
|
|
410
|
+
if value is None:
|
|
411
|
+
return None
|
|
412
|
+
if isinstance(value, (str, int, float, bool)):
|
|
413
|
+
if isinstance(value, str):
|
|
414
|
+
return value[:2000]
|
|
415
|
+
return value
|
|
416
|
+
return str(value)[:2000]
|
|
417
|
+
|
|
418
|
+
from elizaos.trajectory_context import CURRENT_TRAJECTORY_STEP_ID
|
|
419
|
+
|
|
420
|
+
token = CURRENT_TRAJECTORY_STEP_ID.set(traj_step_id)
|
|
421
|
+
try:
|
|
422
|
+
check_should_respond = runtime.is_check_should_respond_enabled()
|
|
423
|
+
if not check_should_respond:
|
|
424
|
+
runtime.logger.debug(
|
|
425
|
+
"check_should_respond disabled, always responding (ChatGPT mode)"
|
|
426
|
+
)
|
|
427
|
+
|
|
428
|
+
# Step 0: Run pre-evaluator middleware BEFORE saving to memory
|
|
429
|
+
# Pre-evaluators can block the message (e.g. prompt injection)
|
|
430
|
+
# or rewrite it (e.g. redact credentials).
|
|
431
|
+
pre_result = await runtime.evaluate_pre(message)
|
|
432
|
+
if pre_result.blocked:
|
|
433
|
+
runtime.logger.warning(
|
|
434
|
+
f"Message blocked by pre-evaluator: {pre_result.reason}"
|
|
435
|
+
)
|
|
436
|
+
return MessageProcessingResult(
|
|
437
|
+
did_respond=False,
|
|
438
|
+
response_content=None,
|
|
439
|
+
response_messages=[],
|
|
440
|
+
state=None,
|
|
441
|
+
)
|
|
442
|
+
if pre_result.rewritten_text is not None:
|
|
443
|
+
runtime.logger.info(
|
|
444
|
+
f"Pre-evaluator rewrote message text: {pre_result.reason}"
|
|
445
|
+
)
|
|
446
|
+
message.content.text = pre_result.rewritten_text
|
|
447
|
+
|
|
448
|
+
# Step 1: Save incoming message to memory (if adapter available)
|
|
449
|
+
if message.id is None:
|
|
450
|
+
message.id = as_uuid(str(uuid.uuid4()))
|
|
451
|
+
try:
|
|
452
|
+
existing_memory = await runtime.get_memory_by_id(message.id)
|
|
453
|
+
if not existing_memory:
|
|
454
|
+
await runtime.create_memory(message, "messages")
|
|
455
|
+
except RuntimeError:
|
|
456
|
+
# No database adapter - skip persistence (benchmark mode)
|
|
457
|
+
runtime.logger.debug("No database adapter, skipping message persistence")
|
|
458
|
+
|
|
459
|
+
# Step 2: Compose state from providers
|
|
460
|
+
state = await runtime.compose_state(message)
|
|
461
|
+
|
|
462
|
+
# Optional: multi-step strategy (TypeScript parity)
|
|
463
|
+
use_multi_step = _parse_bool(runtime.get_setting("USE_MULTI_STEP"))
|
|
464
|
+
max_multi_step_iterations = _parse_int(
|
|
465
|
+
runtime.get_setting("MAX_MULTISTEP_ITERATIONS"), default=6
|
|
466
|
+
)
|
|
467
|
+
if use_multi_step:
|
|
468
|
+
return await self._run_multi_step_core(
|
|
469
|
+
runtime=runtime,
|
|
470
|
+
message=message,
|
|
471
|
+
state=state,
|
|
472
|
+
callback=callback,
|
|
473
|
+
max_iterations=max_multi_step_iterations,
|
|
474
|
+
decision_template=runtime.character.templates.get(
|
|
475
|
+
"multiStepDecisionTemplate", MULTI_STEP_DECISION_TEMPLATE
|
|
476
|
+
)
|
|
477
|
+
if runtime.character.templates
|
|
478
|
+
else MULTI_STEP_DECISION_TEMPLATE,
|
|
479
|
+
summary_template=runtime.character.templates.get(
|
|
480
|
+
"multiStepSummaryTemplate", MULTI_STEP_SUMMARY_TEMPLATE
|
|
481
|
+
)
|
|
482
|
+
if runtime.character.templates
|
|
483
|
+
else MULTI_STEP_SUMMARY_TEMPLATE,
|
|
484
|
+
compose_prompt_from_state=compose_prompt_from_state,
|
|
485
|
+
)
|
|
486
|
+
|
|
487
|
+
# Step 3-5: Use dynamicPromptExecFromState for structured message response
|
|
488
|
+
schema = [
|
|
489
|
+
SchemaRow(
|
|
490
|
+
field="thought",
|
|
491
|
+
description="Your internal reasoning about the message and what to do",
|
|
492
|
+
required=True,
|
|
493
|
+
validate_field=False,
|
|
494
|
+
stream_field=False,
|
|
495
|
+
),
|
|
496
|
+
SchemaRow(
|
|
497
|
+
field="providers",
|
|
498
|
+
description="List of providers to use for additional context (comma-separated)",
|
|
499
|
+
validate_field=False,
|
|
500
|
+
stream_field=False,
|
|
501
|
+
),
|
|
502
|
+
SchemaRow(
|
|
503
|
+
field="actions",
|
|
504
|
+
description="List of actions to take (comma-separated)",
|
|
505
|
+
required=True,
|
|
506
|
+
validate_field=False,
|
|
507
|
+
stream_field=False,
|
|
508
|
+
),
|
|
509
|
+
SchemaRow(
|
|
510
|
+
field="params",
|
|
511
|
+
description='JSON object with action parameters, e.g. {"ACTION_NAME": {"param": "value"}}',
|
|
512
|
+
validate_field=False,
|
|
513
|
+
stream_field=False,
|
|
514
|
+
),
|
|
515
|
+
SchemaRow(
|
|
516
|
+
field="text",
|
|
517
|
+
description="The text response to send to the user",
|
|
518
|
+
stream_field=True,
|
|
519
|
+
),
|
|
520
|
+
SchemaRow(
|
|
521
|
+
field="simple",
|
|
522
|
+
description="Whether this is a simple response (true/false)",
|
|
523
|
+
validate_field=False,
|
|
524
|
+
stream_field=False,
|
|
525
|
+
),
|
|
526
|
+
]
|
|
527
|
+
|
|
528
|
+
from elizaos.runtime import DynamicPromptOptions
|
|
529
|
+
|
|
530
|
+
parsed_response = await runtime.dynamic_prompt_exec_from_state(
|
|
531
|
+
state=state,
|
|
532
|
+
prompt=template,
|
|
533
|
+
schema=schema,
|
|
534
|
+
options=DynamicPromptOptions(
|
|
535
|
+
model_size="large",
|
|
536
|
+
force_format="xml",
|
|
537
|
+
required_fields=["thought", "actions"],
|
|
538
|
+
),
|
|
539
|
+
)
|
|
540
|
+
|
|
541
|
+
# Handle complete generation failure
|
|
542
|
+
if parsed_response is None:
|
|
543
|
+
runtime.logger.error("Generation failed completely - returning did_respond=False")
|
|
544
|
+
return MessageProcessingResult(
|
|
545
|
+
did_respond=False,
|
|
546
|
+
response_content=None,
|
|
547
|
+
response_messages=[],
|
|
548
|
+
state=state,
|
|
549
|
+
)
|
|
550
|
+
|
|
551
|
+
# Extract parsed fields
|
|
552
|
+
thought = str(parsed_response.get("thought", ""))
|
|
553
|
+
actions_raw = str(parsed_response.get("actions", "REPLY"))
|
|
554
|
+
providers_raw = str(parsed_response.get("providers", ""))
|
|
555
|
+
response_text = str(parsed_response.get("text", ""))
|
|
556
|
+
|
|
557
|
+
# Parse actions and providers from comma-separated strings
|
|
558
|
+
actions = [a.strip().upper() for a in actions_raw.split(",") if a.strip()]
|
|
559
|
+
providers = [p.strip() for p in providers_raw.split(",") if p.strip()]
|
|
560
|
+
|
|
561
|
+
# Parse params from response if available
|
|
562
|
+
import json as json_module
|
|
563
|
+
|
|
564
|
+
params: dict[str, list[dict[str, str]]] = {}
|
|
565
|
+
if parsed_response:
|
|
566
|
+
params_raw = parsed_response.get("params", "")
|
|
567
|
+
if params_raw:
|
|
568
|
+
# Try to parse as JSON if it's a string
|
|
569
|
+
if isinstance(params_raw, str):
|
|
570
|
+
try:
|
|
571
|
+
params_parsed = json_module.loads(params_raw)
|
|
572
|
+
if isinstance(params_parsed, dict):
|
|
573
|
+
# Convert to expected format: {ACTION: [{param: value}]}
|
|
574
|
+
for action_name, action_params in params_parsed.items():
|
|
575
|
+
if isinstance(action_params, dict):
|
|
576
|
+
params[action_name] = [action_params]
|
|
577
|
+
except json_module.JSONDecodeError:
|
|
578
|
+
pass
|
|
579
|
+
elif isinstance(params_raw, dict):
|
|
580
|
+
# Already a dict, convert to expected format
|
|
581
|
+
for action_name, action_params in params_raw.items():
|
|
582
|
+
if isinstance(action_params, dict):
|
|
583
|
+
params[action_name] = [action_params]
|
|
584
|
+
raw_response_str = json_module.dumps(parsed_response) if parsed_response else ""
|
|
585
|
+
|
|
586
|
+
# Step 5b: If actions require params but none were provided, run a parameter-repair pass
|
|
587
|
+
# using the SAME model. This keeps behavior canonical while preventing "action without params"
|
|
588
|
+
# failures from stalling the agent.
|
|
589
|
+
if actions:
|
|
590
|
+
params = await self._repair_missing_action_params(
|
|
591
|
+
runtime=runtime,
|
|
592
|
+
message=message,
|
|
593
|
+
state=state,
|
|
594
|
+
actions=actions,
|
|
595
|
+
providers=providers,
|
|
596
|
+
raw_response=raw_response_str,
|
|
597
|
+
params=params,
|
|
598
|
+
template=template,
|
|
599
|
+
)
|
|
600
|
+
|
|
601
|
+
# Log parsed action selection / params as a structured provider access
|
|
602
|
+
if traj_step_id and traj_logger is not None:
|
|
603
|
+
try:
|
|
604
|
+
traj_logger.log_provider_access(
|
|
605
|
+
step_id=traj_step_id,
|
|
606
|
+
provider_name="MESSAGE_SERVICE",
|
|
607
|
+
data={
|
|
608
|
+
"actions": _as_json_scalar(",".join(actions)),
|
|
609
|
+
"providers": _as_json_scalar(",".join(providers)),
|
|
610
|
+
"hasParams": _as_json_scalar(bool(params)),
|
|
611
|
+
"params": _as_json_scalar(str(params)[:2000]),
|
|
612
|
+
},
|
|
613
|
+
purpose="parsed_response",
|
|
614
|
+
query={"roomId": _as_json_scalar(str(message.room_id))},
|
|
615
|
+
)
|
|
616
|
+
except Exception as e:
|
|
617
|
+
runtime.logger.debug(f"Trajectory logger failed: {e}")
|
|
618
|
+
|
|
619
|
+
# If no text parsed, use raw response (fallback for non-XML responses)
|
|
620
|
+
if not response_text:
|
|
621
|
+
response_text = raw_response_str
|
|
622
|
+
|
|
623
|
+
# Benchmark mode: force action-based response generation.
|
|
624
|
+
# If the context-bench provider is active, require REPLY to run so the
|
|
625
|
+
# full Provider -> Model -> Action -> Evaluator loop is exercised.
|
|
626
|
+
benchmark_mode = False
|
|
627
|
+
if state.values:
|
|
628
|
+
# Handle both dict-like and protobuf StateValues
|
|
629
|
+
if hasattr(state.values, "get") and callable(state.values.get):
|
|
630
|
+
benchmark_mode = bool(state.values.get("benchmark_has_context"))
|
|
631
|
+
elif hasattr(state.values, "extra"):
|
|
632
|
+
# Protobuf - check extra map field
|
|
633
|
+
extra = state.values.extra
|
|
634
|
+
if hasattr(extra, "get") and callable(extra.get):
|
|
635
|
+
benchmark_mode = bool(extra.get("benchmark_has_context", ""))
|
|
636
|
+
if benchmark_mode:
|
|
637
|
+
if not actions:
|
|
638
|
+
actions = ["REPLY"]
|
|
639
|
+
if not providers:
|
|
640
|
+
providers = ["CONTEXT_BENCH"]
|
|
641
|
+
# Suppress any direct planner answer; the REPLY action should generate
|
|
642
|
+
# the final user-visible answer (captured via callback).
|
|
643
|
+
if "REPLY" in actions:
|
|
644
|
+
response_text = ""
|
|
645
|
+
|
|
646
|
+
runtime.logger.debug(
|
|
647
|
+
f"Parsed response: actions={actions}, providers={providers}, "
|
|
648
|
+
f"text_length={len(response_text)}, thought_length={len(thought)}"
|
|
649
|
+
)
|
|
650
|
+
|
|
651
|
+
# Step 6: Create response content with actions
|
|
652
|
+
response_content = Content(
|
|
653
|
+
text=response_text,
|
|
654
|
+
thought=thought if thought else None,
|
|
655
|
+
actions=actions if actions else None,
|
|
656
|
+
providers=providers if providers else None,
|
|
657
|
+
)
|
|
658
|
+
# Store params in Content.data for protobuf compatibility
|
|
659
|
+
if params:
|
|
660
|
+
if not response_content.data:
|
|
661
|
+
response_content.data = Struct()
|
|
662
|
+
response_content.data.update({"params": params})
|
|
663
|
+
|
|
664
|
+
response_id = as_uuid(str(uuid.uuid4()))
|
|
665
|
+
response_memory = Memory(
|
|
666
|
+
id=response_id,
|
|
667
|
+
entity_id=runtime.agent_id,
|
|
668
|
+
agent_id=runtime.agent_id,
|
|
669
|
+
room_id=message.room_id,
|
|
670
|
+
content=response_content,
|
|
671
|
+
created_at=int(time.time() * 1000),
|
|
672
|
+
)
|
|
673
|
+
|
|
674
|
+
# Save response memory (if adapter available)
|
|
675
|
+
try:
|
|
676
|
+
await runtime.create_memory(response_memory, "messages")
|
|
677
|
+
except RuntimeError:
|
|
678
|
+
# No database adapter - skip persistence (benchmark mode)
|
|
679
|
+
runtime.logger.debug("No database adapter, skipping response persistence")
|
|
680
|
+
|
|
681
|
+
# Emit MESSAGE_SENT event after successfully creating response memory
|
|
682
|
+
await runtime.emit_event(
|
|
683
|
+
"MESSAGE_SENT",
|
|
684
|
+
{
|
|
685
|
+
"runtime": runtime,
|
|
686
|
+
"source": "message-service",
|
|
687
|
+
"message": response_memory,
|
|
688
|
+
},
|
|
689
|
+
)
|
|
690
|
+
|
|
691
|
+
responses = [response_memory]
|
|
692
|
+
|
|
693
|
+
# Step 7: Process actions via runtime.process_actions()
|
|
694
|
+
# By default, we treat a plain REPLY as a chat-style response.
|
|
695
|
+
# In benchmark mode (context-bench), we WANT to execute REPLY so the full
|
|
696
|
+
# Provider -> Model -> Action -> Evaluator loop is exercised.
|
|
697
|
+
if actions and (benchmark_mode or not (len(actions) == 1 and actions[0] == "REPLY")):
|
|
698
|
+
runtime.logger.debug(f"Processing {len(actions)} actions: {actions}")
|
|
699
|
+
await runtime.process_actions(message, responses, state, callback)
|
|
700
|
+
elif callback:
|
|
701
|
+
# Simple chat-style response
|
|
702
|
+
await callback(response_content)
|
|
703
|
+
|
|
704
|
+
# Step 8: Run evaluators via runtime.evaluate()
|
|
705
|
+
runtime.logger.debug("Running evaluators")
|
|
706
|
+
await runtime.evaluate(
|
|
707
|
+
message,
|
|
708
|
+
state,
|
|
709
|
+
did_respond=True,
|
|
710
|
+
callback=callback,
|
|
711
|
+
responses=responses,
|
|
712
|
+
)
|
|
713
|
+
|
|
714
|
+
_ = time.time() - start_time
|
|
715
|
+
|
|
716
|
+
return MessageProcessingResult(
|
|
717
|
+
did_respond=True,
|
|
718
|
+
response_content=response_content,
|
|
719
|
+
response_messages=responses,
|
|
720
|
+
state=state,
|
|
721
|
+
)
|
|
722
|
+
|
|
723
|
+
except Exception as e:
|
|
724
|
+
import traceback as _tb
|
|
725
|
+
|
|
726
|
+
runtime.logger.error(
|
|
727
|
+
f"Error processing message: {e}\n{''.join(_tb.format_exception(e))}"
|
|
728
|
+
)
|
|
729
|
+
raise
|
|
730
|
+
finally:
|
|
731
|
+
CURRENT_TRAJECTORY_STEP_ID.reset(token)
|
|
732
|
+
runtime.end_run()
|
|
733
|
+
|
|
734
|
+
def _build_canonical_prompt(
|
|
735
|
+
self,
|
|
736
|
+
runtime: IAgentRuntime,
|
|
737
|
+
message: Memory,
|
|
738
|
+
state: State,
|
|
739
|
+
template: str,
|
|
740
|
+
) -> str:
|
|
741
|
+
"""Build the canonical prompt using MESSAGE_HANDLER_TEMPLATE.
|
|
742
|
+
|
|
743
|
+
Args:
|
|
744
|
+
runtime: The Eliza runtime.
|
|
745
|
+
message: The incoming message.
|
|
746
|
+
state: Composed state from providers.
|
|
747
|
+
template: The message handler template.
|
|
748
|
+
|
|
749
|
+
Returns:
|
|
750
|
+
Formatted prompt string.
|
|
751
|
+
|
|
752
|
+
"""
|
|
753
|
+
character = runtime.character
|
|
754
|
+
user_text = message.content.text or ""
|
|
755
|
+
|
|
756
|
+
# Get provider context from state
|
|
757
|
+
context = state.text if state.text else ""
|
|
758
|
+
# Always include the current user message explicitly so the model has the
|
|
759
|
+
# latest instruction even when RECENT_MESSAGES is unavailable (e.g. no DB adapter).
|
|
760
|
+
if user_text:
|
|
761
|
+
context = f"{context}\n\n# Current Message\nUser: {user_text}".strip()
|
|
762
|
+
|
|
763
|
+
# Build values for template substitution
|
|
764
|
+
# Handle both dict-like and protobuf StateValues
|
|
765
|
+
if state.values:
|
|
766
|
+
if hasattr(state.values, "agent_name"):
|
|
767
|
+
# Protobuf StateValues
|
|
768
|
+
values = {
|
|
769
|
+
"agentName": state.values.agent_name or character.name,
|
|
770
|
+
"actionNames": state.values.action_names or "",
|
|
771
|
+
"providers": state.values.providers or context,
|
|
772
|
+
}
|
|
773
|
+
elif hasattr(state.values, "items"):
|
|
774
|
+
# Dict-like
|
|
775
|
+
values = dict(state.values)
|
|
776
|
+
else:
|
|
777
|
+
values = {}
|
|
778
|
+
else:
|
|
779
|
+
values = {}
|
|
780
|
+
|
|
781
|
+
values["agentName"] = character.name
|
|
782
|
+
values["providers"] = context
|
|
783
|
+
|
|
784
|
+
# Add user message to context
|
|
785
|
+
if "recentMessages" not in values:
|
|
786
|
+
values["recentMessages"] = f"User: {user_text}"
|
|
787
|
+
|
|
788
|
+
# Simple template substitution
|
|
789
|
+
prompt = template
|
|
790
|
+
for key, value in values.items():
|
|
791
|
+
placeholder = "{{" + key + "}}"
|
|
792
|
+
prompt = prompt.replace(placeholder, str(value))
|
|
793
|
+
|
|
794
|
+
return prompt
|
|
795
|
+
|
|
796
|
+
async def _run_multi_step_core(
    self,
    *,
    runtime: IAgentRuntime,
    message: Memory,
    state: State,
    callback: HandlerCallback | None,
    max_iterations: int,
    decision_template: str,
    summary_template: str,
    compose_prompt_from_state: Callable[..., str],
) -> MessageProcessingResult:
    """Iterative multi-step workflow (TypeScript parity).

    Each iteration:
    - (Re)compose core state (RECENT_MESSAGES/ACTIONS/PROVIDERS/ACTION_STATE)
    - Ask the model for the next providers + at most one action
    - Run selected providers (optional)
    - Execute selected action (optional)
    - Accumulate action results

    Args:
        runtime: The Eliza runtime (models, providers, actions, logging).
        message: The incoming message driving the workflow.
        state: Initial composed state (recomposed fresh on every iteration).
        callback: Optional handler callback; passed to process_actions and
            invoked with the final summary content.
        max_iterations: Upper bound on decision iterations (clamped to >= 1).
        decision_template: Prompt template for the per-iteration decision.
        summary_template: Prompt template for the final summary.
        compose_prompt_from_state: NOTE(review) - accepted for interface
            parity but not referenced anywhere in this body; confirm whether
            it can be dropped or should be used for templating.

    Returns:
        MessageProcessingResult with did_respond=True and the summary content
        on success, or did_respond=False when summary generation fails or
        returns empty text.
    """
    import json

    from elizaos.runtime import DynamicPromptOptions
    from elizaos.types.components import ActionResult

    # Accumulated results of every action executed across iterations.
    trace_results: list[ActionResult] = []
    # High-water mark into runtime.get_action_results() so each iteration
    # only appends results recorded since the previous one.
    last_action_results_len = 0
    # Most recent model "thought"; surfaced to the summary as recentMessage.
    last_thought = ""

    iteration = 0
    while iteration < max(1, int(max_iterations)):
        iteration += 1

        # Keep state fresh each iteration; include descriptions lists
        state = await runtime.compose_state(
            message,
            include_list=["RECENT_MESSAGES", "ACTION_STATE", "ACTIONS", "PROVIDERS"],
            only_include=True,
            skip_cache=True,
        )
        # Expose the running action trace to the decision prompt.
        state.data.action_results = list(trace_results)
        state.values["actionResults"] = _format_action_results(trace_results)

        # Use dynamicPromptExecFromState for multi-step decision
        # (single "action" field => at most one action per iteration).
        decision_schema = [
            SchemaRow(
                field="thought",
                description="Your reasoning for the selected providers and/or action",
                validate_field=False,
                stream_field=False,
            ),
            SchemaRow(
                field="providers",
                description="Comma-separated list of providers to call",
                validate_field=False,
                stream_field=False,
            ),
            SchemaRow(
                field="action",
                description="Name of the action to execute (can be empty)",
                validate_field=False,
                stream_field=False,
            ),
            SchemaRow(
                field="parameters",
                description="JSON object with parameter names and values",
                validate_field=False,
                stream_field=False,
            ),
            SchemaRow(
                field="isFinish",
                description="true if task is complete, false otherwise",
                validate_field=False,
                stream_field=False,
            ),
        ]

        decision_result = await runtime.dynamic_prompt_exec_from_state(
            state=state,
            prompt=decision_template,
            schema=decision_schema,
            options=DynamicPromptOptions(
                model_size="large",
                force_format="xml",
            ),
        )

        # decision_result may be None on generation failure; every field
        # falls back to "" so the iteration degrades to a no-op.
        thought = str(decision_result.get("thought", "")) if decision_result else ""
        action_name = str(decision_result.get("action", "")).strip() if decision_result else ""
        providers_csv = (
            str(decision_result.get("providers", "")).strip() if decision_result else ""
        )
        parameters_raw = decision_result.get("parameters", "") if decision_result else ""
        is_finish_raw = (
            str(decision_result.get("isFinish", "")).strip().lower() if decision_result else ""
        )
        is_finish = is_finish_raw in ("true", "yes", "1")

        # Parse parameters (may be JSON string or dict)
        action_params: dict[str, Any] = {}
        if parameters_raw:
            if isinstance(parameters_raw, str):
                try:
                    parsed_params = json.loads(parameters_raw)
                    if isinstance(parsed_params, dict):
                        action_params = parsed_params
                except json.JSONDecodeError:
                    # Unparseable params: proceed with the action anyway.
                    pass
            elif isinstance(parameters_raw, dict):
                action_params = parameters_raw

        last_thought = thought

        # Model declared the task complete: stop before running anything else.
        if is_finish:
            break

        providers = [p.strip() for p in providers_csv.split(",") if p.strip()]
        if providers:
            # Execute selected providers only; bypass cache so they run
            state = await runtime.compose_state(
                message,
                include_list=providers,
                only_include=True,
                skip_cache=True,
            )

        if action_name:
            # Synthetic response memory to drive runtime.process_actions()
            response_id = as_uuid(str(uuid.uuid4()))
            response_content = Content(
                text="",
                thought=thought if thought else None,
                actions=[action_name],
                providers=providers if providers else None,
                # Params are serialized to JSON for Content compatibility.
                params=json.dumps(action_params) if action_params else None,
            )
            response_memory = Memory(
                id=response_id,
                entity_id=runtime.agent_id,
                agent_id=runtime.agent_id,
                room_id=message.room_id,
                content=response_content,
                created_at=int(time.time() * 1000),
            )
            await runtime.process_actions(message, [response_memory], state, callback)

        # Pull newly recorded action results from runtime (if message.id is set)
        if message.id:
            all_results = runtime.get_action_results(message.id)
            if len(all_results) > last_action_results_len:
                trace_results.extend(all_results[last_action_results_len:])
                last_action_results_len = len(all_results)

    # Final summary: recompose state one last time so the summary prompt sees
    # the complete action trace and the freshest conversation context.
    state = await runtime.compose_state(
        message,
        include_list=["RECENT_MESSAGES", "ACTION_STATE", "ACTIONS", "PROVIDERS"],
        only_include=True,
        skip_cache=True,
    )
    state.data.action_results = list(trace_results)
    state.values["actionResults"] = _format_action_results(trace_results)
    state.values["recentMessage"] = last_thought
    # Best-effort fill template values
    # (bio may be a list on some characters; only a plain str is passed through)
    bio_val = runtime.character.bio if isinstance(runtime.character.bio, str) else ""
    state.values["bio"] = bio_val
    state.values["system"] = runtime.character.system or ""
    state.values["messageDirections"] = ""

    # Use dynamicPromptExecFromState for final summary
    summary_schema = [
        SchemaRow(
            field="thought",
            description="Your internal reasoning about the summary",
            validate_field=False,
            stream_field=False,
        ),
        SchemaRow(
            field="text",
            description="The final summary message to send to the user",
            required=True,
            stream_field=True,
        ),
    ]

    summary_result = await runtime.dynamic_prompt_exec_from_state(
        state=state,
        prompt=summary_template,
        schema=summary_schema,
        options=DynamicPromptOptions(
            model_size="large",
            force_format="xml",
            required_fields=["text"],
        ),
    )

    # Handle summary generation failure
    if summary_result is None:
        runtime.logger.error(
            "Multi-step summary generation failed - returning did_respond=False"
        )
        return MessageProcessingResult(
            did_respond=False,
            response_content=None,
            response_messages=[],
            state=state,
        )

    final_thought = str(summary_result.get("thought", ""))
    final_text = str(summary_result.get("text", ""))

    # If text is empty, treat as failure
    if not final_text.strip():
        runtime.logger.error(
            "Multi-step summary returned empty text - returning did_respond=False"
        )
        return MessageProcessingResult(
            did_respond=False,
            response_content=None,
            response_messages=[],
            state=state,
        )

    final_content = Content(text=final_text, thought=final_thought)
    if callback:
        await callback(final_content)

    return MessageProcessingResult(
        did_respond=True,
        response_content=final_content,
        response_messages=[],
        state=state,
    )
async def _repair_missing_action_params(
|
|
1032
|
+
self,
|
|
1033
|
+
*,
|
|
1034
|
+
runtime: IAgentRuntime,
|
|
1035
|
+
message: Memory,
|
|
1036
|
+
state: State,
|
|
1037
|
+
actions: list[str],
|
|
1038
|
+
providers: list[str],
|
|
1039
|
+
raw_response: str,
|
|
1040
|
+
params: dict[str, list[dict[str, str]]],
|
|
1041
|
+
template: str,
|
|
1042
|
+
) -> dict[str, list[dict[str, str]]]:
|
|
1043
|
+
"""
|
|
1044
|
+
Ensure required action parameters are present.
|
|
1045
|
+
|
|
1046
|
+
If the model selected actions that require parameters but omitted <params>,
|
|
1047
|
+
we ask the same model to return ONLY a <params> block.
|
|
1048
|
+
"""
|
|
1049
|
+
# Build a requirement map for actions that have parameters
|
|
1050
|
+
required_by_action: dict[str, list[str]] = {}
|
|
1051
|
+
for a in runtime.actions:
|
|
1052
|
+
action_name = a.name.upper()
|
|
1053
|
+
if action_name not in [x.upper() for x in actions]:
|
|
1054
|
+
continue
|
|
1055
|
+
if not a.parameters:
|
|
1056
|
+
continue
|
|
1057
|
+
required: list[str] = []
|
|
1058
|
+
for p in a.parameters:
|
|
1059
|
+
if p.required:
|
|
1060
|
+
required.append(p.name)
|
|
1061
|
+
if required:
|
|
1062
|
+
required_by_action[action_name] = required
|
|
1063
|
+
|
|
1064
|
+
if not required_by_action:
|
|
1065
|
+
return params
|
|
1066
|
+
|
|
1067
|
+
action_counts: dict[str, int] = {}
|
|
1068
|
+
for a in actions:
|
|
1069
|
+
action_counts[a.upper()] = action_counts.get(a.upper(), 0) + 1
|
|
1070
|
+
|
|
1071
|
+
def _entry_has_required(entry: dict[str, str], req: list[str]) -> bool:
|
|
1072
|
+
for r in req:
|
|
1073
|
+
if r in entry:
|
|
1074
|
+
continue
|
|
1075
|
+
found = False
|
|
1076
|
+
for k in entry:
|
|
1077
|
+
if isinstance(k, str) and k.lower() == r.lower():
|
|
1078
|
+
found = True
|
|
1079
|
+
break
|
|
1080
|
+
if not found:
|
|
1081
|
+
return False
|
|
1082
|
+
return True
|
|
1083
|
+
|
|
1084
|
+
missing_actions: dict[str, list[str]] = {}
|
|
1085
|
+
for action_name, req in required_by_action.items():
|
|
1086
|
+
expected = action_counts.get(action_name, 0)
|
|
1087
|
+
existing_entries = params.get(action_name, [])
|
|
1088
|
+
if len(existing_entries) < expected:
|
|
1089
|
+
missing_actions[action_name] = req
|
|
1090
|
+
continue
|
|
1091
|
+
for entry in existing_entries[:expected]:
|
|
1092
|
+
if not _entry_has_required(entry, req):
|
|
1093
|
+
missing_actions[action_name] = req
|
|
1094
|
+
break
|
|
1095
|
+
|
|
1096
|
+
if not missing_actions:
|
|
1097
|
+
return params
|
|
1098
|
+
|
|
1099
|
+
runtime.logger.warning(
|
|
1100
|
+
f"Missing required action params for: {', '.join(sorted(missing_actions.keys()))}. "
|
|
1101
|
+
"Attempting param repair."
|
|
1102
|
+
)
|
|
1103
|
+
|
|
1104
|
+
# Compose a minimal "repair" prompt. Prefer JSON output for robustness.
|
|
1105
|
+
missing_lines = "\n".join(
|
|
1106
|
+
f"- {a}: required params = {', '.join(req)}" for a, req in missing_actions.items()
|
|
1107
|
+
)
|
|
1108
|
+
user_text = message.content.text or ""
|
|
1109
|
+
|
|
1110
|
+
actions_json = ", ".join([f'"{a.upper()}"' for a in actions if isinstance(a, str)])
|
|
1111
|
+
repair_prompt = (
|
|
1112
|
+
"You previously selected actions that require parameters, but you did not provide them.\n\n"
|
|
1113
|
+
f"Missing params:\n{missing_lines}\n\n"
|
|
1114
|
+
"Return ONLY JSON (no code fences).\n"
|
|
1115
|
+
"IMPORTANT: return a JSON ARRAY of action-parameter objects IN THE SAME ORDER as the action list.\n"
|
|
1116
|
+
"Action list:\n"
|
|
1117
|
+
f"[{actions_json}]\n\n"
|
|
1118
|
+
"Examples:\n"
|
|
1119
|
+
'[{"EXECUTE": {"command": "ls -la /workspace"}}]\n'
|
|
1120
|
+
'[{"WRITE_FILE": {"path": "/workspace/x.txt", "content": "line1\\nline2\\n"}}]\n\n'
|
|
1121
|
+
"IMPORTANT:\n"
|
|
1122
|
+
"- The JSON must be directly parseable.\n"
|
|
1123
|
+
"- For WRITE_FILE.content, include real newlines using \\n escapes.\n\n"
|
|
1124
|
+
f"Current message:\n{user_text}\n\n"
|
|
1125
|
+
"Your previous response (for reference):\n"
|
|
1126
|
+
f"{raw_response}\n"
|
|
1127
|
+
)
|
|
1128
|
+
|
|
1129
|
+
# Use the same model handler as the main message handler.
|
|
1130
|
+
repaired_raw = await runtime.use_model(
|
|
1131
|
+
ModelType.TEXT_LARGE.value,
|
|
1132
|
+
{
|
|
1133
|
+
"prompt": repair_prompt,
|
|
1134
|
+
"system": runtime.character.system,
|
|
1135
|
+
"temperature": 0.0,
|
|
1136
|
+
},
|
|
1137
|
+
)
|
|
1138
|
+
repaired_str = str(repaired_raw)
|
|
1139
|
+
repaired_params = _parse_params_from_xml(repaired_str)
|
|
1140
|
+
if not repaired_params:
|
|
1141
|
+
runtime.logger.warning(
|
|
1142
|
+
"Param repair failed to parse. "
|
|
1143
|
+
f"Repair model output (truncated): {repaired_str[:500]!r}"
|
|
1144
|
+
)
|
|
1145
|
+
return params
|
|
1146
|
+
|
|
1147
|
+
merged: dict[str, list[dict[str, str]]] = {**params}
|
|
1148
|
+
for action_name, entries in repaired_params.items():
|
|
1149
|
+
merged.setdefault(action_name, [])
|
|
1150
|
+
merged[action_name].extend(entries)
|
|
1151
|
+
|
|
1152
|
+
return merged
|
|
1153
|
+
|
|
1154
|
+
async def _handle_message_stream_impl(
    self,
    runtime: IAgentRuntime,
    message: Memory,
) -> AsyncIterator[str | StreamingMessageResult]:
    """Internal implementation of streaming message handling.

    Saves the incoming message, composes state, streams the model response
    chunk by chunk, persists the complete response, and finally yields a
    StreamingMessageResult carrying the response memory and state.

    Yields:
        str: Text chunks as they are generated.
        StreamingMessageResult: Final result with metadata (yielded last).
    """
    _ = runtime.start_run(message.room_id)

    try:
        check_should_respond = runtime.is_check_should_respond_enabled()
        if not check_should_respond:
            runtime.logger.debug(
                "check_should_respond disabled, always responding (ChatGPT mode)"
            )

        runtime.logger.debug("Saving incoming message to memory")
        if message.id:
            # Avoid duplicating a message that was already persisted.
            existing_memory = await runtime.get_memory_by_id(message.id)
            if not existing_memory:
                await runtime.create_memory(message, "messages")
        else:
            message.id = as_uuid(str(uuid.uuid4()))
            await runtime.create_memory(message, "messages")

        # Compose state from providers
        state = await runtime.compose_state(message)

        # Build the prompt using the canonical template, preferring a
        # character-level override when one is configured.
        from elizaos.prompts import MESSAGE_HANDLER_TEMPLATE

        template = MESSAGE_HANDLER_TEMPLATE
        if (
            runtime.character.templates
            and "messageHandlerTemplate" in runtime.character.templates
        ):
            template = runtime.character.templates["messageHandlerTemplate"]
        prompt = self._build_canonical_prompt(runtime, message, state, template)

        # Collect full response while streaming
        full_response_parts: list[str] = []

        # Stream response using the streaming model
        async for chunk in runtime.use_model_stream(
            ModelType.TEXT_LARGE_STREAM.value,
            {
                "prompt": prompt,
                "system": runtime.character.system,
                "temperature": 0.7,
            },
        ):
            full_response_parts.append(chunk)
            yield chunk

        # Build the complete response
        full_response = "".join(full_response_parts)
        response_content = Content(text=full_response)
        response_id = as_uuid(str(uuid.uuid4()))
        # snake_case keyword args, consistent with every other Memory(...)
        # construction in this module (this path previously used camelCase:
        # entityId/agentId/roomId/createdAt).
        response_memory = Memory(
            id=response_id,
            entity_id=runtime.agent_id,
            agent_id=runtime.agent_id,
            room_id=message.room_id,
            content=response_content,
            created_at=int(time.time() * 1000),
        )

        # Save response memory
        runtime.logger.debug("Saving response to memory")
        await runtime.create_memory(response_memory, "messages")

        # Yield final result with metadata
        yield StreamingMessageResult(
            response_memory=response_memory,
            state=state,
        )

    except Exception as e:
        runtime.logger.error(f"Error processing streaming message: {e}")
        raise
    finally:
        # Always close the run, even on failure, so the runtime is not left
        # with a dangling run context.
        runtime.end_run()
def handle_message_stream(
    self,
    runtime: IAgentRuntime,
    message: Memory,
) -> AsyncIterator[str | StreamingMessageResult]:
    """Stream a message response token by token.

    Thin public wrapper that delegates to the internal streaming
    implementation without modifying its output.

    Yields:
        str: Text chunks as they are generated.
        StreamingMessageResult: Final result with metadata (yielded last).
    """
    stream = self._handle_message_stream_impl(runtime, message)
    return stream