agentscope-runtime 0.1.3__py3-none-any.whl → 0.1.5b1__py3-none-any.whl
This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
- agentscope_runtime/engine/agents/agentscope_agent/agent.py +3 -0
- agentscope_runtime/engine/deployers/__init__.py +13 -0
- agentscope_runtime/engine/deployers/adapter/responses/__init__.py +0 -0
- agentscope_runtime/engine/deployers/adapter/responses/response_api_adapter_utils.py +2886 -0
- agentscope_runtime/engine/deployers/adapter/responses/response_api_agent_adapter.py +51 -0
- agentscope_runtime/engine/deployers/adapter/responses/response_api_protocol_adapter.py +314 -0
- agentscope_runtime/engine/deployers/cli_fc_deploy.py +143 -0
- agentscope_runtime/engine/deployers/kubernetes_deployer.py +265 -0
- agentscope_runtime/engine/deployers/local_deployer.py +356 -501
- agentscope_runtime/engine/deployers/modelstudio_deployer.py +626 -0
- agentscope_runtime/engine/deployers/utils/__init__.py +0 -0
- agentscope_runtime/engine/deployers/utils/deployment_modes.py +14 -0
- agentscope_runtime/engine/deployers/utils/docker_image_utils/__init__.py +8 -0
- agentscope_runtime/engine/deployers/utils/docker_image_utils/docker_image_builder.py +429 -0
- agentscope_runtime/engine/deployers/utils/docker_image_utils/dockerfile_generator.py +240 -0
- agentscope_runtime/engine/deployers/utils/docker_image_utils/runner_image_factory.py +297 -0
- agentscope_runtime/engine/deployers/utils/package_project_utils.py +932 -0
- agentscope_runtime/engine/deployers/utils/service_utils/__init__.py +9 -0
- agentscope_runtime/engine/deployers/utils/service_utils/fastapi_factory.py +504 -0
- agentscope_runtime/engine/deployers/utils/service_utils/fastapi_templates.py +157 -0
- agentscope_runtime/engine/deployers/utils/service_utils/process_manager.py +268 -0
- agentscope_runtime/engine/deployers/utils/service_utils/service_config.py +75 -0
- agentscope_runtime/engine/deployers/utils/service_utils/service_factory.py +220 -0
- agentscope_runtime/engine/deployers/utils/wheel_packager.py +389 -0
- agentscope_runtime/engine/helpers/agent_api_builder.py +651 -0
- agentscope_runtime/engine/runner.py +36 -10
- agentscope_runtime/engine/schemas/agent_schemas.py +70 -2
- agentscope_runtime/engine/schemas/embedding.py +37 -0
- agentscope_runtime/engine/schemas/modelstudio_llm.py +310 -0
- agentscope_runtime/engine/schemas/oai_llm.py +538 -0
- agentscope_runtime/engine/schemas/realtime.py +254 -0
- agentscope_runtime/engine/services/context_manager.py +2 -0
- agentscope_runtime/engine/services/mem0_memory_service.py +124 -0
- agentscope_runtime/engine/services/memory_service.py +2 -1
- agentscope_runtime/engine/services/redis_session_history_service.py +4 -3
- agentscope_runtime/engine/services/session_history_service.py +4 -3
- agentscope_runtime/sandbox/manager/container_clients/kubernetes_client.py +555 -10
- agentscope_runtime/version.py +1 -1
- {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/METADATA +25 -5
- {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/RECORD +44 -17
- {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/entry_points.txt +1 -0
- {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/WHEEL +0 -0
- {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/licenses/LICENSE +0 -0
- {agentscope_runtime-0.1.3.dist-info → agentscope_runtime-0.1.5b1.dist-info}/top_level.txt +0 -0
agentscope_runtime/engine/schemas/realtime.py (new file)

@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+import os
+import uuid
+from enum import Enum
+from typing import Optional, List, Union, Any
+
+from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
+from pydantic import BaseModel, model_validator, Field
+
+
+class AsrVendor(str, Enum):
+    MODELSTUDIO = "modelstudio"
+    AZURE = "azure"
+
+
+class TtsVendor(str, Enum):
+    MODELSTUDIO = "modelstudio"
+    AZURE = "azure"
+
+
+class ModelstudioConnection(BaseModel):
+    base_url: Optional[str] = None
+    api_key: Optional[str] = None
+    workspace_id: Optional[str] = None
+    user_agent: Optional[str] = None
+    data_inspection: Optional[str] = None
+
+
+class TtsConfig(BaseModel):
+    model: Optional[str] = None
+    voice: Optional[str] = None
+    sample_rate: Optional[int] = None
+    format: Optional[str] = None
+    bits_per_sample: Optional[int] = None
+    nb_channels: Optional[int] = None
+    chat_id: Optional[str] = None
+
+
+class ModelstudioTtsConfig(TtsConfig, ModelstudioConnection):
+    model: str = "cosyvoice-v2"
+    voice: str = os.getenv("TTS_VOICE", "longcheng_v2")
+    sample_rate: int = 16000
+    format: Optional[str] = "pcm"
+
+
+class AsrConfig(BaseModel):
+    model: Optional[str] = None
+    language: Optional[str] = None
+    sample_rate: Optional[int] = None
+    format: Optional[str] = None
+    bits_per_sample: Optional[int] = None
+    nb_channels: Optional[int] = None
+    initial_silence_timeout: Optional[int] = None
+    max_end_silence: Optional[int] = None
+    fast_vad_min_duration: Optional[int] = None
+    fast_vad_max_duration: Optional[int] = None
+
+
+class ModelstudioAsrConfig(AsrConfig, ModelstudioConnection):
+    model: Optional[str] = "gummy-realtime-v1"
+    sample_rate: Optional[int] = 16000
+    format: Optional[str] = "pcm"
+    max_end_silence: Optional[int] = 700
+    fast_vad_min_duration: Optional[int] = 200
+    fast_vad_max_duration: Optional[int] = 1100
+
+
+class ModelstudioKnowledgeBaseConfig(BaseModel):
+    index_ids: List[str]
+    workspace_id: str
+    api_key: str
+
+
+class ModelstudioVoiceChatUpstream(BaseModel):
+    dialog_mode: Optional[str] = "duplex"
+    enable_server_vad: Optional[bool] = True
+    modalities: Optional[List[str]] = Field(default_factory=lambda: ["audio"])
+    asr_vendor: Optional[AsrVendor] = AsrVendor(
+        os.getenv("ASR_VENDOR", AsrVendor.MODELSTUDIO.value),
+    )
+    asr_options: Optional[AsrConfig] = AsrConfig()
+
+
+class ModelstudioVoiceChatDownstream(BaseModel):
+    modalities: Optional[List[str]] = Field(
+        default_factory=lambda: ["audio", "text"],
+    )
+    tts_vendor: Optional[TtsVendor] = TtsVendor(
+        os.getenv("TTS_VENDOR", TtsVendor.MODELSTUDIO.value),
+    )
+    tts_options: Optional[TtsConfig] = TtsConfig()
+
+
+class ModelstudioVoiceChatParameters(BaseModel):
+    modelstudio_kb: Optional[ModelstudioKnowledgeBaseConfig] = None
+    enable_tool_call: Optional[bool] = False
+
+
+class ModelstudioVoiceChatInput(BaseModel):
+    dialog_id: Optional[str] = None
+    app_id: Optional[str] = None
+    text: Optional[str] = None
+
+
+class ModelstudioVoiceChatDirective(str, Enum):
+    SESSION_START = "SessionStart"
+    SESSION_STOP = "SessionStop"
+
+
+class ModelstudioVoiceChatEvent(str, Enum):
+    SESSION_STARTED = "SessionStarted"
+    SESSION_STOPPED = "SessionStopped"
+    AUDIO_TRANSCRIPT = "AudioTranscript"
+    RESPONSE_TEXT = "ResponseText"
+    RESPONSE_AUDIO_STARTED = "ResponseAudioStarted"
+    RESPONSE_AUDIO_ENDED = "ResponseAudioEnded"
+
+
+class ModelstudioVoiceChatInPayload(BaseModel):
+    pass
+
+
+class ModelstudioVoiceChatSessionStartPayload(ModelstudioVoiceChatInPayload):
+    session_id: Optional[str] = Field(
+        default_factory=lambda: str(uuid.uuid4()),
+    )
+    upstream: Optional[
+        ModelstudioVoiceChatUpstream
+    ] = ModelstudioVoiceChatUpstream()
+    downstream: Optional[
+        ModelstudioVoiceChatDownstream
+    ] = ModelstudioVoiceChatDownstream()
+    parameters: Optional[
+        ModelstudioVoiceChatParameters
+    ] = ModelstudioVoiceChatParameters()
+
+
+class ModelstudioVoiceChatSessionStopPayload(ModelstudioVoiceChatInPayload):
+    pass
+
+
+class ModelstudioVoiceChatRequest(BaseModel):
+    directive: ModelstudioVoiceChatDirective
+    payload: Union[
+        ModelstudioVoiceChatSessionStartPayload,
+        ModelstudioVoiceChatSessionStopPayload,
+    ]
+
+    @model_validator(mode="wrap")
+    def parse_payload_based_on_directive(
+        self,
+        values: Any,
+        handler: Any,
+    ) -> None:
+        data = values if isinstance(values, dict) else values.model_dump()
+
+        directive = data.get("directive")
+        payload_data = data.get("payload", {})
+
+        if directive == ModelstudioVoiceChatDirective.SESSION_START:
+            data["payload"] = ModelstudioVoiceChatSessionStartPayload(
+                **payload_data,
+            )
+        elif directive == ModelstudioVoiceChatDirective.SESSION_STOP:
+            data["payload"] = ModelstudioVoiceChatSessionStopPayload(
+                **payload_data,
+            )
+        else:
+            raise ValueError(f"Unsupported directive: {directive}")
+
+        return handler(data)
+
+
+class ModelstudioVoiceChatOutPayload(BaseModel):
+    session_id: Optional[str] = None
+
+
+class ModelstudioVoiceChatSessionStartedPayload(
+    ModelstudioVoiceChatOutPayload,
+):
+    pass
+
+
+class ModelstudioVoiceChatSessionStoppedPayload(
+    ModelstudioVoiceChatOutPayload,
+):
+    pass
+
+
+class ModelstudioVoiceChatAudioTranscriptPayload(
+    ModelstudioVoiceChatOutPayload,
+):
+    text: Optional[str] = ""
+    finished: bool
+
+
+class ModelstudioVoiceChatResponseTextPayload(
+    ModelstudioVoiceChatOutPayload,
+):
+    text: Optional[str] = ""
+    tool_calls: Optional[List[ChoiceDeltaToolCall]] = Field(
+        default_factory=list,
+    )
+    finished: bool
+
+
+class ModelstudioVoiceChatResponseAudioStartedPayload(
+    ModelstudioVoiceChatOutPayload,
+):
+    pass
+
+
+class ModelstudioVoiceChatResponseAudioStoppedPayload(
+    ModelstudioVoiceChatOutPayload,
+):
+    pass
+
+
+class ModelstudioVoiceChatResponse(BaseModel):
+    event: Optional[ModelstudioVoiceChatEvent]
+    payload: Union[
+        ModelstudioVoiceChatSessionStartedPayload,
+        ModelstudioVoiceChatSessionStoppedPayload,
+        ModelstudioVoiceChatAudioTranscriptPayload,
+        ModelstudioVoiceChatResponseTextPayload,
+        ModelstudioVoiceChatResponseAudioStartedPayload,
+        ModelstudioVoiceChatResponseAudioStoppedPayload,
+    ]
+
+
+class AzureConnection(BaseModel):
+    key: Optional[str] = None
+    region: Optional[str] = None
+
+
+class AzureAsrConfig(AsrConfig, AzureConnection):
+    sample_rate: Optional[int] = 16000
+    format: Optional[str] = "pcm"
+    bits_per_sample: Optional[int] = 16
+    nb_channels: Optional[int] = 1
+    initial_silence_timeout: Optional[int] = 5000
+    max_end_silence: Optional[int] = 800
+    language: Optional[str] = os.getenv("ASR_LANG", "en-US")
+
+
+class AzureTtsConfig(TtsConfig, AzureConnection):
+    voice: Optional[str] = os.getenv(
+        "TTS_VOICE",
+        "en-US-AvaMultilingualNeural",
+    )
+    sample_rate: Optional[int] = 16000
+    format: Optional[str] = "pcm"
+    bits_per_sample: Optional[int] = 16
+    nb_channels: Optional[int] = 1
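The request schema above dispatches its `payload` union by `directive` through a wrap-mode model validator. A minimal usage sketch follows; the import path is inferred from the file list and the payload values are illustrative only:

```python
# Sketch: parsing a SessionStart request with the schemas above.
# Import path inferred from the file list; payload values are illustrative.
from agentscope_runtime.engine.schemas.realtime import (
    ModelstudioVoiceChatRequest,
    ModelstudioVoiceChatSessionStartPayload,
)

raw = {
    "directive": "SessionStart",
    "payload": {
        "upstream": {"asr_vendor": "modelstudio"},
        "downstream": {"modalities": ["audio", "text"]},
    },
}

# The wrap validator reads `directive`, builds the matching payload model,
# then hands the normalized data back to pydantic via `handler`.
req = ModelstudioVoiceChatRequest.model_validate(raw)
assert isinstance(req.payload, ModelstudioVoiceChatSessionStartPayload)
print(req.payload.session_id)  # a fresh UUID when none was supplied
```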
agentscope_runtime/engine/services/context_manager.py

@@ -151,11 +151,13 @@ async def create_context_manager(
     memory_service: MemoryService = None,
     session_history_service: SessionHistoryService = None,
     rag_service: RAGService = None,
+    context_composer_cls=ContextComposer,
 ):
     manager = ContextManager(
         memory_service=memory_service,
         session_history_service=session_history_service,
         rag_service=rag_service,
+        context_composer_cls=context_composer_cls,
     )
 
     async with manager:
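The two added lines expose a `context_composer_cls` hook that is forwarded into `ContextManager`. A hedged sketch of how a caller might override it; the import locations, the `ContextComposer` subclassing surface, and the async-context-manager usage are assumptions based only on this hunk:

```python
# Hypothetical override of the new context_composer_cls hook; import
# locations and the composer API are assumed, not shown in this diff.
from agentscope_runtime.engine.services.context_manager import (
    ContextComposer,
    create_context_manager,
)


class LoggingComposer(ContextComposer):
    """Illustrative subclass; the real composer methods are not shown here."""


async def build_manager():
    # create_context_manager enters the manager itself (`async with manager:`
    # in the hunk), so callers presumably use it as an async context manager.
    async with create_context_manager(
        memory_service=None,
        session_history_service=None,
        rag_service=None,
        context_composer_cls=LoggingComposer,  # forwarded to ContextManager
    ) as manager:
        ...  # use the manager here
```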
agentscope_runtime/engine/services/mem0_memory_service.py (new file)

@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+import os
+from typing import Optional, Dict, Any, List
+from .memory_service import MemoryService
+from ..schemas.agent_schemas import Message, MessageType, ContentType
+
+
+class Mem0MemoryService(MemoryService):
+    """
+    Memory service that uses mem0 to store and retrieve memories.
+    To get the api key, please refer to the following link:
+    https://docs.mem0.ai/platform/quickstart
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        from mem0 import AsyncMemoryClient
+
+        mem0_api_key = os.getenv("MEM0_API_KEY")
+        if mem0_api_key is None:
+            raise ValueError("MEM0_API_KEY is not set")
+        mem0_api_key = os.getenv("MEM0_API_KEY")
+
+        # get the mem0 client instance
+        self.service = AsyncMemoryClient(api_key=mem0_api_key)
+
+    @staticmethod
+    async def get_query_text(message: Message) -> str:
+        """
+        Get the query text from the message.
+        """
+        if message:
+            if message.type == MessageType.MESSAGE:
+                for content in message.content:
+                    if content.type == ContentType.TEXT:
+                        return content.text
+        return ""
+
+    @staticmethod
+    def transform_message(message: Message) -> dict:
+        content_text = None
+
+        try:
+            if hasattr(message, "content") and isinstance(
+                message.content,
+                list,
+            ):
+                if len(message.content) > 0 and hasattr(
+                    message.content[0],
+                    "text",
+                ):
+                    content_text = message.content[0].text
+        except (AttributeError, IndexError):
+            # Log error or handle appropriately
+            pass
+
+        return {
+            "role": getattr(message, "role", None),
+            "content": content_text,
+        }
+
+    async def transform_messages(self, messages: List[Message]) -> List[dict]:
+        return [self.transform_message(message) for message in messages]
+
+    async def start(self):
+        pass
+
+    async def stop(self):
+        pass
+
+    async def health(self):
+        pass
+
+    async def add_memory(
+        self,
+        user_id: str,
+        messages: list,
+        session_id: Optional[str] = None,
+    ):
+        messages = await self.transform_messages(messages)
+        return await self.service.add(
+            messages=messages,
+            user_id=user_id,
+            run_id=session_id,
+            # async_mode=True,
+        )
+
+    async def search_memory(
+        self,
+        user_id: str,
+        messages: list,
+        filters: Optional[Dict[str, Any]] = None,
+    ) -> list:
+        query = await self.get_query_text(messages[-1])
+        kwargs = {
+            "query": query,
+            "user_id": user_id,
+        }
+        if filters:
+            kwargs["filters"] = filters
+        return await self.service.search(**kwargs)
+
+    async def list_memory(
+        self,
+        user_id: str,
+        filters: Optional[Dict[str, Any]] = None,
+    ) -> list:
+        kwargs = {"user_id": user_id}
+        if filters:
+            kwargs["filters"] = filters
+        return await self.service.get_all(**kwargs)
+
+    async def delete_memory(
+        self,
+        user_id: str,
+        session_id: Optional[str] = None,
+    ) -> None:
+        if session_id:
+            return await self.service.delete_all(
+                user_id=user_id,
+                run_id=session_id,
+            )
+        else:
+            return await self.service.delete_all(user_id=user_id)
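A short usage sketch for the new service: it requires `MEM0_API_KEY` in the environment, and the `messages` argument is assumed to be a list of `agent_schemas.Message` objects built elsewhere in the runtime:

```python
# Usage sketch, assuming MEM0_API_KEY is exported and `messages` holds
# agent_schemas.Message objects produced elsewhere in the runtime.
import asyncio

from agentscope_runtime.engine.services.mem0_memory_service import (
    Mem0MemoryService,
)


async def remember_and_recall(messages):
    service = Mem0MemoryService()
    # Persist the turn under a user/session, then query it back;
    # session_id maps to run_id in the mem0 client calls above.
    await service.add_memory(user_id="u-1", messages=messages, session_id="s-1")
    return await service.search_memory(user_id="u-1", messages=messages)


# asyncio.run(remember_and_recall(messages))  # with real Message objects
```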
agentscope_runtime/engine/services/memory_service.py

@@ -137,7 +137,8 @@ class InMemoryMemoryService(MemoryService):
         if storage_key not in self._store[user_id]:
             self._store[user_id][storage_key] = []
 
-        self._store[user_id][storage_key].extend(messages)
+        if messages:
+            self._store[user_id][storage_key].extend(messages)
 
     async def search_memory(
         self,
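The guard above turns an empty or `None` batch into a no-op instead of extending the store with a falsy value. The enclosing method is not visible in this hunk, so the call shape below (constructor and `add_memory` signature) is an assumption based on the `MemoryService` interface seen in the mem0 implementation:

```python
# Assumed call shape; the enclosing method name is not shown in the hunk.
from agentscope_runtime.engine.services.memory_service import (
    InMemoryMemoryService,
)


async def demo(msgs):
    service = InMemoryMemoryService()
    await service.add_memory(user_id="u-1", messages=[])    # no-op under the new guard
    await service.add_memory(user_id="u-1", messages=msgs)  # extends the store as before
```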
agentscope_runtime/engine/services/redis_session_history_service.py

@@ -112,9 +112,10 @@ class RedisSessionHistoryService(SessionHistoryService):
             message = [message]
         norm_message = []
         for msg in message:
-            if not isinstance(msg, Message):
-                msg = Message.model_validate(msg)
-            norm_message.append(msg)
+            if msg is not None:
+                if not isinstance(msg, Message):
+                    msg = Message.model_validate(msg)
+                norm_message.append(msg)
 
         session.messages.extend(norm_message)
 
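Both session-history services (this hunk and the in-memory one that follows) now skip `None` entries and coerce plain dicts through `Message.model_validate`. A standalone illustration of that normalization pattern; the `Message` model here is a stand-in for `agent_schemas.Message`, not the real class:

```python
# Standalone illustration of the normalization introduced above; this
# Message is a stand-in for agent_schemas.Message, not the real class.
from typing import Optional

from pydantic import BaseModel


class Message(BaseModel):
    role: str
    content: Optional[str] = None


incoming = [
    {"role": "user", "content": "hi"},          # plain dict -> coerced
    None,                                        # skipped by the new guard
    Message(role="assistant", content="hello"),  # already a Message
]

norm_message = []
for msg in incoming:
    if msg is not None:
        if not isinstance(msg, Message):
            msg = Message.model_validate(msg)
        norm_message.append(msg)

assert len(norm_message) == 2  # the None entry was dropped
```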
agentscope_runtime/engine/services/session_history_service.py

@@ -238,9 +238,10 @@ class InMemorySessionHistoryService(SessionHistoryService):
 
         norm_message = []
         for msg in message:
-            if not isinstance(msg, Message):
-                msg = Message.model_validate(msg)
-            norm_message.append(msg)
+            if msg is not None:
+                if not isinstance(msg, Message):
+                    msg = Message.model_validate(msg)
+                norm_message.append(msg)
         session.messages.extend(norm_message)
 
         # update the in memory copy