pycityagent 2.0.0a52__cp310-cp310-macosx_11_0_arm64.whl → 2.0.0a53__cp310-cp310-macosx_11_0_arm64.whl
- pycityagent/agent/agent.py +48 -62
- pycityagent/agent/agent_base.py +66 -53
- pycityagent/cityagent/bankagent.py +5 -7
- pycityagent/cityagent/blocks/__init__.py +0 -2
- pycityagent/cityagent/blocks/cognition_block.py +149 -172
- pycityagent/cityagent/blocks/economy_block.py +90 -129
- pycityagent/cityagent/blocks/mobility_block.py +56 -29
- pycityagent/cityagent/blocks/needs_block.py +163 -145
- pycityagent/cityagent/blocks/other_block.py +17 -9
- pycityagent/cityagent/blocks/plan_block.py +44 -56
- pycityagent/cityagent/blocks/social_block.py +70 -51
- pycityagent/cityagent/blocks/utils.py +2 -0
- pycityagent/cityagent/firmagent.py +6 -7
- pycityagent/cityagent/governmentagent.py +7 -9
- pycityagent/cityagent/memory_config.py +48 -48
- pycityagent/cityagent/nbsagent.py +6 -29
- pycityagent/cityagent/societyagent.py +204 -119
- pycityagent/environment/sim/client.py +10 -1
- pycityagent/environment/sim/clock_service.py +2 -2
- pycityagent/environment/sim/pause_service.py +61 -0
- pycityagent/environment/simulator.py +17 -12
- pycityagent/llm/embeddings.py +0 -24
- pycityagent/memory/faiss_query.py +29 -26
- pycityagent/memory/memory.py +720 -272
- pycityagent/pycityagent-sim +0 -0
- pycityagent/simulation/agentgroup.py +92 -99
- pycityagent/simulation/simulation.py +115 -40
- pycityagent/tools/tool.py +7 -9
- pycityagent/workflow/block.py +11 -4
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a53.dist-info}/METADATA +1 -1
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a53.dist-info}/RECORD +35 -35
- pycityagent/cityagent/blocks/time_block.py +0 -116
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a53.dist-info}/LICENSE +0 -0
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a53.dist-info}/WHEEL +0 -0
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a53.dist-info}/entry_points.txt +0 -0
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a53.dist-info}/top_level.txt +0 -0
pycityagent/cityagent/societyagent.py
CHANGED
@@ -8,7 +8,6 @@ from pycityagent.agent import Agent
 from pycityagent.economy import EconomyClient
 from pycityagent.llm.llm import LLM
 from pycityagent.memory import Memory
-from pycityagent.message import Messager
 from pycityagent.workflow import Block
 from pycityagent.tools import UpdateWithSimulator
 
@@ -20,7 +19,7 @@ logger = logging.getLogger("pycityagent")
 
 
 class PlanAndActionBlock(Block):
-    """
+    """Active workflow based on needs model and plan behavior model"""
 
     longTermDecisionBlock: MonthPlanBlock
     needsBlock: NeedsBlock
@@ -55,87 +54,23 @@ class PlanAndActionBlock(Block):
             llm=llm, memory=memory, simulator=simulator, economy_client=economy_client
         )
         self.otherBlock = OtherBlock(llm=llm, memory=memory)
-
-    async def
-
-
-
-            logger.info("Agent is moving")
-            await asyncio.sleep(1)
-            return False
-
-        # Get the previous step information
-        current_step = await self.memory.get("current_step")
-        if current_step["intention"] == "" or current_step["type"] == "":
-            # No previous step, return directly
-            return True
-        time_now = int(await self.simulator.get_time())
-        step_start_time = current_step["start_time"]
-        step_consumed_time = current_step["evaluation"]["consumed_time"]
-        time_end_plan = step_start_time + int(step_consumed_time) * 60
-        if time_now >= time_end_plan:
-            # The previous step has been completed
-            current_plan = await self.memory.get("current_plan")
-            current_step["evaluation"]["consumed_time"] = (
-                time_now - step_start_time
-            ) / 60
-            current_step_index = next(
-                (
-                    i
-                    for i, step in enumerate(current_plan["steps"])
-                    if step["intention"] == current_step["intention"]
-                    and step["type"] == current_step["type"]
-                ),
-                None,
-            )
-            current_plan["steps"][current_step_index] = current_step
-            await self.memory.update("current_plan", current_plan)
-            if current_step_index is not None and current_step_index + 1 < len(
-                current_plan["steps"]
-            ):
-                next_step = current_plan["steps"][current_step_index + 1]
-                await self.memory.update("current_step", next_step)
-            else:
-                # Mark the plan as completed
-                current_plan["completed"] = True
-                current_plan["end_time"] = await self.simulator.get_time(
-                    format_time=True
-                )
-                await self.memory.update("current_plan", current_plan)
-                await self.memory.update("current_step", {"intention": "", "type": ""})
-                logger.info("Current plan execution completed.\n")
-            return True
-        # The previous step has not been completed
-        return False
-
-    async def forward(self):
-        # Synchronize the agent's status with the simulator
-        await self._agent.update_with_sim()
-        # Check whether the previous step has been completed
-        if not await self.check_and_update_step():
-            return
-
-        # Long-term decision
-        await self.longTermDecisionBlock.forward()
-
-        # Update needs
-        time_now = await self.simulator.get_time(format_time=True)
-        logger.info(f"Current time: {time_now}")
-        await self.needsBlock.forward()
-        current_need = await self.memory.get("current_need")
-        logger.info(f"Current need: {current_need}")
-
-        # Plan generation
-        current_plan = await self.memory.get("current_plan")
+
+    async def plan_generation(self):
+        """Generate plan"""
+        current_plan = await self.memory.status.get("current_plan")
+        current_need = await self.memory.status.get("current_need")
         if current_need != "none" and not current_plan:
             await self.planBlock.forward()
-
-
-
-
+
+    async def step_execution(self):
+        """Execute the current step"""
+        current_plan = await self.memory.status.get("current_plan")
+        execution_context = await self.memory.status.get("execution_context")
+        current_step = await self.memory.status.get("current_step")
+        # check current_step is valid (not empty)
         if current_step and current_step.get("type") and current_step.get("intention"):
             step_type = current_step.get("type")
-            position = await self.memory.get("position")
+            position = await self.memory.status.get("position")
             if "aoi_position" in position:
                 current_step["position"] = position["aoi_position"]["aoi_id"]
             current_step["start_time"] = int(await self.simulator.get_time())
@@ -144,22 +79,50 @@ class PlanAndActionBlock(Block):
             )
             result = None
             if step_type == "mobility":
-
-
-
+                if self.enable_mobility:
+                    result = await self.mobilityBlock.forward(
+                        current_step, execution_context
+                    )
+                else:
+                    result = {
+                        'success': False,
+                        'evaluation': f'Mobility Behavior is disabled',
+                        'consumed_time': 0,
+                        'node_id': None
+                    }
             elif step_type == "social":
-
+                if self.enable_social:
+                    result = await self.socialBlock.forward(
+                        current_step, execution_context
+                    )
+                else:
+                    result = {
+                        'success': False,
+                        'evaluation': f'Social Behavior is disabled',
+                        'consumed_time': 0,
+                        'node_id': None
+                    }
             elif step_type == "economy":
-
+                if self.enable_economy:
+                    result = await self.economyBlock.forward(
+                        current_step, execution_context
+                    )
+                else:
+                    result = {
+                        'success': False,
+                        'evaluation': f'Economy Behavior is disabled',
+                        'consumed_time': 0,
+                        'node_id': None
+                    }
+            elif step_type == "other":
+                result = await self.otherBlock.forward(
                     current_step, execution_context
                 )
-            elif step_type == "other":
-                result = await self.otherBlock.forward(current_step, execution_context)
             if result != None:
                 logger.info(f"Execution result: {result}")
                 current_step["evaluation"] = result
 
-            #
+            # Update current_step, plan, and execution_context information
             current_step_index = next(
                 (
                     i
@@ -170,30 +133,51 @@ class PlanAndActionBlock(Block):
                 None,
             )
             current_plan["steps"][current_step_index] = current_step
-            await self.memory.update("current_step", current_step)
-            await self.memory.update("current_plan", current_plan)
-            await self.memory.update("execution_context", execution_context)
+            await self.memory.status.update("current_step", current_step)
+            await self.memory.status.update("current_plan", current_plan)
+            await self.memory.status.update("execution_context", execution_context)
 
+    async def forward(self):
+        # Long-term decision
+        await self.longTermDecisionBlock.forward()
 
-
-
+        # update needs
+        await self.needsBlock.forward()
+
+        # plan generation
+        await self.plan_generation()
 
+        # step execution
+        await self.step_execution()
+
+class MindBlock(Block):
+    """Cognition workflow"""
     cognitionBlock: CognitionBlock
 
     def __init__(self, llm: LLM, memory: Memory, simulator: Simulator):
         super().__init__(name="mind_block", llm=llm, memory=memory, simulator=simulator)
-        self.cognitionBlock = CognitionBlock(
-            llm=llm, memory=memory, simulator=simulator
-        )
+        self.cognitionBlock = CognitionBlock(llm=self.llm, memory=self.memory, simulator=simulator)
 
     async def forward(self):
         await self.cognitionBlock.forward()
 
-
 class SocietyAgent(CitizenAgent):
     mindBlock: MindBlock
     planAndActionBlock: PlanAndActionBlock
     update_with_sim = UpdateWithSimulator()
+    configurable_fields = ["enable_cognition", "enable_mobility", "enable_social", "enable_economy"]
+    default_values = {
+        "enable_cognition": True,
+        "enable_mobility": True,
+        "enable_social": True,
+        "enable_economy": True,
+    }
+    fields_description = {
+        "enable_cognition": "Enable cognition workflow",
+        "enable_mobility": "Enable mobility workflow",
+        "enable_social": "Enable social workflow",
+        "enable_economy": "Enable economy workflow",
+    }
 
     def __init__(
         self,
@@ -211,27 +195,115 @@ class SocietyAgent(CitizenAgent):
             economy_client=economy_client,
         )
         self.mindBlock = MindBlock(
-            llm=self._llm_client, memory=self.
+            llm=self._llm_client, memory=self.memory, simulator=self._simulator
         )
         self.planAndActionBlock = PlanAndActionBlock(
             agent=self,
             llm=self._llm_client,
-            memory=self.
+            memory=self.memory,
             simulator=self._simulator,
             economy_client=self._economy_client,
         )
         self.step_count = -1
+        self.cognition_update = -1
+
+        # config
+        self.enable_cognition = True
+        self.enable_mobility = True
+        self.enable_social = True
+        self.enable_economy = True
 
     # Main workflow
     async def forward(self):
-        logger.info(f"Agent {self._uuid} forward")
         self.step_count += 1
-
-
-
-
-
-        await
+        logger.info(f"Agent {self._uuid} forward [step_count: {self.step_count}]")
+        # sync agent status with simulator
+        await self.update_with_sim()
+
+        # check last step
+        if not await self.check_and_update_step():
+            return
+
+        await self.planAndActionBlock.forward()
+
+        if self.enable_cognition:
+            await self.mindBlock.forward()
+
+    async def check_and_update_step(self):
+        """Check if the previous step has been completed"""
+        status = await self.memory.status.get("status")
+        if status == 2:
+            # Agent is moving
+            logger.info("Agent is moving")
+            await asyncio.sleep(1)
+            return False
+
+        # Get the previous step information
+        current_step = await self.memory.status.get("current_step")
+        if current_step["intention"] == "" or current_step["type"] == "":
+            # No previous step, return directly
+            return True
+        time_now = int(await self.simulator.get_time())
+        step_start_time = current_step["start_time"]
+        step_consumed_time = current_step["evaluation"]["consumed_time"]
+        time_end_plan = step_start_time + int(step_consumed_time) * 60
+        if time_now >= time_end_plan:
+            # The previous step has been completed
+            current_plan = await self.memory.status.get("current_plan")
+            current_step["evaluation"]["consumed_time"] = (
+                time_now - step_start_time
+            ) / 60
+            current_plan["stream_nodes"].append(current_step["evaluation"]["node_id"])
+            if current_step["evaluation"]["success"]:
+                # Last step is completed
+                current_step_index = next(
+                    (
+                        i
+                        for i, step in enumerate(current_plan["steps"])
+                        if step["intention"] == current_step["intention"]
+                        and step["type"] == current_step["type"]
+                    ),
+                    None,
+                )
+                current_plan["steps"][current_step_index] = current_step
+                await self.memory.status.update("current_plan", current_plan)
+                if current_step_index is not None and current_step_index + 1 < len(
+                    current_plan["steps"]
+                ):
+                    next_step = current_plan["steps"][current_step_index + 1]
+                    await self.memory.status.update("current_step", next_step)
+                else:
+                    # Whole plan is completed
+                    current_plan["completed"] = True
+                    current_plan["end_time"] = await self.simulator.get_time(
+                        format_time=True
+                    )
+                    if self.enable_cognition:
+                        # Update emotion for the plan
+                        related_memories = await self.memory.stream.get_by_ids(current_plan["stream_nodes"])
+                        incident = f"You have successfully completed the plan: {related_memories}"
+                        conclusion = await self.mindBlock.cognitionBlock.emotion_update(incident)
+                        await self.memory.stream.add_cognition(description=conclusion)
+                        await self.memory.stream.add_cognition_to_memory(current_plan["stream_nodes"], conclusion)
+                    await self.memory.status.update("current_plan", current_plan)
+                    await self.memory.status.update("current_step", {"intention": "", "type": ""})
+                return True
+            else:
+                current_plan["failed"] = True
+                current_plan["end_time"] = await self.simulator.get_time(
+                    format_time=True
+                )
+                if self.enable_cognition:
+                    # Update emotion for the plan
+                    related_memories = await self.memory.stream.get_by_ids(current_plan["stream_nodes"])
+                    incident = f"You have failed to complete the plan: {related_memories}"
+                    conclusion = await self.mindBlock.cognitionBlock.emotion_update(incident)
+                    await self.memory.stream.add_cognition(description=conclusion)
+                    await self.memory.stream.add_cognition_to_memory(current_plan["stream_nodes"], conclusion)
+                await self.memory.status.update("current_plan", current_plan)
+                await self.memory.status.update("current_step", {"intention": "", "type": ""})
+        # The previous step has not been completed
+        return False
 
     async def process_agent_chat_response(self, payload: dict) -> str:
         if payload["type"] == "social":
@@ -256,9 +328,16 @@ class SocietyAgent(CitizenAgent):
 
             if not content:
                 return ""
+
+            # Add memory
+            description = f"You received a social message: {content}"
+            await self.memory.stream.add_social(description=description)
+            if self.enable_cognition:
+                # Update emotion
+                await self.mindBlock.cognitionBlock.emotion_update(description)
 
             # Get chat histories and ensure proper format
-            chat_histories = await self.
+            chat_histories = await self.memory.status.get("chat_histories") or {}
             if not isinstance(chat_histories, dict):
                 chat_histories = {}
 
@@ -271,14 +350,14 @@ class SocietyAgent(CitizenAgent):
 
             # Check propagation limit
             if propagation_count > 5:
-                await self.
+                await self.memory.status.update("chat_histories", chat_histories)
                 logger.info(
                     f"Message propagation limit reached ({propagation_count} > 5), stopping propagation"
                 )
                 return ""
 
             # Get relationship score
-            relationships = await self.
+            relationships = await self.memory.status.get("relationships") or {}
             relationship_score = relationships.get(sender_id, 50)
 
             # Decision prompt
@@ -286,11 +365,12 @@ class SocietyAgent(CitizenAgent):
         - Received message: "{content}"
         - Our relationship score: {relationship_score}/100
         - My profile: {{
-            "gender": "{await self.
-            "education": "{await self.
-            "personality": "{await self.
-            "occupation": "{await self.
+            "gender": "{await self.memory.status.get("gender") or ""}",
+            "education": "{await self.memory.status.get("education") or ""}",
+            "personality": "{await self.memory.status.get("personality") or ""}",
+            "occupation": "{await self.memory.status.get("occupation") or ""}"
         }}
+        - My current emotion: {await self.memory.status.get("emotion_types")}
         - Recent chat history: {chat_histories.get(sender_id, "")}
 
         Should I respond to this message? Consider:
@@ -311,18 +391,19 @@ class SocietyAgent(CitizenAgent):
             )
 
             if should_respond.strip().upper() != "YES":
-                await self.
+                await self.memory.status.update("chat_histories", chat_histories)
                 return ""
 
             response_prompt = f"""Based on:
         - Received message: "{content}"
         - Our relationship score: {relationship_score}/100
         - My profile: {{
-            "gender": "{await self.
-            "education": "{await self.
-            "personality": "{await self.
-            "occupation": "{await self.
+            "gender": "{await self.memory.status.get("gender") or ""}",
+            "education": "{await self.memory.status.get("education") or ""}",
+            "personality": "{await self.memory.status.get("personality") or ""}",
+            "occupation": "{await self.memory.status.get("occupation") or ""}"
         }}
+        - My current emotion: {await self.memory.status.get("emotion_types")}
         - Recent chat history: {chat_histories.get(sender_id, "")}
 
         Generate an appropriate response that:
@@ -346,7 +427,7 @@ class SocietyAgent(CitizenAgent):
             if response:
                 # Update chat history with response
                 chat_histories[sender_id] += f",me: {response}"
-                await self.
+                await self.memory.status.update("chat_histories", chat_histories)
 
                 # Send response
                 serialized_response = json.dumps(
@@ -371,4 +452,8 @@ class SocietyAgent(CitizenAgent):
                 value = float(value)
             else:
                 value = int(value)
-            await self.memory.
+            description = f"You received a economic message: Your {key} has changed from {await self.memory.status.get(key)} to {value}"
+            await self.memory.status.update(key, value)
+            await self.memory.stream.add_economic(description=description)
+            if self.enable_cognition:
+                await self.mindBlock.cognitionBlock.emotion_update(description)
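Note (not part of the diff): two patterns recur across the SocietyAgent changes above. Agent state is now read and written through memory.status, with event records appended to the new memory.stream, and the four enable_* switches declared in configurable_fields gate the mobility, social, economy, and cognition workflows. A minimal, illustrative caller-side sketch of those patterns; the agent object and its setup are assumed, and every call shown mirrors one that appears in the diff:

# Illustrative sketch only -- assumes an already-initialized SocietyAgent instance `agent`.
async def demo_new_api(agent):
    # Behavior switches introduced in this release (all default to True in __init__).
    agent.enable_mobility = False   # step_execution then returns a "Mobility Behavior is disabled" result
    agent.enable_cognition = True   # keep MindBlock emotion updates active

    # Key-value state now lives behind memory.status.
    need = await agent.memory.status.get("current_need")
    await agent.memory.status.update("current_need", need)

    # Event records go to the stream memory consumed by the cognition block.
    await agent.memory.stream.add_social(description="You received a social message: hello")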
pycityagent/environment/sim/client.py
CHANGED
@@ -10,7 +10,7 @@ from .road_service import RoadService
 from .social_service import SocialService
 from .economy_services import EconomyPersonService, EconomyOrgService
 from .light_service import LightService
-
+from .pause_service import PauseService
 from ..utils.grpc import create_aio_channel
 
 __all__ = ["CityClient"]
@@ -44,6 +44,7 @@ class CityClient:
         self._economy_person_service = EconomyPersonService(aio_channel)
         self._economy_org_service = EconomyOrgService(aio_channel)
         self._light_service = LightService(aio_channel)
+        self._pause_service = PauseService(aio_channel)
 
     @staticmethod
     def from_sidecar(sidecar: OnlyClientSidecar, name: str = NAME):
@@ -61,6 +62,14 @@ class CityClient:
         """
         return self._clock_service
 
+    @property
+    def pause_service(self):
+        """
+        模拟器暂停服务子模块
+        Simulator pause service submodule
+        """
+        return self._pause_service
+
     @property
     def lane_service(self):
         """
pycityagent/environment/sim/clock_service.py
CHANGED
@@ -30,10 +30,10 @@ class ClockService:
         Getting current simulation clock
 
         Args:
-        - req (dict): https://cityproto.
+        - req (dict): https://cityproto.readthedocs.io/en/latest/docs.html#nowrequest
 
         Returns:
-        - https://cityproto.
+        - https://cityproto.readthedocs.io/en/latest/docs.html#nowresponse
         """
         if type(req) != clock_service.NowRequest:
             req = ParseDict(req, clock_service.NowRequest())
pycityagent/environment/sim/pause_service.py
ADDED
@@ -0,0 +1,61 @@
+from collections.abc import Awaitable, Coroutine
+from typing import Any, Dict, Union, cast
+
+import grpc
+from google.protobuf.json_format import ParseDict
+from pycityproto.city.pause.v1 import pause_service_pb2 as pause_service
+from pycityproto.city.pause.v1 import pause_service_pb2_grpc as pause_grpc
+
+from ..utils.protobuf import async_parse
+
+__all__ = ["PauseService"]
+
+
+class PauseService:
+    """
+    城市模拟暂停服务
+    City simulation pause service
+    """
+
+    def __init__(self, aio_channel: grpc.aio.Channel):
+        self._aio_stub = pause_grpc.PauseServiceStub(aio_channel)
+
+    async def pause(
+        self,
+    ) -> Coroutine[Any, Any, Union[Dict[str, Any], pause_service.PauseResponse]]:
+        """
+        暂停模拟
+        Pause the simulation
+
+        Args:
+        - req (dict): https://cityproto.readthedocs.io/en/latest/docs.html#pauserequest
+
+        Returns:
+        - https://cityproto.readthedocs.io/en/latest/docs.html#pauseresponse
+        """
+        req = pause_service.PauseRequest()
+        res = cast(
+            Awaitable[pause_service.PauseResponse],
+            self._aio_stub.Pause(req),
+        )
+        return
+
+    async def resume(
+        self,
+    ) -> Coroutine[Any, Any, Union[Dict[str, Any], pause_service.ResumeResponse]]:
+        """
+        恢复模拟
+        Resume the simulation
+
+        Args:
+        - req (dict): https://cityproto.readthedocs.io/en/latest/docs.html#resumerequest
+
+        Returns:
+        - https://cityproto.readthedocs.io/en/latest/docs.html#resumeresponse
+        """
+        req = pause_service.ResumeRequest()
+        res = cast(
+            Awaitable[pause_service.ResumeResponse],
+            self._aio_stub.Resume(req),
+        )
+        return
pycityagent/environment/simulator.py
CHANGED
@@ -162,35 +162,40 @@ class Simulator:
         Returns:
         - time Union[int, str]: time in second(int) or formatted time(str)
         """
-
-
-        self.time =
+        now = await self._client.clock_service.Now({})
+        now = cast(dict[str, int], now)
+        self.time = now["t"]
         if format_time:
             current_date = datetime.now().date()
             start_of_day = datetime.combine(current_date, datetime.min.time())
-            current_time = start_of_day + timedelta(seconds=
+            current_time = start_of_day + timedelta(seconds=now["t"])
             formatted_time = current_time.strftime(format)
             return formatted_time
         else:
-
-
+            return int(now["t"])
+
+    async def pause(self):
+        await self._client.pause_service.pause()
+
+    async def resume(self):
+        await self._client.pause_service.resume()
 
     async def get_simulator_day(self) -> int:
         """
         Get the current day count of the simulator
         """
-
-
-        day =
+        now = await self._client.clock_service.Now({})
+        now = cast(dict[str, int], now)
+        day = now["day"]
         return day
 
     async def get_simulator_second_from_start_of_day(self) -> int:
         """
         Get the number of seconds elapsed from 00:00:00 to the current simulator time
         """
-
-
-        return
+        now = await self._client.clock_service.Now({})
+        now = cast(dict[str, int], now)
+        return now["t"] % 86400
 
     async def get_person(self, person_id: int) -> dict:
         return await self._client.person_service.GetPerson(
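Note (not part of the diff): the Simulator changes above route get_time, get_simulator_day, and get_simulator_second_from_start_of_day through clock_service.Now({}) and add thin pause/resume wrappers over the new pause service. A sketch of how these might be combined, assuming an existing Simulator instance named simulator; the function name and print are illustrative:

# Illustrative sketch only -- `simulator` is assumed to be an existing Simulator.
async def snapshot_clock(simulator):
    await simulator.pause()                      # delegates to CityClient.pause_service.pause()
    t = await simulator.get_time()               # seconds, taken from Now({})["t"]
    day = await simulator.get_simulator_day()    # day counter from the same Now() response
    print(f"simulation paused at t={t}s on day {day}")
    await simulator.resume()                     # delegates to CityClient.pause_service.resume()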
pycityagent/llm/embeddings.py
CHANGED
@@ -196,30 +196,6 @@ class SimpleEmbedding(Embeddings):
         """Embed query text."""
         return self._embed(text)
 
-    # def save(self, file_path: str):
-    #     """Save the model"""
-    #     state = {
-    #         "vector_dim": self.vector_dim,
-    #         "cache_size": self.cache_size,
-    #         "vocab": self._vocab,
-    #         "idf": self._idf,
-    #         "doc_count": self._doc_count,
-    #     }
-    #     with open(file_path, "w") as f:
-    #         json.dump(state, f)
-
-    # def load(self, file_path: str):
-    #     """Load the model"""
-    #     with open(file_path, "r") as f:
-    #         state = json.load(f)
-    #         self.vector_dim = state["vector_dim"]
-    #         self.cache_size = state["cache_size"]
-    #         self._vocab = state["vocab"]
-    #         self._idf = state["idf"]
-    #         self._doc_count = state["doc_count"]
-    #         self._cache = {}  # Clear the cache
-
-
 if __name__ == "__main__":
     # se = SentenceEmbedding(
     #     pretrained_model_name_or_path="ignore/BAAI--bge-m3", cache_dir="ignore"