pycityagent 2.0.0a52__cp311-cp311-macosx_11_0_arm64.whl → 2.0.0a54__cp311-cp311-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pycityagent/agent/agent.py +83 -62
- pycityagent/agent/agent_base.py +81 -54
- pycityagent/cityagent/bankagent.py +5 -7
- pycityagent/cityagent/blocks/__init__.py +0 -2
- pycityagent/cityagent/blocks/cognition_block.py +149 -172
- pycityagent/cityagent/blocks/economy_block.py +90 -129
- pycityagent/cityagent/blocks/mobility_block.py +56 -29
- pycityagent/cityagent/blocks/needs_block.py +163 -145
- pycityagent/cityagent/blocks/other_block.py +17 -9
- pycityagent/cityagent/blocks/plan_block.py +45 -57
- pycityagent/cityagent/blocks/social_block.py +70 -51
- pycityagent/cityagent/blocks/utils.py +2 -0
- pycityagent/cityagent/firmagent.py +6 -7
- pycityagent/cityagent/governmentagent.py +7 -9
- pycityagent/cityagent/memory_config.py +48 -48
- pycityagent/cityagent/message_intercept.py +99 -0
- pycityagent/cityagent/nbsagent.py +6 -29
- pycityagent/cityagent/societyagent.py +325 -127
- pycityagent/cli/wrapper.py +4 -0
- pycityagent/economy/econ_client.py +0 -2
- pycityagent/environment/__init__.py +7 -1
- pycityagent/environment/sim/client.py +10 -1
- pycityagent/environment/sim/clock_service.py +2 -2
- pycityagent/environment/sim/pause_service.py +61 -0
- pycityagent/environment/sim/sim_env.py +34 -46
- pycityagent/environment/simulator.py +18 -14
- pycityagent/llm/embeddings.py +0 -24
- pycityagent/llm/llm.py +18 -10
- pycityagent/memory/faiss_query.py +29 -26
- pycityagent/memory/memory.py +733 -247
- pycityagent/message/__init__.py +8 -1
- pycityagent/message/message_interceptor.py +322 -0
- pycityagent/message/messager.py +42 -11
- pycityagent/pycityagent-sim +0 -0
- pycityagent/simulation/agentgroup.py +137 -96
- pycityagent/simulation/simulation.py +184 -38
- pycityagent/simulation/storage/pg.py +2 -2
- pycityagent/tools/tool.py +7 -9
- pycityagent/utils/__init__.py +7 -2
- pycityagent/utils/pg_query.py +1 -0
- pycityagent/utils/survey_util.py +26 -23
- pycityagent/workflow/block.py +14 -7
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a54.dist-info}/METADATA +2 -2
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a54.dist-info}/RECORD +48 -46
- pycityagent/cityagent/blocks/time_block.py +0 -116
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a54.dist-info}/LICENSE +0 -0
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a54.dist-info}/WHEEL +0 -0
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a54.dist-info}/entry_points.txt +0 -0
- {pycityagent-2.0.0a52.dist-info → pycityagent-2.0.0a54.dist-info}/top_level.txt +0 -0
pycityagent/cityagent/societyagent.py
CHANGED
@@ -8,9 +8,8 @@ from pycityagent.agent import Agent
 from pycityagent.economy import EconomyClient
 from pycityagent.llm.llm import LLM
 from pycityagent.memory import Memory
-from pycityagent.message import Messager
-from pycityagent.workflow import Block
 from pycityagent.tools import UpdateWithSimulator
+from pycityagent.workflow import Block

 from .blocks import (CognitionBlock, EconomyBlock, MobilityBlock, NeedsBlock,
                      OtherBlock, PlanBlock, SocialBlock)
@@ -20,7 +19,7 @@ logger = logging.getLogger("pycityagent")


 class PlanAndActionBlock(Block):
-    """
+    """Active workflow based on needs model and plan behavior model"""

     longTermDecisionBlock: MonthPlanBlock
     needsBlock: NeedsBlock
@@ -55,87 +54,22 @@ class PlanAndActionBlock(Block):
             llm=llm, memory=memory, simulator=simulator, economy_client=economy_client
         )
         self.otherBlock = OtherBlock(llm=llm, memory=memory)
-
-
-
-
-            # Agent is moving
-            logger.info("Agent is moving")
-            await asyncio.sleep(1)
-            return False
-
-        # Get the previous step information
-        current_step = await self.memory.get("current_step")
-        if current_step["intention"] == "" or current_step["type"] == "":
-            # No previous step, return directly
-            return True
-        time_now = int(await self.simulator.get_time())
-        step_start_time = current_step["start_time"]
-        step_consumed_time = current_step["evaluation"]["consumed_time"]
-        time_end_plan = step_start_time + int(step_consumed_time) * 60
-        if time_now >= time_end_plan:
-            # The previous step has been completed
-            current_plan = await self.memory.get("current_plan")
-            current_step["evaluation"]["consumed_time"] = (
-                time_now - step_start_time
-            ) / 60
-            current_step_index = next(
-                (
-                    i
-                    for i, step in enumerate(current_plan["steps"])
-                    if step["intention"] == current_step["intention"]
-                    and step["type"] == current_step["type"]
-                ),
-                None,
-            )
-            current_plan["steps"][current_step_index] = current_step
-            await self.memory.update("current_plan", current_plan)
-            if current_step_index is not None and current_step_index + 1 < len(
-                current_plan["steps"]
-            ):
-                next_step = current_plan["steps"][current_step_index + 1]
-                await self.memory.update("current_step", next_step)
-            else:
-                # Mark the plan as completed
-                current_plan["completed"] = True
-                current_plan["end_time"] = await self.simulator.get_time(
-                    format_time=True
-                )
-                await self.memory.update("current_plan", current_plan)
-                await self.memory.update("current_step", {"intention": "", "type": ""})
-                logger.info("Current plan execution completed.\n")
-            return True
-        # The previous step has not been completed
-        return False
-
-    async def forward(self):
-        # Sync the agent status with the simulator
-        await self._agent.update_with_sim()
-        # Check whether the previous step has finished
-        if not await self.check_and_update_step():
-            return
-
-        # Long-term decision
-        await self.longTermDecisionBlock.forward()
-
-        # Needs update
-        time_now = await self.simulator.get_time(format_time=True)
-        logger.info(f"Current time: {time_now}")
-        await self.needsBlock.forward()
-        current_need = await self.memory.get("current_need")
-        logger.info(f"Current need: {current_need}")
-
-        # Plan generation
-        current_plan = await self.memory.get("current_plan")
+    async def plan_generation(self):
+        """Generate plan"""
+        current_plan = await self.memory.status.get("current_plan")
+        current_need = await self.memory.status.get("current_need")
         if current_need != "none" and not current_plan:
             await self.planBlock.forward()
-
-
-
-
+
+    async def step_execution(self):
+        """Execute the current step"""
+        current_plan = await self.memory.status.get("current_plan")
+        execution_context = await self.memory.status.get("execution_context")
+        current_step = await self.memory.status.get("current_step")
+        # check current_step is valid (not empty)
         if current_step and current_step.get("type") and current_step.get("intention"):
             step_type = current_step.get("type")
-            position = await self.memory.get("position")
+            position = await self.memory.status.get("position")
             if "aoi_position" in position:
                 current_step["position"] = position["aoi_position"]["aoi_id"]
             current_step["start_time"] = int(await self.simulator.get_time())
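
The hunk above replaces the old flat `self.memory.get(...)` / `self.memory.update(...)` calls with the namespaced `self.memory.status.get(...)` / `self.memory.status.update(...)` and splits the old monolithic forward() into plan_generation() and step_execution(). The sketch below only illustrates the new call shape; StatusStub and MemoryStub are illustrative stand-ins, not pycityagent classes, and the plan payload is made up.

```python
import asyncio


class StatusStub:
    """Stand-in for the status section of the agent memory (illustrative only)."""

    def __init__(self) -> None:
        self._data = {"current_need": "hungry", "current_plan": None}

    async def get(self, key):
        return self._data.get(key)

    async def update(self, key, value):
        self._data[key] = value


class MemoryStub:
    """Stand-in exposing the 2.0.0a54-style `memory.status` accessor."""

    def __init__(self) -> None:
        self.status = StatusStub()


async def plan_generation(memory: MemoryStub) -> None:
    # a52 style (removed above):  await self.memory.get("current_plan")
    # a54 style (added above):    await self.memory.status.get("current_plan")
    current_plan = await memory.status.get("current_plan")
    current_need = await memory.status.get("current_need")
    if current_need != "none" and not current_plan:
        await memory.status.update("current_plan", {"steps": [], "completed": False})


async def main() -> None:
    memory = MemoryStub()
    await plan_generation(memory)
    print(await memory.status.get("current_plan"))


asyncio.run(main())
```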
@@ -144,22 +78,48 @@ class PlanAndActionBlock(Block):
             )
             result = None
             if step_type == "mobility":
-
-
-
+                if self.enable_mobility:  # type:ignore
+                    result = await self.mobilityBlock.forward(
+                        current_step, execution_context
+                    )
+                else:
+                    result = {
+                        "success": False,
+                        "evaluation": f"Mobility Behavior is disabled",
+                        "consumed_time": 0,
+                        "node_id": None,
+                    }
             elif step_type == "social":
-
+                if self.enable_social:  # type:ignore
+                    result = await self.socialBlock.forward(
+                        current_step, execution_context
+                    )
+                else:
+                    result = {
+                        "success": False,
+                        "evaluation": f"Social Behavior is disabled",
+                        "consumed_time": 0,
+                        "node_id": None,
+                    }
             elif step_type == "economy":
-
-
-
+                if self.enable_economy:  # type:ignore
+                    result = await self.economyBlock.forward(
+                        current_step, execution_context
+                    )
+                else:
+                    result = {
+                        "success": False,
+                        "evaluation": f"Economy Behavior is disabled",
+                        "consumed_time": 0,
+                        "node_id": None,
+                    }
             elif step_type == "other":
                 result = await self.otherBlock.forward(current_step, execution_context)
             if result != None:
                 logger.info(f"Execution result: {result}")
                 current_step["evaluation"] = result

-            #
+            # Update current_step, plan, and execution_context information
             current_step_index = next(
                 (
                     i
@@ -170,30 +130,58 @@ class PlanAndActionBlock(Block):
                 None,
             )
             current_plan["steps"][current_step_index] = current_step
-            await self.memory.update("current_step", current_step)
-            await self.memory.update("current_plan", current_plan)
-            await self.memory.update("execution_context", execution_context)
+            await self.memory.status.update("current_step", current_step)
+            await self.memory.status.update("current_plan", current_plan)
+            await self.memory.status.update("execution_context", execution_context)
+
+    async def forward(self):
+        # Long-term decision
+        await self.longTermDecisionBlock.forward()
+
+        # update needs
+        await self.needsBlock.forward()

+        # plan generation
+        await self.plan_generation()

-
-
+        # step execution
+        await self.step_execution()

+class MindBlock(Block):
+    """Cognition workflow"""
     cognitionBlock: CognitionBlock

     def __init__(self, llm: LLM, memory: Memory, simulator: Simulator):
         super().__init__(name="mind_block", llm=llm, memory=memory, simulator=simulator)
         self.cognitionBlock = CognitionBlock(
-            llm=llm, memory=memory, simulator=simulator
+            llm=self.llm, memory=self.memory, simulator=simulator
         )

     async def forward(self):
         await self.cognitionBlock.forward()

-
 class SocietyAgent(CitizenAgent):
     mindBlock: MindBlock
     planAndActionBlock: PlanAndActionBlock
     update_with_sim = UpdateWithSimulator()
+    configurable_fields = [
+        "enable_cognition",
+        "enable_mobility",
+        "enable_social",
+        "enable_economy",
+    ]
+    default_values = {
+        "enable_cognition": True,
+        "enable_mobility": True,
+        "enable_social": True,
+        "enable_economy": True,
+    }
+    fields_description = {
+        "enable_cognition": "Enable cognition workflow",
+        "enable_mobility": "Enable mobility workflow",
+        "enable_social": "Enable social workflow",
+        "enable_economy": "Enable economy workflow",
+    }

     def __init__(
         self,
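
The hunk above adds class-level configurable_fields / default_values / fields_description declarations to SocietyAgent, alongside plain enable_* attributes set in __init__. Below is a minimal sketch of that declaration pattern using a stand-in DemoAgent rather than the real class; how these fields are consumed by a configuration loader is not shown in this diff and is assumed here.

```python
class DemoAgent:
    # Same declaration pattern as SocietyAgent in 2.0.0a54; DemoAgent itself is
    # a stand-in and is not part of pycityagent.
    configurable_fields = [
        "enable_cognition",
        "enable_mobility",
        "enable_social",
        "enable_economy",
    ]
    default_values = {
        "enable_cognition": True,
        "enable_mobility": True,
        "enable_social": True,
        "enable_economy": True,
    }
    fields_description = {
        "enable_cognition": "Enable cognition workflow",
        "enable_mobility": "Enable mobility workflow",
        "enable_social": "Enable social workflow",
        "enable_economy": "Enable economy workflow",
    }

    def __init__(self) -> None:
        # Mirror of the plain enable_* attributes set in SocietyAgent.__init__.
        for field in self.configurable_fields:
            setattr(self, field, self.default_values[field])


agent = DemoAgent()
agent.enable_mobility = False  # a disabled workflow yields a "... Behavior is disabled" result
print({name: getattr(agent, name) for name in DemoAgent.configurable_fields})
```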
@@ -211,29 +199,224 @@ class SocietyAgent(CitizenAgent):
             economy_client=economy_client,
         )
         self.mindBlock = MindBlock(
-            llm=self.
+            llm=self.llm, memory=self.memory, simulator=self.simulator
         )
         self.planAndActionBlock = PlanAndActionBlock(
             agent=self,
-            llm=self.
-            memory=self.
-            simulator=self.
-            economy_client=self.
+            llm=self.llm,
+            memory=self.memory,
+            simulator=self.simulator,
+            economy_client=self.economy_client,
         )
         self.step_count = -1
+        self.cognition_update = -1
+
+        # config
+        self.enable_cognition = True
+        self.enable_mobility = True
+        self.enable_social = True
+        self.enable_economy = True

     # Main workflow
     async def forward(self):
-        logger.info(f"Agent {self._uuid} forward")
         self.step_count += 1
-
-
-
-
-
-        await
-
-
+        logger.info(f"Agent {self._uuid} forward [step_count: {self.step_count}]")
+        # sync agent status with simulator
+        await self.update_with_sim()
+
+        # check last step
+        if not await self.check_and_update_step():
+            return
+
+        await self.planAndActionBlock.forward()
+
+        if self.enable_cognition:
+            await self.mindBlock.forward()
+
+    async def check_and_update_step(self):
+        """Check if the previous step has been completed"""
+        status = await self.memory.status.get("status")
+        if status == 2:
+            # Agent is moving
+            logger.info("Agent is moving")
+            await asyncio.sleep(1)
+            return False
+
+        # Get the previous step information
+        current_step = await self.memory.status.get("current_step")
+        if current_step["intention"] == "" or current_step["type"] == "":
+            # No previous step, return directly
+            return True
+        time_now = int(await self.simulator.get_time())
+        step_start_time = current_step["start_time"]
+        step_consumed_time = current_step["evaluation"]["consumed_time"]
+        time_end_plan = step_start_time + int(step_consumed_time) * 60
+        if time_now >= time_end_plan:
+            # The previous step has been completed
+            current_plan = await self.memory.status.get("current_plan")
+            current_step["evaluation"]["consumed_time"] = (
+                time_now - step_start_time
+            ) / 60
+            current_plan["stream_nodes"].append(current_step["evaluation"]["node_id"])
+            if current_step["evaluation"]["success"]:
+                # Last step is completed
+                current_step_index = next(
+                    (
+                        i
+                        for i, step in enumerate(current_plan["steps"])
+                        if step["intention"] == current_step["intention"]
+                        and step["type"] == current_step["type"]
+                    ),
+                    None,
+                )
+                current_plan["steps"][current_step_index] = current_step
+                await self.memory.status.update("current_plan", current_plan)
+                if current_step_index is not None and current_step_index + 1 < len(
+                    current_plan["steps"]
+                ):
+                    next_step = current_plan["steps"][current_step_index + 1]
+                    await self.memory.status.update("current_step", next_step)
+                else:
+                    # Whole plan is completed
+                    current_plan["completed"] = True
+                    current_plan["end_time"] = await self.simulator.get_time(
+                        format_time=True
+                    )
+                    if self.enable_cognition:
+                        # Update emotion for the plan
+                        related_memories = await self.memory.stream.get_by_ids(current_plan["stream_nodes"])
+                        incident = f"You have successfully completed the plan: {related_memories}"
+                        conclusion = await self.mindBlock.cognitionBlock.emotion_update(incident)
+                        await self.memory.stream.add_cognition(description=conclusion)
+                        await self.memory.stream.add_cognition_to_memory(current_plan["stream_nodes"], conclusion)
+                    await self.memory.status.update("current_plan", current_plan)
+                    await self.memory.status.update("current_step", {"intention": "", "type": ""})
+                return True
+            else:
+                current_plan["failed"] = True
+                current_plan["end_time"] = await self.simulator.get_time(
+                    format_time=True
+                )
+                if self.enable_cognition:
+                    # Update emotion for the plan
+                    related_memories = await self.memory.stream.get_by_ids(current_plan["stream_nodes"])
+                    incident = f"You have failed to complete the plan: {related_memories}"
+                    conclusion = await self.mindBlock.cognitionBlock.emotion_update(incident)
+                    await self.memory.stream.add_cognition(description=conclusion)
+                    await self.memory.stream.add_cognition_to_memory(current_plan["stream_nodes"], conclusion)
+                await self.memory.status.update("current_plan", current_plan)
+                await self.memory.status.update("current_step", {"intention": "", "type": ""})
+        # The previous step has not been completed
+        return False
+
+        # check last step
+        if not await self.check_and_update_step():
+            return
+
+        await self.planAndActionBlock.forward()
+
+        if self.enable_cognition:
+            await self.mindBlock.forward()
+
+    async def check_and_update_step(self):
+        """Check if the previous step has been completed"""
+        status = await self.memory.status.get("status")
+        if status == 2:
+            # Agent is moving
+            logger.info("Agent is moving")
+            await asyncio.sleep(1)
+            return False
+
+        # Get the previous step information
+        current_step = await self.memory.status.get("current_step")
+        if current_step["intention"] == "" or current_step["type"] == "":
+            # No previous step, return directly
+            return True
+        time_now = int(await self.simulator.get_time())
+        step_start_time = current_step["start_time"]
+        step_consumed_time = current_step["evaluation"]["consumed_time"]
+        time_end_plan = step_start_time + int(step_consumed_time) * 60
+        if time_now >= time_end_plan:
+            # The previous step has been completed
+            current_plan = await self.memory.status.get("current_plan")
+            current_step["evaluation"]["consumed_time"] = (
+                time_now - step_start_time
+            ) / 60
+            current_plan["stream_nodes"].append(current_step["evaluation"]["node_id"])
+            if current_step["evaluation"]["success"]:
+                # Last step is completed
+                current_step_index = next(
+                    (
+                        i
+                        for i, step in enumerate(current_plan["steps"])
+                        if step["intention"] == current_step["intention"]
+                        and step["type"] == current_step["type"]
+                    ),
+                    None,
+                )
+                current_plan["steps"][current_step_index] = current_step
+                await self.memory.status.update("current_plan", current_plan)
+                if current_step_index is not None and current_step_index + 1 < len(
+                    current_plan["steps"]
+                ):
+                    next_step = current_plan["steps"][current_step_index + 1]
+                    await self.memory.status.update("current_step", next_step)
+                else:
+                    # Whole plan is completed
+                    current_plan["completed"] = True
+                    current_plan["end_time"] = await self.simulator.get_time(
+                        format_time=True
+                    )
+                    if self.enable_cognition:
+                        # Update emotion for the plan
+                        related_memories = await self.memory.stream.get_by_ids(
+                            current_plan["stream_nodes"]
+                        )
+                        incident = f"You have successfully completed the plan: {related_memories}"
+                        conclusion = await self.mindBlock.cognitionBlock.emotion_update(
+                            incident
+                        )
+                        await self.memory.stream.add_cognition(
+                            description=conclusion  # type:ignore
+                        )
+                        await self.memory.stream.add_cognition_to_memory(
+                            current_plan["stream_nodes"], conclusion  # type:ignore
+                        )
+                    await self.memory.status.update("current_plan", current_plan)
+                    await self.memory.status.update(
+                        "current_step", {"intention": "", "type": ""}
+                    )
+                return True
+            else:
+                current_plan["failed"] = True
+                current_plan["end_time"] = await self.simulator.get_time(
+                    format_time=True
+                )
+                if self.enable_cognition:
+                    # Update emotion for the plan
+                    related_memories = await self.memory.stream.get_by_ids(
+                        current_plan["stream_nodes"]
+                    )
+                    incident = (
+                        f"You have failed to complete the plan: {related_memories}"
+                    )
+                    conclusion = await self.mindBlock.cognitionBlock.emotion_update(
+                        incident
+                    )
+                    await self.memory.stream.add_cognition(
+                        description=conclusion  # type:ignore
+                    )
+                    await self.memory.stream.add_cognition_to_memory(
+                        current_plan["stream_nodes"], conclusion  # type:ignore
+                    )
+                await self.memory.status.update("current_plan", current_plan)
+                await self.memory.status.update(
+                    "current_step", {"intention": "", "type": ""}
+                )
+        # The previous step has not been completed
+        return False
+
+    async def process_agent_chat_response(self, payload: dict) -> str:  # type:ignore
         if payload["type"] == "social":
             resp = f"Agent {self._uuid} received agent chat response: {payload}"
             logger.info(resp)
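
The new check_and_update_step treats a step as finished once the simulator clock passes start_time plus the step's consumed_time converted to seconds; judging from the arithmetic in the hunk, start_time and the simulator clock are in seconds while evaluation["consumed_time"] is in minutes. A small worked sketch of that check (the unit reading is inferred from the code, not stated in the diff):

```python
def step_finished(time_now: int, step: dict) -> bool:
    """Same arithmetic as check_and_update_step above."""
    step_start_time = step["start_time"]                      # simulator time, seconds
    step_consumed_time = step["evaluation"]["consumed_time"]  # estimated duration, minutes
    time_end_plan = step_start_time + int(step_consumed_time) * 60
    return time_now >= time_end_plan


step = {"start_time": 28_800, "evaluation": {"consumed_time": 30}}  # started at 08:00, 30 min budget
print(step_finished(29_000, step))  # False: only ~3 minutes have passed
print(step_finished(30_600, step))  # True: 08:30 reached, step counts as finished
```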
@@ -256,9 +439,16 @@ class SocietyAgent(CitizenAgent):

                 if not content:
                     return ""
+
+                # Add memory
+                description = f"You received a social message: {content}"
+                await self.memory.stream.add_social(description=description)
+                if self.enable_cognition:
+                    # Update emotion
+                    await self.mindBlock.cognitionBlock.emotion_update(description)

                 # Get chat histories and ensure proper format
-                chat_histories = await self.
+                chat_histories = await self.memory.status.get("chat_histories") or {}
                 if not isinstance(chat_histories, dict):
                     chat_histories = {}

@@ -271,14 +461,14 @@ class SocietyAgent(CitizenAgent):

                 # Check propagation limit
                 if propagation_count > 5:
-                    await self.
+                    await self.memory.status.update("chat_histories", chat_histories)
                     logger.info(
                         f"Message propagation limit reached ({propagation_count} > 5), stopping propagation"
                     )
                     return ""

                 # Get relationship score
-                relationships = await self.
+                relationships = await self.memory.status.get("relationships") or {}
                 relationship_score = relationships.get(sender_id, 50)

                 # Decision prompt
@@ -286,11 +476,12 @@ class SocietyAgent(CitizenAgent):
         - Received message: "{content}"
         - Our relationship score: {relationship_score}/100
         - My profile: {{
-            "gender": "{await self.
-            "education": "{await self.
-            "personality": "{await self.
-            "occupation": "{await self.
+            "gender": "{await self.memory.status.get("gender") or ""}",
+            "education": "{await self.memory.status.get("education") or ""}",
+            "personality": "{await self.memory.status.get("personality") or ""}",
+            "occupation": "{await self.memory.status.get("occupation") or ""}"
         }}
+        - My current emotion: {await self.memory.status.get("emotion_types")}
         - Recent chat history: {chat_histories.get(sender_id, "")}

         Should I respond to this message? Consider:
@@ -300,7 +491,7 @@ class SocietyAgent(CitizenAgent):

         Answer only YES or NO."""

-                should_respond = await self._llm_client.atext_request(
+                should_respond = await self._llm_client.atext_request(  # type:ignore
                     [
                         {
                             "role": "system",
@@ -310,19 +501,20 @@ class SocietyAgent(CitizenAgent):
                     ]
                 )

-                if should_respond.strip().upper() != "YES":
-                    await self.
+                if should_respond.strip().upper() != "YES":  # type:ignore
+                    await self.memory.status.update("chat_histories", chat_histories)
                     return ""

                 response_prompt = f"""Based on:
         - Received message: "{content}"
         - Our relationship score: {relationship_score}/100
         - My profile: {{
-            "gender": "{await self.
-            "education": "{await self.
-            "personality": "{await self.
-            "occupation": "{await self.
+            "gender": "{await self.memory.status.get("gender") or ""}",
+            "education": "{await self.memory.status.get("education") or ""}",
+            "personality": "{await self.memory.status.get("personality") or ""}",
+            "occupation": "{await self.memory.status.get("occupation") or ""}"
         }}
+        - My current emotion: {await self.memory.status.get("emotion_types")}
         - Recent chat history: {chat_histories.get(sender_id, "")}

         Generate an appropriate response that:
@@ -333,7 +525,7 @@ class SocietyAgent(CitizenAgent):

         Response should be ONLY the message text, no explanations."""

-                response = await self.
+                response = await self.llm.atext_request(
                     [
                         {
                             "role": "system",
@@ -346,7 +538,7 @@ class SocietyAgent(CitizenAgent):
                 if response:
                     # Update chat history with response
                     chat_histories[sender_id] += f",me: {response}"
-                    await self.
+                    await self.memory.status.update("chat_histories", chat_histories)

                     # Send response
                     serialized_response = json.dumps(
@@ -359,7 +551,7 @@ class SocietyAgent(CitizenAgent):
                     await self.send_message_to_agent(sender_id, serialized_response)
                     logger.info("sender_id", sender_id)
                     logger.info("message", serialized_response)
-                    return response
+                    return response  # type:ignore

             except Exception as e:
                 logger.warning(f"Error in process_agent_chat_response: {str(e)}")
@@ -371,4 +563,10 @@ class SocietyAgent(CitizenAgent):
             value = float(value)
         else:
             value = int(value)
-        await self.memory.
+        description = f"You received a economic message: Your {key} has changed from {await self.memory.status.get(key)} to {value}"
+        await self.memory.status.update(key, value)
+        await self.memory.stream.add_economic(  # type:ignore
+            description=description
+        )
+        if self.enable_cognition:
+            await self.mindBlock.cognitionBlock.emotion_update(description)
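
The added lines above handle an economic update by describing the change, writing the new value into status memory, appending a stream memory entry, and, when cognition is enabled, feeding the description to the cognition block. Below is a self-contained sketch of that call order only; the *Stub classes and the "currency" key are placeholders, not the real pycityagent memory or cognition APIs.

```python
import asyncio


class StatusStub:
    def __init__(self) -> None:
        self._data = {"currency": 1000.0}

    async def get(self, key):
        return self._data.get(key)

    async def update(self, key, value):
        self._data[key] = value


class StreamStub:
    async def add_economic(self, description: str) -> None:
        print("stream memory <-", description)


class CognitionStub:
    async def emotion_update(self, description: str) -> None:
        print("emotion update <-", description)


async def on_economic_message(key, value, status, stream, cognition, enable_cognition=True):
    # Same order of operations as the added lines above: describe the change,
    # store the new value, record a stream memory, then optionally update emotion.
    old_value = await status.get(key)
    description = f"Your {key} has changed from {old_value} to {value}"
    await status.update(key, value)
    await stream.add_economic(description=description)
    if enable_cognition:
        await cognition.emotion_update(description)


asyncio.run(on_economic_message("currency", 1200.0, StatusStub(), StreamStub(), CognitionStub()))
```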
pycityagent/cli/wrapper.py
CHANGED
@@ -6,6 +6,7 @@ import signal
 _script_dir = os.path.dirname(os.path.abspath(__file__))
 _parent_dir = os.path.dirname(_script_dir)

+
 def wrapper(bin: str):
     binary_path = os.path.join(_parent_dir, bin)
     if not os.path.exists(binary_path):
@@ -21,12 +22,14 @@ def wrapper(bin: str):
         stdout=sys.stdout,
         stderr=sys.stderr,
     )
+
     # register signal handler
     def signal_handler(sig, frame):
         if p.poll() is None:
             p.send_signal(sig)
         else:
             sys.exit(p.poll())
+
     signals = [signal.SIGINT, signal.SIGTERM, signal.SIGHUP]
     for sig in signals:
         signal.signal(sig, signal_handler)
@@ -40,5 +43,6 @@ def wrapper(bin: str):
 def pycityagent_sim():
     wrapper("pycityagent-sim")

+
 def pycityagent_ui():
     wrapper("pycityagent-ui")
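
wrapper() spawns the bundled binary and forwards SIGINT/SIGTERM/SIGHUP to it so the child can shut down cleanly, then mirrors the child's exit status. A self-contained POSIX sketch of the same pattern follows; the Popen call and the final wait are not part of the hunks above and are filled in here as assumptions, with "sleep 30" as a stand-in child command.

```python
import signal
import subprocess
import sys

# Stand-in child command; wrapper() launches the bundled binary path instead.
p = subprocess.Popen(["sleep", "30"], stdout=sys.stdout, stderr=sys.stderr)


def signal_handler(sig, frame):
    if p.poll() is None:
        # Child still running: forward the signal so it can shut down cleanly.
        p.send_signal(sig)
    else:
        # Child already exited: propagate its exit code.
        sys.exit(p.poll())


for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGHUP):
    signal.signal(sig, signal_handler)

sys.exit(p.wait())  # block until the child exits, mirror its exit status
```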
pycityagent/environment/__init__.py
CHANGED
@@ -4,4 +4,10 @@ from .sence.static import LEVEL_ONE_PRE, POI_TYPE_DICT
 from .sim import AoiService, PersonService
 from .simulator import Simulator

-__all__ = [
+__all__ = [
+    "Simulator",
+    "POI_TYPE_DICT",
+    "LEVEL_ONE_PRE",
+    "PersonService",
+    "AoiService",
+]