pycityagent 2.0.0a94__cp311-cp311-macosx_11_0_arm64.whl → 2.0.0a96__cp311-cp311-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pycityagent/agent/agent.py +5 -5
- pycityagent/agent/agent_base.py +1 -6
- pycityagent/cityagent/__init__.py +6 -5
- pycityagent/cityagent/bankagent.py +2 -2
- pycityagent/cityagent/blocks/__init__.py +4 -4
- pycityagent/cityagent/blocks/cognition_block.py +7 -4
- pycityagent/cityagent/blocks/economy_block.py +227 -135
- pycityagent/cityagent/blocks/mobility_block.py +70 -27
- pycityagent/cityagent/blocks/needs_block.py +11 -12
- pycityagent/cityagent/blocks/other_block.py +2 -2
- pycityagent/cityagent/blocks/plan_block.py +22 -24
- pycityagent/cityagent/blocks/social_block.py +15 -17
- pycityagent/cityagent/blocks/utils.py +3 -2
- pycityagent/cityagent/firmagent.py +1 -1
- pycityagent/cityagent/governmentagent.py +1 -1
- pycityagent/cityagent/initial.py +1 -1
- pycityagent/cityagent/memory_config.py +0 -1
- pycityagent/cityagent/message_intercept.py +7 -8
- pycityagent/cityagent/nbsagent.py +1 -1
- pycityagent/cityagent/societyagent.py +1 -2
- pycityagent/configs/__init__.py +18 -0
- pycityagent/configs/exp_config.py +202 -0
- pycityagent/configs/sim_config.py +251 -0
- pycityagent/configs/utils.py +17 -0
- pycityagent/environment/__init__.py +2 -0
- pycityagent/{economy → environment/economy}/econ_client.py +14 -32
- pycityagent/environment/sim/sim_env.py +17 -24
- pycityagent/environment/simulator.py +36 -113
- pycityagent/llm/__init__.py +1 -2
- pycityagent/llm/llm.py +54 -167
- pycityagent/memory/memory.py +13 -12
- pycityagent/message/message_interceptor.py +5 -4
- pycityagent/message/messager.py +3 -5
- pycityagent/metrics/__init__.py +1 -1
- pycityagent/metrics/mlflow_client.py +20 -17
- pycityagent/pycityagent-sim +0 -0
- pycityagent/simulation/agentgroup.py +18 -20
- pycityagent/simulation/simulation.py +157 -210
- pycityagent/survey/manager.py +0 -2
- pycityagent/utils/__init__.py +3 -0
- pycityagent/utils/config_const.py +20 -0
- pycityagent/workflow/__init__.py +1 -2
- pycityagent/workflow/block.py +0 -3
- {pycityagent-2.0.0a94.dist-info → pycityagent-2.0.0a96.dist-info}/METADATA +7 -24
- {pycityagent-2.0.0a94.dist-info → pycityagent-2.0.0a96.dist-info}/RECORD +50 -46
- pycityagent/llm/llmconfig.py +0 -18
- /pycityagent/{economy → environment/economy}/__init__.py +0 -0
- {pycityagent-2.0.0a94.dist-info → pycityagent-2.0.0a96.dist-info}/LICENSE +0 -0
- {pycityagent-2.0.0a94.dist-info → pycityagent-2.0.0a96.dist-info}/WHEEL +0 -0
- {pycityagent-2.0.0a94.dist-info → pycityagent-2.0.0a96.dist-info}/entry_points.txt +0 -0
- {pycityagent-2.0.0a94.dist-info → pycityagent-2.0.0a96.dist-info}/top_level.txt +0 -0
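A pattern that recurs throughout the code changes below: calls to llm.atext_request now pass response_format={"type": "json_object"} and the reply is parsed with json.loads, falling back to a default value when parsing fails. The snippet that follows is a minimal, self-contained sketch of that parse-with-fallback step only; the helper name, the category check, and the default value are illustrative and are not part of the package.

import json
import logging

logger = logging.getLogger("pycityagent")


def parse_place_type(raw_response: str, categories: list, default: str = "home") -> str:
    # Sketch of the pattern used in this diff: the LLM is asked to return a JSON
    # object such as {"place_type": "shopping"}; if the reply cannot be parsed or
    # names an unknown category, fall back to a safe default.
    try:
        place_type = json.loads(raw_response)["place_type"]
        if place_type not in categories:
            raise ValueError(f"unknown category: {place_type}")
        return place_type
    except Exception:
        logger.warning(f"Wrong type of poi, raw response: {raw_response}")
        return default


print(parse_place_type('{"place_type": "shopping"}', ["home", "shopping"]))  # shopping
print(parse_place_type("not json", ["home", "shopping"]))                    # home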
@@ -1,17 +1,19 @@
+import json
+import logging
 import math
+import random
+from operator import itemgetter
 from typing import List
 
+import numpy as np
 import ray
 
 from pycityagent.environment.simulator import Simulator
 from pycityagent.llm import LLM
 from pycityagent.memory import Memory
 from pycityagent.workflow.block import Block
-import numpy as np
-from operator import itemgetter
-import random
-import logging
 from pycityagent.workflow.prompt import FormatPrompt
+
 from .dispatcher import BlockDispatcher
 
 logger = logging.getLogger("pycityagent")
@@ -22,6 +24,11 @@ As an intelligent decision system, please determine the type of place the user n
 User Plan: {plan}
 User requirement: {intention}
 Your output must be a single selection from {poi_category} without any additional text or explanation.
+
+Please response in json format (Do not return any other text), example:
+{{
+    "place_type": "shopping"
+}}
 """
 
 PLACE_SECOND_TYPE_SELECTION_PROMPT = """
@@ -29,6 +36,11 @@ As an intelligent decision system, please determine the type of place the user n
 User Plan: {plan}
 User requirement: {intention}
 Your output must be a single selection from {poi_category} without any additional text or explanation.
+
+Please response in json format (Do not return any other text), example:
+{{
+    "place_type": "shopping"
+}}
 """
 
 PLACE_ANALYSIS_PROMPT = """
@@ -37,6 +49,11 @@ User Plan: {plan}
 User requirement: {intention}
 
 Your output must be a single selection from ['home', 'workplace', 'other'] without any additional text or explanation.
+
+Please response in json format (Do not return any other text), example:
+{{
+    "place_type": "home"
+}}
 """
 
 RADIUS_PROMPT = """As an intelligent decision system, please determine the maximum travel radius (in meters) based on the current emotional state.
@@ -48,7 +65,11 @@ Your current thought: {thought}
 
 Please analyze how these emotions would affect travel willingness and return only a single integer number between 3000-200000 representing the maximum travel radius in meters. A more positive emotional state generally leads to greater willingness to travel further.
 
-
+Please response in json format (Do not return any other text), example:
+{{
+    "radius": 10000
+}}
+"""
 
 
 def gravity_model(pois):
@@ -121,7 +142,7 @@ class PlaceSelectionBlock(Block):
     """
 
     configurable_fields: List[str] = ["search_limit"]
-    default_values = {"search_limit":
+    default_values = {"search_limit": 50}
 
     def __init__(self, llm: LLM, memory: Memory, simulator: Simulator):
         super().__init__(
@@ -134,7 +155,7 @@ class PlaceSelectionBlock(Block):
         )
         self.radiusPrompt = FormatPrompt(RADIUS_PROMPT)
         # configurable fields
-        self.search_limit =
+        self.search_limit = 50
 
     async def forward(self, step, context):
         poi_cate = self.simulator.get_poi_cate()
@@ -143,8 +164,9 @@ class PlaceSelectionBlock(Block):
             intention=step["intention"],
             poi_category=list(poi_cate.keys()),
         )
-        levelOneType = await self.llm.atext_request(self.typeSelectionPrompt.to_dialog()) # type: ignore
+        levelOneType = await self.llm.atext_request(self.typeSelectionPrompt.to_dialog(), response_format={"type": "json_object"}) # type: ignore
         try:
+            levelOneType = json.loads(levelOneType)["place_type"]
             sub_category = poi_cate[levelOneType]
         except Exception as e:
             logger.warning(f"Wrong type of poi, raw response: {levelOneType}")
@@ -153,7 +175,12 @@ class PlaceSelectionBlock(Block):
         self.secondTypeSelectionPrompt.format(
             plan=context["plan"], intention=step["intention"], poi_category=sub_category
         )
-        levelTwoType = await self.llm.atext_request(self.secondTypeSelectionPrompt.to_dialog()) # type: ignore
+        levelTwoType = await self.llm.atext_request(self.secondTypeSelectionPrompt.to_dialog(), response_format={"type": "json_object"}) # type: ignore
+        try:
+            levelTwoType = json.loads(levelTwoType)["place_type"]
+        except Exception as e:
+            logger.warning(f"Wrong type of poi, raw response: {levelTwoType}")
+            levelTwoType = random.choice(sub_category)
         center = await self.memory.status.get("position")
         center = (center["xy_position"]["x"], center["xy_position"]["y"])
         self.radiusPrompt.format(
@@ -162,23 +189,28 @@ class PlaceSelectionBlock(Block):
             weather=self.simulator.sence("weather"),
             temperature=self.simulator.sence("temperature"),
         )
-        radius =
+        radius = await self.llm.atext_request(self.radiusPrompt.to_dialog(), response_format={"type": "json_object"}) # type: ignore
         try:
-
-
-
-
-
-
+            radius = int(json.loads(radius)["radius"])
+            pois = ray.get(
+                self.simulator.map.query_pois.remote(
+                    center=center,
+                    category_prefix=levelTwoType,
+                    radius=radius,
+                    limit=self.search_limit,
+                )
+            )
         except Exception as e:
             logger.warning(f"Error querying pois: {e}")
-
-            pois = ray.get(
-
-
-
-
-
+            radius = 10000
+            pois = ray.get(
+                self.simulator.map.query_pois.remote(
+                    center=center,
+                    category_prefix=levelTwoType,
+                    radius=radius,
+                    limit=self.search_limit,
+                )
+            )
         if len(pois) > 0:
             pois = gravity_model(pois)
             probabilities = [item[2] for item in pois]
@@ -233,10 +265,12 @@ class MoveBlock(Block):
         self.placeAnalysisPrompt.format(
             plan=context["plan"], intention=step["intention"]
         )
-
-
-
-
+        response = await self.llm.atext_request(self.placeAnalysisPrompt.to_dialog(), response_format={"type": "json_object"}) # type: ignore
+        try:
+            response = json.loads(response)["place_type"]
+        except Exception as e:
+            logger.warning(f"Wrong type of poi, raw response: {response}")
+            response = "home"
         if response == "home":
             # 返回到家
             home = await self.memory.status.get("home")
@@ -260,6 +294,9 @@ class MoveBlock(Block):
                 person_id=agent_id,
                 target_positions=home,
             )
+            number_poi_visited = await self.memory.status.get("number_poi_visited")
+            number_poi_visited += 1
+            await self.memory.status.update("number_poi_visited", number_poi_visited)
             return {
                 "success": True,
                 "evaluation": f"Successfully returned home",
@@ -290,6 +327,9 @@ class MoveBlock(Block):
                 person_id=agent_id,
                 target_positions=work,
            )
+            number_poi_visited = await self.memory.status.get("number_poi_visited")
+            number_poi_visited += 1
+            await self.memory.status.update("number_poi_visited", number_poi_visited)
             return {
                 "success": True,
                 "evaluation": f"Successfully reached the workplace",
@@ -322,6 +362,9 @@ class MoveBlock(Block):
             person_id=agent_id,
             target_positions=next_place[1],
         )
+        number_poi_visited = await self.memory.status.get("number_poi_visited")
+        number_poi_visited += 1
+        await self.memory.status.update("number_poi_visited", number_poi_visited)
         return {
             "success": True,
             "evaluation": f"Successfully reached the destination: {next_place}",
@@ -31,7 +31,7 @@ Current satisfaction levels (0-1 float values, lower means less satisfied):
 - safety_satisfaction: Safety satisfaction level (Normally, the agent will be more satisfied with safety when they have high income and currency)
 - social_satisfaction: Social satisfaction level
 
-
+Please response in json format (Do not return any other text), example:
 {{
     "current_satisfaction": {{
         "hunger_satisfaction": 0.8,
@@ -64,12 +64,12 @@ Notes:
 2. If the current need is not "whatever", only return the new value for the current need. Otherwise, return both safe and social need values.
 3. Ensure the return value is in valid JSON format, examples below:
 
-
+Please response in json format for specific need (hungry here) adjustment (Do not return any other text), example:
 {{
     "hunger_satisfaction": new_need_value
 }}
 
-
+Please response in json format for whatever need adjustment (Do not return any other text), example:
 {{
     "safety_satisfaction": new_safe_value,
     "social_satisfaction": new_social_value
@@ -93,23 +93,22 @@ class NeedsBlock(Block):
         self.token_consumption = 0
         self.initialized = False
         self.alpha_H, self.alpha_D, self.alpha_P, self.alpha_C = (
-            0.
+            0.15,
             0.08,
             0.05,
             0.1,
         ) # Hunger decay rate, Energy decay rate, Safety decay rate, Social decay rate
         self.T_H, self.T_D, self.T_P, self.T_C = (
-            0.1,
             0.2,
             0.2,
-            0.
+            0.2,
+            0.3,
         ) # Hunger threshold, Energy threshold, Safety threshold, Social threshold
 
     async def initialize(self):
         day = await self.simulator.get_simulator_day()
         if day != self.now_day:
             self.now_day = day
-            self.initialized = False
             workday = self.simulator.sence("day")
             if workday == "Workday":
                 self.need_work = True
@@ -126,7 +125,7 @@ class NeedsBlock(Block):
             income=await self.memory.status.get("income"),
             now_time=await self.simulator.get_time(format_time=True),
         )
-        response = await self.llm.atext_request(self.initial_prompt.to_dialog())
+        response = await self.llm.atext_request(self.initial_prompt.to_dialog(), response_format={"type": "json_object"})
         response = self.clean_json_response(response)
         retry = 3
         while retry > 0:
@@ -229,13 +228,13 @@ class NeedsBlock(Block):
                 await self.memory.status.update("current_need", "hungry")
             elif energy_satisfaction <= self.T_D:
                 await self.memory.status.update("current_need", "tired")
+            elif self.need_work:
+                await self.memory.status.update("current_need", "safe")
+                self.need_work = False
             elif safety_satisfaction <= self.T_P:
                 await self.memory.status.update("current_need", "safe")
             elif social_satisfaction <= self.T_C:
                 await self.memory.status.update("current_need", "social")
-            elif self.need_work:
-                await self.memory.status.update("current_need", "safe")
-                self.need_work = False
             else:
                 await self.memory.status.update("current_need", "whatever")
         else:
@@ -308,7 +307,7 @@ class NeedsBlock(Block):
 
         retry = 3
         while retry > 0:
-            response = await self.llm.atext_request(self.evaluation_prompt.to_dialog())
+            response = await self.llm.atext_request(self.evaluation_prompt.to_dialog(), response_format={"type": "json_object"})
             try:
                 new_satisfaction = json.loads(self.clean_json_response(response)) # type: ignore
                 # 更新所有需求的数值
@@ -25,7 +25,7 @@ class SleepBlock(Block):
             intention=step["intention"],
             emotion_types=await self.memory.status.get("emotion_types"),
         )
-        result = await self.llm.atext_request(self.guidance_prompt.to_dialog())
+        result = await self.llm.atext_request(self.guidance_prompt.to_dialog(), response_format={"type": "json_object"})
         result = clean_json_response(result)
         node_id = await self.memory.stream.add_other(description=f"I slept")
         try:
@@ -63,7 +63,7 @@ class OtherNoneBlock(Block):
             intention=step["intention"],
             emotion_types=await self.memory.status.get("emotion_types"),
         )
-        result = await self.llm.atext_request(self.guidance_prompt.to_dialog())
+        result = await self.llm.atext_request(self.guidance_prompt.to_dialog(), response_format={"type": "json_object"})
         result = clean_json_response(result)
         node_id = await self.memory.stream.add_other(
             description=f"I {step['intention']}"
@@ -31,7 +31,7 @@ Please evaluate and select the most appropriate option based on these three dime
 2. Subjective Norm: Social environment and others' views on this behavior
 3. Perceived Control: Difficulty and controllability of executing this option
 
-Please
+Please response in json format (Do not return any other text), example:
 {{
     "selected_option": "Select the most suitable option from available options",
     "evaluation": {{
@@ -54,19 +54,6 @@ Current time: {current_time}
 Your emotion: {emotion_types}
 Your thought: {thought}
 
-Please generate specific execution steps and return in JSON format:
-{{
-    "plan": {{
-        "target": "Specific goal",
-        "steps": [
-            {{
-                "intention": "Specific intention",
-                "type": "Step type"
-            }}
-        ]
-    }}
-}}
-
 Notes:
 1. type can only be one of these four: mobility, social, economy, other
 1.1 mobility: Decisions or behaviors related to large-scale spatial movement, such as location selection, going to a place, etc.
@@ -76,7 +63,7 @@ Notes:
 2. steps should only include steps necessary to fulfill the target (limited to {max_plan_steps} steps)
 3. intention in each step should be concise and clear
 
-
+Please response in json format (Do not return any other text), example:
 {{
     "plan": {{
         "target": "Eat at home",
@@ -172,10 +159,10 @@ class PlanBlock(Block):
         self.token_consumption = 0
         self.guidance_options = {
             "hungry": ["Eat at home", "Eat outside"],
-            "tired": ["Sleep"
+            "tired": ["Sleep"],
             "safe": ["Go to work"],
             "social": ["Contact with friends", "Shopping"],
-            "whatever": ["Contact with friends", "Hang out"],
+            "whatever": ["Contact with friends", "Hang out", "Entertainment"],
         }
 
         # configurable fields
@@ -211,7 +198,7 @@ class PlanBlock(Block):
         )
 
         response = await self.llm.atext_request(
-            self.guidance_prompt.to_dialog()
+            self.guidance_prompt.to_dialog(), response_format={"type": "json_object"}
         ) # type: ignore
         retry = 3
         while retry > 0:
@@ -219,8 +206,15 @@ class PlanBlock(Block):
                 result = json.loads(self.clean_json_response(response)) # type: ignore
                 if "selected_option" not in result or "evaluation" not in result:
                     raise ValueError("Invalid guidance selection format")
-                if
-
+                if (
+                    "attitude" not in result["evaluation"]
+                    or "subjective_norm" not in result["evaluation"]
+                    or "perceived_control" not in result["evaluation"]
+                    or "reasoning" not in result["evaluation"]
+                ):
+                    raise ValueError(
+                        "Evaluation must include attitude, subjective_norm, perceived_control, and reasoning"
+                    )
                 return result
             except Exception as e:
                 logger.warning(f"Error parsing guidance selection response: {str(e)}")
@@ -228,7 +222,7 @@ class PlanBlock(Block):
         return None
 
     async def generate_detailed_plan(
-        self,
+        self, selected_option: str
    ) -> Dict:
         """Generate detailed execution plan"""
         position_now = await self.memory.status.get("position")
@@ -262,7 +256,11 @@ class PlanBlock(Block):
         while retry > 0:
             try:
                 result = json.loads(self.clean_json_response(response)) # type: ignore
-                if
+                if (
+                    "plan" not in result
+                    or "target" not in result["plan"]
+                    or "steps" not in result["plan"]
+                ):
                     raise ValueError("Invalid plan format")
                 for step in result["plan"]["steps"]:
                     if "intention" not in step or "type" not in step:
@@ -282,10 +280,10 @@ class PlanBlock(Block):
 
         # Step 2: Generate detailed plan
         detailed_plan = await self.generate_detailed_plan(
-
+            guidance_result["selected_option"]
         )
 
-        if not detailed_plan
+        if not detailed_plan:
             await self.memory.status.update("current_plan", None)
             await self.memory.status.update(
                 "current_step", {"intention": "", "type": ""}
@@ -18,12 +18,10 @@ logger = logging.getLogger("pycityagent")
 
 
 class MessagePromptManager:
-    def __init__(self
-
-        self.format_prompt = FormatPrompt(self.template)
-        self.to_discuss = to_discuss
+    def __init__(self):
+        pass
 
-    async def get_prompt(self, memory, step: Dict[str, Any], target: str) -> str:
+    async def get_prompt(self, memory, step: Dict[str, Any], target: str, template: str) -> str:
         """在这里改给模板输入的数据"""
         # 获取数据
         relationships = await memory.status.get("relationships") or {}
@@ -31,14 +29,17 @@ class MessagePromptManager:
 
         # 构建讨论话题约束
        discussion_constraint = ""
-
-
+        topics = await memory.status.get("attitude")
+        topics = topics.keys()
+        if topics:
+            topics = ", ".join(f'"{topic}"' for topic in topics)
             discussion_constraint = (
                 f"Limit your discussion to the following topics: {topics}."
             )
 
         # 格式化提示
-
+        format_prompt = FormatPrompt(template)
+        format_prompt.format(
             gender=await memory.status.get("gender") or "",
             education=await memory.status.get("education") or "",
             personality=await memory.status.get("personality") or "",
@@ -55,7 +56,7 @@ class MessagePromptManager:
             discussion_constraint=discussion_constraint,
         )
 
-        return
+        return format_prompt.to_dialog()
 
 
 class SocialNoneBlock(Block):
@@ -75,7 +76,7 @@ class SocialNoneBlock(Block):
             intention=step["intention"],
             emotion_types=await self.memory.status.get("emotion_types"),
         )
-        result = await self.llm.atext_request(self.guidance_prompt.to_dialog())
+        result = await self.llm.atext_request(self.guidance_prompt.to_dialog(), response_format={"type": "json_object"})
         result = clean_json_response(result)
         try:
             result = json.loads(result)
@@ -270,11 +271,8 @@ class MessageBlock(Block):
         The message should reflect my personality and background.
         {discussion_constraint}
         """
-        self.to_discuss = []
 
-        self.prompt_manager = MessagePromptManager(
-            self.default_message_template, self.to_discuss
-        )
+        self.prompt_manager = MessagePromptManager()
 
     def _serialize_message(self, message: str, propagation_count: int) -> str:
         try:
@@ -305,7 +303,7 @@ class MessageBlock(Block):
 
         # Get formatted prompt using prompt manager
         formatted_prompt = await self.prompt_manager.get_prompt(
-            self.memory, step, target
+            self.memory, step, target, self.default_message_template
         )
 
         # Generate message
@@ -319,8 +317,8 @@ class MessageBlock(Block):
             chat_histories = {}
         if target not in chat_histories:
             chat_histories[target] = ""
-
-            chat_histories[target] += "
+        elif len(chat_histories[target]) > 0:
+            chat_histories[target] += ", "
         chat_histories[target] += f"me: {message}"
 
         await self.memory.status.update("chat_histories", chat_histories)
@@ -17,12 +17,13 @@ Examples:
 - "Read a book": {{"time": 90}}
 - "Exercise": {{"time": 45}}
 
-Please return the result in JSON format (Do not return any other text):
+Please return the result in JSON format (Do not return any other text), example:
 {{
-    "time":
+    "time": 10
 }}
 """
 
+
 def prettify_document(document: str) -> str:
     # Remove sequences of whitespace characters (including newlines)
     cleaned = re.sub(r"\s+", " ", document).strip()
@@ -3,7 +3,7 @@ from typing import Optional
 import numpy as np
 from pycityagent import Simulator, InstitutionAgent
 from pycityagent.llm import LLM
-from pycityagent.
+from pycityagent.environment import EconomyClient
 from pycityagent.message import Messager
 from pycityagent.memory import Memory
 import logging
@@ -4,7 +4,7 @@ from typing import Optional
 import numpy as np
 from pycityagent import Simulator, InstitutionAgent
 from pycityagent.llm.llm import LLM
-from pycityagent.
+from pycityagent.environment import EconomyClient
 from pycityagent.message import Messager
 from pycityagent.memory import Memory
 import logging
pycityagent/cityagent/initial.py
CHANGED
@@ -63,7 +63,7 @@ async def initialize_social_network(simulation):
 
         # Initialize empty chat histories and interaction records
         await simulation.update(
-            agent_id, "chat_histories", {friend_id:
+            agent_id, "chat_histories", {friend_id: "" for friend_id in friends}
         )
         await simulation.update(
             agent_id, "interactions", {friend_id: [] for friend_id in friends}
@@ -88,7 +88,6 @@ def memory_config_societyagent():
         "relation_types": (dict, {}, False),
         "chat_histories": (dict, {}, False), # all chat histories
         "interactions": (dict, {}, False), # all interaction records
-        "to_discuss": (dict, {}, False),
         # mobility
         "number_poi_visited": (int, 1, False),
     }
@@ -1,5 +1,4 @@
 import asyncio
-import json
 
 from pycityagent.llm import LLM
 from pycityagent.message import MessageBlockBase, MessageBlockListenerBase
@@ -8,15 +7,15 @@ from pycityagent.message import MessageBlockBase, MessageBlockListenerBase
 async def check_message(
     from_uuid: str, to_uuid: str, llm_client: LLM, content: str
 ) -> bool:
-    print(f"\n
+    print(f"\n Checking Message: {from_uuid} -> {to_uuid}: {content}")
     is_valid = True
     prompt = f"""
-
-
-
-
+        Please determine if the following message is emotionally provocative:
+        Sender ID: {from_uuid}
+        Receiver ID: {to_uuid}
+        Message content: {content}
 
-
+        If the message is emotionally provocative, please return False; if the message is normal, please return True.
     """
     for _ in range(10):
         try:
@@ -33,7 +32,7 @@ async def check_message(
             pass
         else:
             raise RuntimeError(f"Request for message interception prompt=`{prompt}` failed")
-    print(f"
+    print(f"Message Check: {'Valid' if is_valid else 'Invalid'}")
     return is_valid
 
 
@@ -5,7 +5,7 @@ from typing import Optional
 import numpy as np
 
 from pycityagent import InstitutionAgent, Simulator
-from pycityagent.
+from pycityagent.environment import EconomyClient
 from pycityagent.llm.llm import LLM
 from pycityagent.memory import Memory
 from pycityagent.message import Messager
@@ -6,7 +6,7 @@ from typing import Optional
 
 from pycityagent import CitizenAgent, Simulator
 from pycityagent.agent import Agent
-from pycityagent.
+from pycityagent.environment import EconomyClient
 from pycityagent.llm.llm import LLM
 from pycityagent.memory import Memory
 from pycityagent.tools import UpdateWithSimulator
@@ -427,7 +427,6 @@ class SocietyAgent(CitizenAgent):
         )
 
         if should_respond.strip().upper() != "YES": # type:ignore
-            await self.memory.status.update("chat_histories", chat_histories)
             return ""
 
         response_prompt = f"""Based on:
@@ -0,0 +1,18 @@
+from typing import TYPE_CHECKING
+
+from .exp_config import AgentConfig, ExpConfig, WorkflowStep
+from .sim_config import (LLMRequestConfig, MapRequestConfig, MlflowConfig,
+                         SimConfig, SimulatorRequestConfig)
+from .utils import load_config_from_file
+
+__all__ = [
+    "SimConfig",
+    "SimulatorRequestConfig",
+    "MapRequestConfig",
+    "MlflowConfig",
+    "LLMRequestConfig",
+    "ExpConfig",
+    "load_config_from_file",
+    "WorkflowStep",
+    "AgentConfig",
+]