pycityagent 2.0.0a65__cp39-cp39-macosx_11_0_arm64.whl → 2.0.0a67__cp39-cp39-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pycityagent/agent/agent.py +157 -57
- pycityagent/agent/agent_base.py +316 -43
- pycityagent/cityagent/bankagent.py +49 -9
- pycityagent/cityagent/blocks/__init__.py +1 -2
- pycityagent/cityagent/blocks/cognition_block.py +54 -31
- pycityagent/cityagent/blocks/dispatcher.py +22 -17
- pycityagent/cityagent/blocks/economy_block.py +46 -32
- pycityagent/cityagent/blocks/mobility_block.py +209 -105
- pycityagent/cityagent/blocks/needs_block.py +101 -54
- pycityagent/cityagent/blocks/other_block.py +42 -33
- pycityagent/cityagent/blocks/plan_block.py +59 -42
- pycityagent/cityagent/blocks/social_block.py +167 -126
- pycityagent/cityagent/blocks/utils.py +13 -6
- pycityagent/cityagent/firmagent.py +17 -35
- pycityagent/cityagent/governmentagent.py +3 -3
- pycityagent/cityagent/initial.py +79 -49
- pycityagent/cityagent/memory_config.py +123 -94
- pycityagent/cityagent/message_intercept.py +0 -4
- pycityagent/cityagent/metrics.py +41 -0
- pycityagent/cityagent/nbsagent.py +24 -36
- pycityagent/cityagent/societyagent.py +9 -4
- pycityagent/cli/wrapper.py +2 -2
- pycityagent/economy/econ_client.py +407 -81
- pycityagent/environment/__init__.py +0 -3
- pycityagent/environment/sim/__init__.py +0 -3
- pycityagent/environment/sim/aoi_service.py +2 -2
- pycityagent/environment/sim/client.py +3 -31
- pycityagent/environment/sim/clock_service.py +2 -2
- pycityagent/environment/sim/lane_service.py +8 -8
- pycityagent/environment/sim/light_service.py +8 -8
- pycityagent/environment/sim/pause_service.py +9 -10
- pycityagent/environment/sim/person_service.py +20 -20
- pycityagent/environment/sim/road_service.py +2 -2
- pycityagent/environment/sim/sim_env.py +21 -5
- pycityagent/environment/sim/social_service.py +4 -4
- pycityagent/environment/simulator.py +249 -27
- pycityagent/environment/utils/__init__.py +2 -2
- pycityagent/environment/utils/geojson.py +2 -2
- pycityagent/environment/utils/grpc.py +4 -4
- pycityagent/environment/utils/map_utils.py +2 -2
- pycityagent/llm/embeddings.py +147 -28
- pycityagent/llm/llm.py +178 -111
- pycityagent/llm/llmconfig.py +5 -0
- pycityagent/llm/utils.py +4 -0
- pycityagent/memory/__init__.py +0 -4
- pycityagent/memory/const.py +2 -2
- pycityagent/memory/faiss_query.py +140 -61
- pycityagent/memory/memory.py +394 -91
- pycityagent/memory/memory_base.py +140 -34
- pycityagent/memory/profile.py +13 -13
- pycityagent/memory/self_define.py +13 -13
- pycityagent/memory/state.py +14 -14
- pycityagent/message/message_interceptor.py +253 -3
- pycityagent/message/messager.py +133 -6
- pycityagent/metrics/mlflow_client.py +47 -4
- pycityagent/pycityagent-sim +0 -0
- pycityagent/pycityagent-ui +0 -0
- pycityagent/simulation/__init__.py +3 -2
- pycityagent/simulation/agentgroup.py +150 -54
- pycityagent/simulation/simulation.py +276 -66
- pycityagent/survey/manager.py +45 -3
- pycityagent/survey/models.py +42 -2
- pycityagent/tools/__init__.py +1 -2
- pycityagent/tools/tool.py +93 -69
- pycityagent/utils/avro_schema.py +2 -2
- pycityagent/utils/parsers/code_block_parser.py +1 -1
- pycityagent/utils/parsers/json_parser.py +2 -2
- pycityagent/utils/parsers/parser_base.py +2 -2
- pycityagent/workflow/block.py +64 -13
- pycityagent/workflow/prompt.py +31 -23
- pycityagent/workflow/trigger.py +91 -24
- {pycityagent-2.0.0a65.dist-info → pycityagent-2.0.0a67.dist-info}/METADATA +2 -2
- pycityagent-2.0.0a67.dist-info/RECORD +97 -0
- pycityagent/environment/interact/__init__.py +0 -0
- pycityagent/environment/interact/interact.py +0 -198
- pycityagent/environment/message/__init__.py +0 -0
- pycityagent/environment/sence/__init__.py +0 -0
- pycityagent/environment/sence/static.py +0 -416
- pycityagent/environment/sidecar/__init__.py +0 -8
- pycityagent/environment/sidecar/sidecarv2.py +0 -109
- pycityagent/environment/sim/economy_services.py +0 -192
- pycityagent/metrics/utils/const.py +0 -0
- pycityagent-2.0.0a65.dist-info/RECORD +0 -105
- {pycityagent-2.0.0a65.dist-info → pycityagent-2.0.0a67.dist-info}/LICENSE +0 -0
- {pycityagent-2.0.0a65.dist-info → pycityagent-2.0.0a67.dist-info}/WHEEL +0 -0
- {pycityagent-2.0.0a65.dist-info → pycityagent-2.0.0a67.dist-info}/entry_points.txt +0 -0
- {pycityagent-2.0.0a65.dist-info → pycityagent-2.0.0a67.dist-info}/top_level.txt +0 -0
pycityagent/cityagent/blocks/social_block.py

@@ -1,19 +1,22 @@
-# 由于目前模拟器支持的限制,现在只有Dispatcher中只有NoneBlock,MessageBlock和FindPersonBlock。
+# 由于目前模拟器支持的限制,现在只有Dispatcher中只有NoneBlock,MessageBlock和FindPersonBlock。
 
-import random
 import json
-
+import logging
+import random
+from typing import Any, Dict, List, Optional
+
+from pycityagent.environment.simulator import Simulator
 from pycityagent.llm.llm import LLM
-from pycityagent.workflow.block import Block
 from pycityagent.memory import Memory
-from pycityagent.
+from pycityagent.workflow.block import Block
 from pycityagent.workflow.prompt import FormatPrompt
+
 from .dispatcher import BlockDispatcher
-from .utils import
-import logging
+from .utils import TIME_ESTIMATE_PROMPT, clean_json_response
 
 logger = logging.getLogger("pycityagent")
 
+
 class MessagePromptManager:
     def __init__(self, template: str, to_discuss: List[str]):
         self.template = template
@@ -30,8 +33,10 @@ class MessagePromptManager:
         discussion_constraint = ""
         if self.to_discuss:
             topics = ", ".join(f'"{topic}"' for topic in self.to_discuss)
-            discussion_constraint =
-
+            discussion_constraint = (
+                f"Limit your discussion to the following topics: {topics}."
+            )
+
         # 格式化提示
         self.format_prompt.format(
             gender=await memory.status.get("gender") or "",
@@ -42,49 +47,63 @@ class MessagePromptManager:
             intention=step.get("intention", ""),
             emotion_types=await memory.status.get("emotion_types"),
             thought=await memory.status.get("thought"),
-            chat_history=
-
+            chat_history=(
+                chat_histories.get(target, "")
+                if isinstance(chat_histories, dict)
+                else ""
+            ),
+            discussion_constraint=discussion_constraint,
         )
-
+
         return self.format_prompt.to_dialog()
 
+
 class SocialNoneBlock(Block):
     """
    空操作
    NoneBlock
     """
+
     def __init__(self, llm: LLM, memory: Memory):
         super().__init__("NoneBlock", llm=llm, memory=memory)
         self.description = "Handle all other cases"
         self.guidance_prompt = FormatPrompt(template=TIME_ESTIMATE_PROMPT)
 
     async def forward(self, step, context):
-        self.guidance_prompt.format(
-
-
+        self.guidance_prompt.format(
+            plan=context["plan"],
+            intention=step["intention"],
+            emotion_types=await self.memory.status.get("emotion_types"),
+        )
         result = await self.llm.atext_request(self.guidance_prompt.to_dialog())
         result = clean_json_response(result)
         try:
             result = json.loads(result)
-            node_id = await self.memory.stream.add_social(
+            node_id = await self.memory.stream.add_social(
+                description=f"I {step['intention']}"
+            )
             return {
-
-
-
-
+                "success": True,
+                "evaluation": f'Finished {step["intention"]}',
+                "consumed_time": result["time"],
+                "node_id": node_id,
             }
         except Exception as e:
             logger.warning(f"解析时间评估响应时发生错误: {str(e)}, 原始结果: {result}")
-            node_id = await self.memory.stream.add_social(
+            node_id = await self.memory.stream.add_social(
+                description=f"I failed to execute {step['intention']}"
+            )
             return {
-
-
-
-
+                "success": False,
+                "evaluation": f'Failed to execute {step["intention"]}',
+                "consumed_time": 5,
+                "node_id": node_id,
             }
 
+
 class FindPersonBlock(Block):
     """寻找社交对象"""
+
     def __init__(self, llm: LLM, memory: Memory, simulator: Simulator):
         super().__init__("FindPersonBlock", llm=llm, memory=memory, simulator=simulator)
         self.description = "Find a suitable person to socialize with"
@@ -122,39 +141,42 @@ class FindPersonBlock:
        ['offline', 2] - means meet the third friend offline
        """
 
-    async def forward(
+    async def forward(
+        self, step: Dict[str, Any], context: Optional[Dict] = None
+    ) -> Dict[str, Any]:
         try:
             # Get friends list and relationship strength
             friends = await self.memory.status.get("friends") or []
             relationships = await self.memory.status.get("relationships") or {}
-
+
             if not friends:
-                node_id = await self.memory.stream.add_social(
+                node_id = await self.memory.stream.add_social(
+                    description=f"I can't find any friends to socialize with."
+                )
                 return {
-
-
-
-
+                    "success": False,
+                    "evaluation": "No friends found in social network",
+                    "consumed_time": 5,
+                    "node_id": node_id,
                 }
-
+
             # Create a list of friends with all information
             friend_info = []
             index_to_uuid = {}
-
+
             for i, friend_id in enumerate(friends):
                 relationship_strength = relationships.get(friend_id, 0)
-                friend_info.append(
-
-
-                })
+                friend_info.append(
+                    {"index": i, "relationship_strength": relationship_strength}
+                )
                 index_to_uuid[i] = friend_id
-
+
             # Format friend information for easier reading
             formatted_friend_info = {
-                i: {
+                i: {"relationship_strength": info["relationship_strength"]}
                 for i, info in enumerate(friend_info)
             }
-
+
             # Format the prompt
             formatted_prompt = FormatPrompt(self.prompt)
             formatted_prompt.format(
@@ -165,61 +187,66 @@ class FindPersonBlock:
                 intention=str(step.get("intention", "socialize")),
                 emotion_types=str(await self.memory.status.get("emotion_types")),
                 thought=str(await self.memory.status.get("thought")),
-                friend_info=str(formatted_friend_info)
+                friend_info=str(formatted_friend_info),
             )
-
+
             # Get LLM response
-            response = await self.llm.atext_request(
-
+            response = await self.llm.atext_request(
+                formatted_prompt.to_dialog(), timeout=300
+            )
+
             try:
                 # Parse the response
                 mode, friend_index = eval(response)
-
+
                 # Validate the response format
-                if not isinstance(mode, str) or mode not in [
+                if not isinstance(mode, str) or mode not in ["online", "offline"]:
                     raise ValueError("Invalid mode")
-                if
+                if (
+                    not isinstance(friend_index, int)
+                    or friend_index not in index_to_uuid
+                ):
                     raise ValueError("Invalid friend index")
-
+
                 # Convert index to UUID
                 target = index_to_uuid[friend_index]
-                context[
+                context["target"] = target
             except Exception as e:
                 # If parsing fails, select the friend with the strongest relationship as the default option
-                target =
-
-
-
+                target = (
+                    max(relationships.items(), key=lambda x: x[1])[0]
+                    if relationships
+                    else friends[0]
+                )
+                mode = "online"
+
+            node_id = await self.memory.stream.add_social(
+                description=f"I selected the friend {target} for {mode} interaction"
+            )
             return {
-
-
-
-
-
-
+                "success": True,
+                "evaluation": f"Selected friend {target} for {mode} interaction",
+                "consumed_time": 15,
+                "mode": mode,
+                "target": target,
+                "node_id": node_id,
             }
-
+
         except Exception as e:
-            node_id = await self.memory.stream.add_social(
+            node_id = await self.memory.stream.add_social(
+                description=f"I can't find any friends to socialize with."
+            )
             return {
-
-
-
-
+                "success": False,
+                "evaluation": f"Error in finding person: {str(e)}",
+                "consumed_time": 5,
+                "node_id": node_id,
             }
 
+
 class MessageBlock(Block):
     """生成并发送消息"""
-
-    default_values = {
-        "default_message_template": """
-        As a {gender} {occupation} with {education} education and {personality} personality,
-        generate a message for a friend (relationship strength: {relationship_score}/100)
-        about {intention}.
-        """,
-        "to_discuss": []
-    }
-
+
     def __init__(self, agent, llm: LLM, memory: Memory, simulator: Simulator):
         super().__init__("MessageBlock", llm=llm, memory=memory, simulator=simulator)
         self.agent = agent
@@ -245,44 +272,46 @@ class MessageBlock:
         """
         self.to_discuss = []
 
-        self.prompt_manager = MessagePromptManager(
+        self.prompt_manager = MessagePromptManager(
+            self.default_message_template, self.to_discuss
+        )
 
     def _serialize_message(self, message: str, propagation_count: int) -> str:
         try:
-            return json.dumps(
-                "content": message,
-
-
+            return json.dumps(
+                {"content": message, "propagation_count": propagation_count},
+                ensure_ascii=False,
+            )
         except Exception as e:
             logger.warning(f"Error serializing message: {e}")
             return message
 
-    async def forward(
+    async def forward(
+        self, step: Dict[str, Any], context: Optional[Dict] = None
+    ) -> Dict[str, Any]:
         try:
             # Get target from context or find one
-            target = context.get(
+            target = context.get("target") if context else None
             if not target:
                 result = await self.find_person_block.forward(step, context)
-                if not result[
+                if not result["success"]:
                     return {
-
-
-
+                        "success": False,
+                        "evaluation": "Could not find target for message",
+                        "consumed_time": 5,
                     }
-                target = result[
-
+                target = result["target"]
+
             # Get formatted prompt using prompt manager
             formatted_prompt = await self.prompt_manager.get_prompt(
-                self.memory,
-                step,
-                target
+                self.memory, step, target
             )
-
+
             # Generate message
             message = await self.llm.atext_request(formatted_prompt, timeout=300)
             if not message:
                 message = "Hello! How are you?"
-
+
             # Update chat history with proper format
             chat_histories = await self.memory.status.get("chat_histories") or {}
             if not isinstance(chat_histories, dict):
@@ -292,32 +321,38 @@ class MessageBlock:
             if chat_histories[target]:
                 chat_histories[target] += ","
             chat_histories[target] += f"me: {message}"
-
+
             await self.memory.status.update("chat_histories", chat_histories)
-
+
             # Send message
             serialized_message = self._serialize_message(message, 1)
-            node_id = await self.memory.stream.add_social(
+            node_id = await self.memory.stream.add_social(
+                description=f"I sent a message to {target}: {message}"
+            )
             return {
-
-
-
-
-
-
+                "success": True,
+                "evaluation": f"Sent message to {target}: {message}",
+                "consumed_time": 10,
+                "message": message,
+                "target": target,
+                "node_id": node_id,
             }
-
+
         except Exception as e:
-            node_id = await self.memory.stream.add_social(
+            node_id = await self.memory.stream.add_social(
+                description=f"I can't send a message to {target}"
+            )
             return {
-
-
-
-
+                "success": False,
+                "evaluation": f"Error in sending message: {str(e)}",
+                "consumed_time": 5,
+                "node_id": node_id,
             }
-
+
+
 class SocialBlock(Block):
     """主社交模块"""
+
     find_person_block: FindPersonBlock
     message_block: MessageBlock
     noneblock: SocialNoneBlock
@@ -326,39 +361,45 @@ class SocialBlock:
         super().__init__("SocialBlock", llm=llm, memory=memory, simulator=simulator)
         self.find_person_block = FindPersonBlock(llm, memory, simulator)
         self.message_block = MessageBlock(agent, llm, memory, simulator)
-        self.noneblock=SocialNoneBlock(llm, memory)
+        self.noneblock = SocialNoneBlock(llm, memory)
         self.dispatcher = BlockDispatcher(llm)
 
         self.trigger_time = 0
         self.token_consumption = 0
-
-        self.dispatcher.register_blocks([
-            self.find_person_block,
-            self.message_block,
-            self.noneblock
-        ])
 
-
+        self.dispatcher.register_blocks(
+            [self.find_person_block, self.message_block, self.noneblock]
+        )
+
+    async def forward(
+        self, step: Dict[str, Any], context: Optional[Dict] = None
+    ) -> Dict[str, Any]:
         try:
             self.trigger_time += 1
-            consumption_start =
+            consumption_start = (
+                self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+            )
 
             # Select the appropriate sub-block using dispatcher
             selected_block = await self.dispatcher.dispatch(step)
-
+
             # Execute the selected sub-block and get the result
             result = await selected_block.forward(step, context)
-
-            consumption_end =
+
+            consumption_end = (
+                self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+            )
             self.token_consumption += consumption_end - consumption_start
 
             return result
 
         except:
-            consumption_end =
+            consumption_end = (
+                self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+            )
             self.token_consumption += consumption_end - consumption_start
             return {
-
-
-
+                "success": True,
+                "evaluation": "Completed social interaction with default behavior",
+                "consumed_time": 15,
             }
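The reworked `SocialBlock.forward` wraps every dispatched sub-block in token bookkeeping: it snapshots the LLM's `prompt_tokens_used` and `completion_tokens_used` counters before and after the call and accumulates the difference. The following is a minimal, self-contained sketch of that pattern, not code from the package; the `StubLLM`/`TokenMeter` names and the sample numbers are illustrative, and only the two counter attributes come from the diff above.

```python
import asyncio


class StubLLM:
    """Illustrative stand-in exposing the two token counters referenced in the diff."""

    def __init__(self):
        self.prompt_tokens_used = 0
        self.completion_tokens_used = 0


class TokenMeter:
    """Hypothetical helper mirroring the consumption_start/consumption_end pattern."""

    def __init__(self, llm):
        self.llm = llm
        self.token_consumption = 0

    async def run(self, block_forward, step, context=None):
        start = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
        result = await block_forward(step, context)  # run the selected sub-block
        end = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
        self.token_consumption += end - start  # tokens spent by this call
        return result


async def demo():
    llm = StubLLM()
    meter = TokenMeter(llm)

    async def fake_block(step, context):
        llm.prompt_tokens_used += 120  # pretend the sub-block spent some tokens
        llm.completion_tokens_used += 45
        return {"success": True, "consumed_time": 15}

    print(await meter.run(fake_block, {"intention": "chat with a friend"}))
    print(meter.token_consumption)  # 165


asyncio.run(demo())
```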
pycityagent/cityagent/blocks/utils.py

@@ -1,5 +1,5 @@
-import re
 import ast
+import re
 
 TIME_ESTIMATE_PROMPT = """As an intelligent agent's time estimation system, please estimate the time needed to complete the current action based on the overall plan and current intention.
 
@@ -36,18 +36,24 @@ tao = 1
 period = 3
 UBI = 0
 
+
 def prettify_document(document: str) -> str:
     # Remove sequences of whitespace characters (including newlines)
-    cleaned = re.sub(r
+    cleaned = re.sub(r"\s+", " ", document).strip()
     return cleaned
 
+
 def extract_dict_from_string(input_string):
     """
    提取输入字符串中的字典。支持跨行字典和嵌套字典。
     """
     # 正则表达式查找所有可能的字典部分,允许多行
-    dict_pattern =
-
+    dict_pattern = (
+        r"\{(?:[^{}]|\{(?:[^{}]|\{[^{}]*\})*\})*\}"  # 匹配字典的正则表达式,支持嵌套
+    )
+    matches = re.findall(
+        dict_pattern, input_string, re.DOTALL
+    )  # re.DOTALL允许匹配换行符
 
     dicts = []
 
@@ -62,7 +68,8 @@ def extract_dict_from_string(input_string):
 
     return dicts
 
+
 def clean_json_response(response: str) -> str:
     """清理LLM响应中的特殊字符"""
-    response = response.replace(
-    return response.strip()
+    response = response.replace("```json", "").replace("```", "")
+    return response.strip()
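For reference, the reshaped helpers in `blocks/utils.py` are typically chained to sanitize an LLM reply before parsing. Below is a hedged usage sketch, not part of the diff: the import path follows the file layout listed above, and the exact return value of `extract_dict_from_string` is an assumption based on its code.

```python
# Assumption: these helpers are importable from the module shown in the file list.
from pycityagent.cityagent.blocks.utils import clean_json_response, extract_dict_from_string

fence = "`" * 3  # build the literal code-fence marker indirectly
raw = f"{fence}json\n{{\"time\": 30}}\n{fence}"  # a typical fenced LLM reply

cleaned = clean_json_response(raw)         # strips the fence markers and surrounding whitespace
dicts = extract_dict_from_string(cleaned)  # expected to yield [{'time': 30}]
print(cleaned)
print(dicts)
```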
pycityagent/cityagent/firmagent.py

@@ -1,9 +1,8 @@
-import asyncio
 from typing import Optional
 
 import numpy as np
 from pycityagent import Simulator, InstitutionAgent
-from pycityagent.llm
+from pycityagent.llm import LLM
 from pycityagent.economy import EconomyClient
 from pycityagent.message import Messager
 from pycityagent.memory import Memory
@@ -19,6 +18,11 @@ class FirmAgent(InstitutionAgent):
         "max_price_inflation": 0.05,
         "max_wage_inflation": 0.05,
     }
+    fields_description = {
+        "time_diff": "Time difference between each forward, day * hour * minute * second",
+        "max_price_inflation": "Maximum price inflation rate",
+        "max_wage_inflation": "Maximum wage inflation rate",
+    }
 
     def __init__(
         self,
@@ -62,34 +66,14 @@ class FirmAgent(InstitutionAgent):
 
     async def forward(self):
         if await self.month_trigger():
-            employees = await self.
-
-
-
-
-
-
-            )
-            print(
-                f"goods_demand: {goods_demand}, goods_consumption: {goods_consumption}"
-            )
-            total_demand = sum(goods_demand)
-            last_inventory = sum(goods_consumption) + await self.economy_client.get(
-                self._agent_id, "inventory"
-            )
-            print(
-                f"total_demand: {total_demand}, last_inventory: {last_inventory}, goods_contumption: {sum(goods_consumption)}"
-            )
-            max_change_rate = (total_demand - last_inventory) / (
-                max(total_demand, last_inventory) + 1e-8
-            )
-            skills = await self.gather_messages(employees, "work_skill")
-            for skill, uuid in zip(skills, employees):
-                await self.send_message_to_agent(
-                    uuid,
-                    f"work_skill@{max(skill*(1 + np.random.uniform(0, max_change_rate*self.max_wage_inflation)), 1)}",
-                    "economy",
-                )
+            employees = await self.economy_client.get(self._agent_id, "employees")
+            total_demand = await self.economy_client.get(self._agent_id, "demand")
+            goods_consumption = await self.economy_client.get(self._agent_id, "sales")
+            last_inventory = goods_consumption + await self.economy_client.get(self._agent_id, "inventory")
+            max_change_rate = (total_demand - last_inventory) / (max(total_demand, last_inventory) + 1e-8)
+            skills = np.array(await self.economy_client.get(employees, "skill"))
+            skill_change_ratio = np.random.uniform(0, max_change_rate*self.max_wage_inflation)
+            await self.economy_client.update(employees, "skill", list(np.maximum(skills*(1 + skill_change_ratio), 1)))
             price = await self.economy_client.get(self._agent_id, "price")
             await self.economy_client.update(
                 self._agent_id,
@@ -105,8 +89,6 @@ class FirmAgent(InstitutionAgent):
                     1,
                 ),
             )
-
-
-
-                uuid, f"firm_forward@{self.forward_times}", "economy"
-            )
+
+            await self.economy_client.update(self._agent_id, 'demand', 0)
+            await self.economy_client.update(self._agent_id, 'sales', 0)
pycityagent/cityagent/governmentagent.py

@@ -20,7 +20,7 @@ class GovernmentAgent(InstitutionAgent):
         simulator: Optional[Simulator] = None,
         memory: Optional[Memory] = None,
         economy_client: Optional[EconomyClient] = None,
-        messager: Optional[Messager] = None
+        messager: Optional[Messager] = None,# type:ignore
         avro_file: Optional[dict] = None,
     ) -> None:
         super().__init__(
@@ -42,12 +42,12 @@ class GovernmentAgent(InstitutionAgent):
         if self.last_time_trigger is None:
             self.last_time_trigger = now_time
             return False
-        if now_time - self.last_time_trigger >= self.time_diff:
+        if now_time - self.last_time_trigger >= self.time_diff:# type:ignore
             self.last_time_trigger = now_time
             return True
         return False
 
-    async def gather_messages(self, agent_ids, content):
+    async def gather_messages(self, agent_ids, content):# type:ignore
         infos = await super().gather_messages(agent_ids, content)
         return [info["content"] for info in infos]
 