pycityagent 2.0.0a66__cp312-cp312-macosx_11_0_arm64.whl → 2.0.0a67__cp312-cp312-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. pycityagent/agent/agent.py +157 -57
  2. pycityagent/agent/agent_base.py +316 -43
  3. pycityagent/cityagent/bankagent.py +49 -9
  4. pycityagent/cityagent/blocks/__init__.py +1 -2
  5. pycityagent/cityagent/blocks/cognition_block.py +54 -31
  6. pycityagent/cityagent/blocks/dispatcher.py +22 -17
  7. pycityagent/cityagent/blocks/economy_block.py +46 -32
  8. pycityagent/cityagent/blocks/mobility_block.py +130 -100
  9. pycityagent/cityagent/blocks/needs_block.py +101 -44
  10. pycityagent/cityagent/blocks/other_block.py +42 -33
  11. pycityagent/cityagent/blocks/plan_block.py +59 -42
  12. pycityagent/cityagent/blocks/social_block.py +167 -116
  13. pycityagent/cityagent/blocks/utils.py +13 -6
  14. pycityagent/cityagent/firmagent.py +17 -35
  15. pycityagent/cityagent/governmentagent.py +3 -3
  16. pycityagent/cityagent/initial.py +79 -44
  17. pycityagent/cityagent/memory_config.py +108 -88
  18. pycityagent/cityagent/message_intercept.py +0 -4
  19. pycityagent/cityagent/metrics.py +41 -0
  20. pycityagent/cityagent/nbsagent.py +24 -36
  21. pycityagent/cityagent/societyagent.py +7 -3
  22. pycityagent/cli/wrapper.py +2 -2
  23. pycityagent/economy/econ_client.py +407 -81
  24. pycityagent/environment/__init__.py +0 -3
  25. pycityagent/environment/sim/__init__.py +0 -3
  26. pycityagent/environment/sim/aoi_service.py +2 -2
  27. pycityagent/environment/sim/client.py +3 -31
  28. pycityagent/environment/sim/clock_service.py +2 -2
  29. pycityagent/environment/sim/lane_service.py +8 -8
  30. pycityagent/environment/sim/light_service.py +8 -8
  31. pycityagent/environment/sim/pause_service.py +9 -10
  32. pycityagent/environment/sim/person_service.py +20 -20
  33. pycityagent/environment/sim/road_service.py +2 -2
  34. pycityagent/environment/sim/sim_env.py +21 -5
  35. pycityagent/environment/sim/social_service.py +4 -4
  36. pycityagent/environment/simulator.py +249 -27
  37. pycityagent/environment/utils/__init__.py +2 -2
  38. pycityagent/environment/utils/geojson.py +2 -2
  39. pycityagent/environment/utils/grpc.py +4 -4
  40. pycityagent/environment/utils/map_utils.py +2 -2
  41. pycityagent/llm/embeddings.py +147 -28
  42. pycityagent/llm/llm.py +122 -77
  43. pycityagent/llm/llmconfig.py +5 -0
  44. pycityagent/llm/utils.py +4 -0
  45. pycityagent/memory/__init__.py +0 -4
  46. pycityagent/memory/const.py +2 -2
  47. pycityagent/memory/faiss_query.py +140 -61
  48. pycityagent/memory/memory.py +393 -90
  49. pycityagent/memory/memory_base.py +140 -34
  50. pycityagent/memory/profile.py +13 -13
  51. pycityagent/memory/self_define.py +13 -13
  52. pycityagent/memory/state.py +14 -14
  53. pycityagent/message/message_interceptor.py +253 -3
  54. pycityagent/message/messager.py +133 -6
  55. pycityagent/metrics/mlflow_client.py +47 -4
  56. pycityagent/pycityagent-sim +0 -0
  57. pycityagent/pycityagent-ui +0 -0
  58. pycityagent/simulation/__init__.py +3 -2
  59. pycityagent/simulation/agentgroup.py +145 -52
  60. pycityagent/simulation/simulation.py +257 -62
  61. pycityagent/survey/manager.py +45 -3
  62. pycityagent/survey/models.py +42 -2
  63. pycityagent/tools/__init__.py +1 -2
  64. pycityagent/tools/tool.py +93 -69
  65. pycityagent/utils/avro_schema.py +2 -2
  66. pycityagent/utils/parsers/code_block_parser.py +1 -1
  67. pycityagent/utils/parsers/json_parser.py +2 -2
  68. pycityagent/utils/parsers/parser_base.py +2 -2
  69. pycityagent/workflow/block.py +64 -13
  70. pycityagent/workflow/prompt.py +31 -23
  71. pycityagent/workflow/trigger.py +91 -24
  72. {pycityagent-2.0.0a66.dist-info → pycityagent-2.0.0a67.dist-info}/METADATA +2 -2
  73. pycityagent-2.0.0a67.dist-info/RECORD +97 -0
  74. pycityagent/environment/interact/__init__.py +0 -0
  75. pycityagent/environment/interact/interact.py +0 -198
  76. pycityagent/environment/message/__init__.py +0 -0
  77. pycityagent/environment/sence/__init__.py +0 -0
  78. pycityagent/environment/sence/static.py +0 -416
  79. pycityagent/environment/sidecar/__init__.py +0 -8
  80. pycityagent/environment/sidecar/sidecarv2.py +0 -109
  81. pycityagent/environment/sim/economy_services.py +0 -192
  82. pycityagent/metrics/utils/const.py +0 -0
  83. pycityagent-2.0.0a66.dist-info/RECORD +0 -105
  84. {pycityagent-2.0.0a66.dist-info → pycityagent-2.0.0a67.dist-info}/LICENSE +0 -0
  85. {pycityagent-2.0.0a66.dist-info → pycityagent-2.0.0a67.dist-info}/WHEEL +0 -0
  86. {pycityagent-2.0.0a66.dist-info → pycityagent-2.0.0a67.dist-info}/entry_points.txt +0 -0
  87. {pycityagent-2.0.0a66.dist-info → pycityagent-2.0.0a67.dist-info}/top_level.txt +0 -0
pycityagent/cityagent/blocks/social_block.py
@@ -1,19 +1,22 @@
- # 由于目前模拟器支持的限制,现在只有Dispatcher中只有NoneBlock,MessageBlock和FindPersonBlock。
+ # 由于目前模拟器支持的限制,现在只有Dispatcher中只有NoneBlock,MessageBlock和FindPersonBlock。
 
- import random
  import json
- from typing import Dict, Any, List, Optional
+ import logging
+ import random
+ from typing import Any, Dict, List, Optional
+
+ from pycityagent.environment.simulator import Simulator
  from pycityagent.llm.llm import LLM
- from pycityagent.workflow.block import Block
  from pycityagent.memory import Memory
- from pycityagent.environment.simulator import Simulator
+ from pycityagent.workflow.block import Block
  from pycityagent.workflow.prompt import FormatPrompt
+
  from .dispatcher import BlockDispatcher
- from .utils import clean_json_response, TIME_ESTIMATE_PROMPT
- import logging
+ from .utils import TIME_ESTIMATE_PROMPT, clean_json_response
 
  logger = logging.getLogger("pycityagent")
 
+
  class MessagePromptManager:
  def __init__(self, template: str, to_discuss: List[str]):
  self.template = template
@@ -30,8 +33,10 @@ class MessagePromptManager:
  discussion_constraint = ""
  if self.to_discuss:
  topics = ", ".join(f'"{topic}"' for topic in self.to_discuss)
- discussion_constraint = f"Limit your discussion to the following topics: {topics}."
-
+ discussion_constraint = (
+ f"Limit your discussion to the following topics: {topics}."
+ )
+
  # 格式化提示
  self.format_prompt.format(
  gender=await memory.status.get("gender") or "",
@@ -42,49 +47,63 @@
  intention=step.get("intention", ""),
  emotion_types=await memory.status.get("emotion_types"),
  thought=await memory.status.get("thought"),
- chat_history=chat_histories.get(target, "") if isinstance(chat_histories, dict) else "",
- discussion_constraint=discussion_constraint
+ chat_history=(
+ chat_histories.get(target, "")
+ if isinstance(chat_histories, dict)
+ else ""
+ ),
+ discussion_constraint=discussion_constraint,
  )
-
+
  return self.format_prompt.to_dialog()
 
+
  class SocialNoneBlock(Block):
  """
  空操作
  NoneBlock
  """
+
  def __init__(self, llm: LLM, memory: Memory):
  super().__init__("NoneBlock", llm=llm, memory=memory)
  self.description = "Handle all other cases"
  self.guidance_prompt = FormatPrompt(template=TIME_ESTIMATE_PROMPT)
 
  async def forward(self, step, context):
- self.guidance_prompt.format(plan=context['plan'],
- intention=step['intention'],
- emotion_types=await self.memory.status.get("emotion_types"))
+ self.guidance_prompt.format(
+ plan=context["plan"],
+ intention=step["intention"],
+ emotion_types=await self.memory.status.get("emotion_types"),
+ )
  result = await self.llm.atext_request(self.guidance_prompt.to_dialog())
  result = clean_json_response(result)
  try:
  result = json.loads(result)
- node_id = await self.memory.stream.add_social(description=f"I {step['intention']}")
+ node_id = await self.memory.stream.add_social(
+ description=f"I {step['intention']}"
+ )
  return {
- 'success': True,
- 'evaluation': f'Finished {step["intention"]}',
- 'consumed_time': result['time'],
- 'node_id': node_id
+ "success": True,
+ "evaluation": f'Finished {step["intention"]}',
+ "consumed_time": result["time"],
+ "node_id": node_id,
  }
  except Exception as e:
  logger.warning(f"解析时间评估响应时发生错误: {str(e)}, 原始结果: {result}")
- node_id = await self.memory.stream.add_social(description=f"I failed to execute {step['intention']}")
+ node_id = await self.memory.stream.add_social(
+ description=f"I failed to execute {step['intention']}"
+ )
  return {
- 'success': False,
- 'evaluation': f'Failed to execute {step["intention"]}',
- 'consumed_time': 5,
- 'node_id': node_id
+ "success": False,
+ "evaluation": f'Failed to execute {step["intention"]}',
+ "consumed_time": 5,
+ "node_id": node_id,
  }
 
+
  class FindPersonBlock(Block):
  """寻找社交对象"""
+
  def __init__(self, llm: LLM, memory: Memory, simulator: Simulator):
  super().__init__("FindPersonBlock", llm=llm, memory=memory, simulator=simulator)
  self.description = "Find a suitable person to socialize with"
@@ -122,39 +141,42 @@ class FindPersonBlock:
  ['offline', 2] - means meet the third friend offline
  """
 
- async def forward(self, step: Dict[str, Any], context: Optional[Dict] = None) -> Dict[str, Any]:
+ async def forward(
+ self, step: Dict[str, Any], context: Optional[Dict] = None
+ ) -> Dict[str, Any]:
  try:
  # Get friends list and relationship strength
  friends = await self.memory.status.get("friends") or []
  relationships = await self.memory.status.get("relationships") or {}
-
+
  if not friends:
- node_id = await self.memory.stream.add_social(description=f"I can't find any friends to socialize with.")
+ node_id = await self.memory.stream.add_social(
+ description=f"I can't find any friends to socialize with."
+ )
  return {
- 'success': False,
- 'evaluation': 'No friends found in social network',
- 'consumed_time': 5,
- 'node_id': node_id
+ "success": False,
+ "evaluation": "No friends found in social network",
+ "consumed_time": 5,
+ "node_id": node_id,
  }
-
+
  # Create a list of friends with all information
  friend_info = []
  index_to_uuid = {}
-
+
  for i, friend_id in enumerate(friends):
  relationship_strength = relationships.get(friend_id, 0)
- friend_info.append({
- 'index': i,
- 'relationship_strength': relationship_strength
- })
+ friend_info.append(
+ {"index": i, "relationship_strength": relationship_strength}
+ )
  index_to_uuid[i] = friend_id
-
+
  # Format friend information for easier reading
  formatted_friend_info = {
- i: {'relationship_strength': info['relationship_strength']}
+ i: {"relationship_strength": info["relationship_strength"]}
  for i, info in enumerate(friend_info)
  }
-
+
  # Format the prompt
  formatted_prompt = FormatPrompt(self.prompt)
  formatted_prompt.format(
@@ -165,51 +187,66 @@
  intention=str(step.get("intention", "socialize")),
  emotion_types=str(await self.memory.status.get("emotion_types")),
  thought=str(await self.memory.status.get("thought")),
- friend_info=str(formatted_friend_info)
+ friend_info=str(formatted_friend_info),
  )
-
+
  # Get LLM response
- response = await self.llm.atext_request(formatted_prompt.to_dialog(), timeout=300)
-
+ response = await self.llm.atext_request(
+ formatted_prompt.to_dialog(), timeout=300
+ )
+
  try:
  # Parse the response
  mode, friend_index = eval(response)
-
+
  # Validate the response format
- if not isinstance(mode, str) or mode not in ['online', 'offline']:
+ if not isinstance(mode, str) or mode not in ["online", "offline"]:
  raise ValueError("Invalid mode")
- if not isinstance(friend_index, int) or friend_index not in index_to_uuid:
+ if (
+ not isinstance(friend_index, int)
+ or friend_index not in index_to_uuid
+ ):
  raise ValueError("Invalid friend index")
-
+
  # Convert index to UUID
  target = index_to_uuid[friend_index]
- context['target']=target
+ context["target"] = target
  except Exception as e:
  # If parsing fails, select the friend with the strongest relationship as the default option
- target = max(relationships.items(), key=lambda x: x[1])[0] if relationships else friends[0]
- mode = 'online'
-
- node_id = await self.memory.stream.add_social(description=f"I selected the friend {target} for {mode} interaction")
+ target = (
+ max(relationships.items(), key=lambda x: x[1])[0]
+ if relationships
+ else friends[0]
+ )
+ mode = "online"
+
+ node_id = await self.memory.stream.add_social(
+ description=f"I selected the friend {target} for {mode} interaction"
+ )
  return {
- 'success': True,
- 'evaluation': f'Selected friend {target} for {mode} interaction',
- 'consumed_time': 15,
- 'mode': mode,
- 'target': target,
- 'node_id': node_id
+ "success": True,
+ "evaluation": f"Selected friend {target} for {mode} interaction",
+ "consumed_time": 15,
+ "mode": mode,
+ "target": target,
+ "node_id": node_id,
  }
-
+
  except Exception as e:
- node_id = await self.memory.stream.add_social(description=f"I can't find any friends to socialize with.")
+ node_id = await self.memory.stream.add_social(
+ description=f"I can't find any friends to socialize with."
+ )
  return {
- 'success': False,
- 'evaluation': f'Error in finding person: {str(e)}',
- 'consumed_time': 5,
- 'node_id': node_id
+ "success": False,
+ "evaluation": f"Error in finding person: {str(e)}",
+ "consumed_time": 5,
+ "node_id": node_id,
  }
 
+
  class MessageBlock(Block):
  """生成并发送消息"""
+
  def __init__(self, agent, llm: LLM, memory: Memory, simulator: Simulator):
  super().__init__("MessageBlock", llm=llm, memory=memory, simulator=simulator)
  self.agent = agent
@@ -235,44 +272,46 @@
  """
  self.to_discuss = []
 
- self.prompt_manager = MessagePromptManager(self.default_message_template, self.to_discuss)
+ self.prompt_manager = MessagePromptManager(
+ self.default_message_template, self.to_discuss
+ )
 
  def _serialize_message(self, message: str, propagation_count: int) -> str:
  try:
- return json.dumps({
- "content": message,
- "propagation_count": propagation_count
- }, ensure_ascii=False)
+ return json.dumps(
+ {"content": message, "propagation_count": propagation_count},
+ ensure_ascii=False,
+ )
  except Exception as e:
  logger.warning(f"Error serializing message: {e}")
  return message
 
- async def forward(self, step: Dict[str, Any], context: Optional[Dict] = None) -> Dict[str, Any]:
+ async def forward(
+ self, step: Dict[str, Any], context: Optional[Dict] = None
+ ) -> Dict[str, Any]:
  try:
  # Get target from context or find one
- target = context.get('target') if context else None
+ target = context.get("target") if context else None
  if not target:
  result = await self.find_person_block.forward(step, context)
- if not result['success']:
+ if not result["success"]:
  return {
- 'success': False,
- 'evaluation': 'Could not find target for message',
- 'consumed_time': 5
+ "success": False,
+ "evaluation": "Could not find target for message",
+ "consumed_time": 5,
  }
- target = result['target']
-
+ target = result["target"]
+
  # Get formatted prompt using prompt manager
  formatted_prompt = await self.prompt_manager.get_prompt(
- self.memory,
- step,
- target
+ self.memory, step, target
  )
-
+
  # Generate message
  message = await self.llm.atext_request(formatted_prompt, timeout=300)
  if not message:
  message = "Hello! How are you?"
-
+
  # Update chat history with proper format
  chat_histories = await self.memory.status.get("chat_histories") or {}
  if not isinstance(chat_histories, dict):
@@ -282,32 +321,38 @@
  if chat_histories[target]:
  chat_histories[target] += ","
  chat_histories[target] += f"me: {message}"
-
+
  await self.memory.status.update("chat_histories", chat_histories)
-
+
  # Send message
  serialized_message = self._serialize_message(message, 1)
- node_id = await self.memory.stream.add_social(description=f"I sent a message to {target}: {message}")
+ node_id = await self.memory.stream.add_social(
+ description=f"I sent a message to {target}: {message}"
+ )
  return {
- 'success': True,
- 'evaluation': f'Sent message to {target}: {message}',
- 'consumed_time': 10,
- 'message': message,
- 'target': target,
- 'node_id': node_id
+ "success": True,
+ "evaluation": f"Sent message to {target}: {message}",
+ "consumed_time": 10,
+ "message": message,
+ "target": target,
+ "node_id": node_id,
  }
-
+
  except Exception as e:
- node_id = await self.memory.stream.add_social(description=f"I can't send a message to {target}")
+ node_id = await self.memory.stream.add_social(
+ description=f"I can't send a message to {target}"
+ )
  return {
- 'success': False,
- 'evaluation': f'Error in sending message: {str(e)}',
- 'consumed_time': 5,
- 'node_id': node_id
+ "success": False,
+ "evaluation": f"Error in sending message: {str(e)}",
+ "consumed_time": 5,
+ "node_id": node_id,
  }
-
+
+
  class SocialBlock(Block):
  """主社交模块"""
+
  find_person_block: FindPersonBlock
  message_block: MessageBlock
  noneblock: SocialNoneBlock
@@ -316,39 +361,45 @@ class SocialBlock:
  super().__init__("SocialBlock", llm=llm, memory=memory, simulator=simulator)
  self.find_person_block = FindPersonBlock(llm, memory, simulator)
  self.message_block = MessageBlock(agent, llm, memory, simulator)
- self.noneblock=SocialNoneBlock(llm, memory)
+ self.noneblock = SocialNoneBlock(llm, memory)
  self.dispatcher = BlockDispatcher(llm)
 
  self.trigger_time = 0
  self.token_consumption = 0
-
- self.dispatcher.register_blocks([
- self.find_person_block,
- self.message_block,
- self.noneblock
- ])
 
- async def forward(self, step: Dict[str, Any], context: Optional[Dict] = None) -> Dict[str, Any]:
+ self.dispatcher.register_blocks(
+ [self.find_person_block, self.message_block, self.noneblock]
+ )
+
+ async def forward(
+ self, step: Dict[str, Any], context: Optional[Dict] = None
+ ) -> Dict[str, Any]:
  try:
  self.trigger_time += 1
- consumption_start = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+ consumption_start = (
+ self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+ )
 
  # Select the appropriate sub-block using dispatcher
  selected_block = await self.dispatcher.dispatch(step)
-
+
  # Execute the selected sub-block and get the result
  result = await selected_block.forward(step, context)
-
- consumption_end = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+
+ consumption_end = (
+ self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+ )
  self.token_consumption += consumption_end - consumption_start
 
  return result
 
  except:
- consumption_end = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+ consumption_end = (
+ self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+ )
  self.token_consumption += consumption_end - consumption_start
  return {
- 'success': True,
- 'evaluation': 'Completed social interaction with default behavior',
- 'consumed_time': 15
+ "success": True,
+ "evaluation": "Completed social interaction with default behavior",
+ "consumed_time": 15,
  }
pycityagent/cityagent/blocks/utils.py
@@ -1,5 +1,5 @@
- import re
  import ast
+ import re
 
  TIME_ESTIMATE_PROMPT = """As an intelligent agent's time estimation system, please estimate the time needed to complete the current action based on the overall plan and current intention.
 
@@ -36,18 +36,24 @@ tao = 1
  period = 3
  UBI = 0
 
+
  def prettify_document(document: str) -> str:
  # Remove sequences of whitespace characters (including newlines)
- cleaned = re.sub(r'\s+', ' ', document).strip()
+ cleaned = re.sub(r"\s+", " ", document).strip()
  return cleaned
 
+
  def extract_dict_from_string(input_string):
  """
  提取输入字符串中的字典。支持跨行字典和嵌套字典。
  """
  # 正则表达式查找所有可能的字典部分,允许多行
- dict_pattern = r'\{(?:[^{}]|\{(?:[^{}]|\{[^{}]*\})*\})*\}' # 匹配字典的正则表达式,支持嵌套
- matches = re.findall(dict_pattern, input_string, re.DOTALL) # re.DOTALL允许匹配换行符
+ dict_pattern = (
+ r"\{(?:[^{}]|\{(?:[^{}]|\{[^{}]*\})*\})*\}" # 匹配字典的正则表达式,支持嵌套
+ )
+ matches = re.findall(
+ dict_pattern, input_string, re.DOTALL
+ ) # re.DOTALL允许匹配换行符
 
  dicts = []
 
@@ -62,7 +68,8 @@ def extract_dict_from_string(input_string):
 
  return dicts
 
+
  def clean_json_response(response: str) -> str:
  """清理LLM响应中的特殊字符"""
- response = response.replace('```json', '').replace('```', '')
- return response.strip()
+ response = response.replace("```json", "").replace("```", "")
+ return response.strip()
pycityagent/cityagent/firmagent.py
@@ -1,9 +1,8 @@
- import asyncio
  from typing import Optional
 
  import numpy as np
  from pycityagent import Simulator, InstitutionAgent
- from pycityagent.llm.llm import LLM
+ from pycityagent.llm import LLM
  from pycityagent.economy import EconomyClient
  from pycityagent.message import Messager
  from pycityagent.memory import Memory
@@ -19,6 +18,11 @@ class FirmAgent(InstitutionAgent):
  "max_price_inflation": 0.05,
  "max_wage_inflation": 0.05,
  }
+ fields_description = {
+ "time_diff": "Time difference between each forward, day * hour * minute * second",
+ "max_price_inflation": "Maximum price inflation rate",
+ "max_wage_inflation": "Maximum wage inflation rate",
+ }
 
  def __init__(
  self,
@@ -62,34 +66,14 @@
 
  async def forward(self):
  if await self.month_trigger():
- employees = await self.memory.status.get("employees")
- agents_forward = []
- if not np.all(np.array(agents_forward) > self.forward_times):
- return
- goods_demand = await self.gather_messages(employees, "goods_demand")
- goods_consumption = await self.gather_messages(
- employees, "goods_consumption"
- )
- print(
- f"goods_demand: {goods_demand}, goods_consumption: {goods_consumption}"
- )
- total_demand = sum(goods_demand)
- last_inventory = sum(goods_consumption) + await self.economy_client.get(
- self._agent_id, "inventory"
- )
- print(
- f"total_demand: {total_demand}, last_inventory: {last_inventory}, goods_contumption: {sum(goods_consumption)}"
- )
- max_change_rate = (total_demand - last_inventory) / (
- max(total_demand, last_inventory) + 1e-8
- )
- skills = await self.gather_messages(employees, "work_skill")
- for skill, uuid in zip(skills, employees):
- await self.send_message_to_agent(
- uuid,
- f"work_skill@{max(skill*(1 + np.random.uniform(0, max_change_rate*self.max_wage_inflation)), 1)}",
- "economy",
- )
+ employees = await self.economy_client.get(self._agent_id, "employees")
+ total_demand = await self.economy_client.get(self._agent_id, "demand")
+ goods_consumption = await self.economy_client.get(self._agent_id, "sales")
+ last_inventory = goods_consumption + await self.economy_client.get(self._agent_id, "inventory")
+ max_change_rate = (total_demand - last_inventory) / (max(total_demand, last_inventory) + 1e-8)
+ skills = np.array(await self.economy_client.get(employees, "skill"))
+ skill_change_ratio = np.random.uniform(0, max_change_rate*self.max_wage_inflation)
+ await self.economy_client.update(employees, "skill", list(np.maximum(skills*(1 + skill_change_ratio), 1)))
  price = await self.economy_client.get(self._agent_id, "price")
  await self.economy_client.update(
  self._agent_id,
@@ -105,8 +89,6 @@
  1,
  ),
  )
- self.forward_times += 1
- for uuid in employees:
- await self.send_message_to_agent(
- uuid, f"firm_forward@{self.forward_times}", "economy"
- )
+
+ await self.economy_client.update(self._agent_id, 'demand', 0)
+ await self.economy_client.update(self._agent_id, 'sales', 0)
pycityagent/cityagent/governmentagent.py
@@ -20,7 +20,7 @@ class GovernmentAgent(InstitutionAgent):
  simulator: Optional[Simulator] = None,
  memory: Optional[Memory] = None,
  economy_client: Optional[EconomyClient] = None,
- messager: Optional[Messager] = None,
+ messager: Optional[Messager] = None,# type:ignore
  avro_file: Optional[dict] = None,
  ) -> None:
  super().__init__(
@@ -42,12 +42,12 @@
  if self.last_time_trigger is None:
  self.last_time_trigger = now_time
  return False
- if now_time - self.last_time_trigger >= self.time_diff:
+ if now_time - self.last_time_trigger >= self.time_diff:# type:ignore
  self.last_time_trigger = now_time
  return True
  return False
 
- async def gather_messages(self, agent_ids, content):
+ async def gather_messages(self, agent_ids, content):# type:ignore
  infos = await super().gather_messages(agent_ids, content)
  return [info["content"] for info in infos]