pycityagent 2.0.0a47__cp39-cp39-macosx_11_0_arm64.whl → 2.0.0a49__cp39-cp39-macosx_11_0_arm64.whl
Sign up to get free protection for your applications and to get access to all the features.
- pycityagent/__init__.py +3 -2
- pycityagent/agent.py +109 -4
- pycityagent/cityagent/__init__.py +20 -0
- pycityagent/cityagent/bankagent.py +54 -0
- pycityagent/cityagent/blocks/__init__.py +20 -0
- pycityagent/cityagent/blocks/cognition_block.py +304 -0
- pycityagent/cityagent/blocks/dispatcher.py +78 -0
- pycityagent/cityagent/blocks/economy_block.py +356 -0
- pycityagent/cityagent/blocks/mobility_block.py +258 -0
- pycityagent/cityagent/blocks/needs_block.py +305 -0
- pycityagent/cityagent/blocks/other_block.py +103 -0
- pycityagent/cityagent/blocks/plan_block.py +309 -0
- pycityagent/cityagent/blocks/social_block.py +345 -0
- pycityagent/cityagent/blocks/time_block.py +116 -0
- pycityagent/cityagent/blocks/utils.py +66 -0
- pycityagent/cityagent/firmagent.py +75 -0
- pycityagent/cityagent/governmentagent.py +60 -0
- pycityagent/cityagent/initial.py +98 -0
- pycityagent/cityagent/memory_config.py +202 -0
- pycityagent/cityagent/nbsagent.py +92 -0
- pycityagent/cityagent/societyagent.py +291 -0
- pycityagent/memory/memory.py +0 -18
- pycityagent/message/messager.py +6 -3
- pycityagent/simulation/agentgroup.py +123 -37
- pycityagent/simulation/simulation.py +311 -316
- pycityagent/workflow/block.py +66 -1
- pycityagent/workflow/tool.py +9 -4
- {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a49.dist-info}/METADATA +2 -2
- {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a49.dist-info}/RECORD +33 -14
- {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a49.dist-info}/LICENSE +0 -0
- {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a49.dist-info}/WHEEL +0 -0
- {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a49.dist-info}/entry_points.txt +0 -0
- {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a49.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,305 @@
|
|
1
|
+
import json
|
2
|
+
from pycityagent import Simulator
|
3
|
+
from pycityagent.memory.memory import Memory
|
4
|
+
from pycityagent.llm.llm import LLM
|
5
|
+
from pycityagent.workflow.block import Block
|
6
|
+
from pycityagent.workflow.prompt import FormatPrompt
|
7
|
+
import logging
|
8
|
+
logger = logging.getLogger("pycityagent")
|
9
|
+
|
10
|
+
INITIAL_NEEDS_PROMPT = """You are an intelligent agent needs initialization system. Based on the profile information below, please help initialize the agent's needs and related parameters.
|
11
|
+
|
12
|
+
Profile Information:
|
13
|
+
- Gender: {gender}
|
14
|
+
- Education: {education}
|
15
|
+
- Consumption Level: {consumption}
|
16
|
+
- Occupation: {occupation}
|
17
|
+
- Age: {age}
|
18
|
+
- Monthly Income: {income}
|
19
|
+
- Race: {race}
|
20
|
+
- Religion: {religion}
|
21
|
+
- Skills: {skill}
|
22
|
+
|
23
|
+
Current Time: {now_time}
|
24
|
+
|
25
|
+
Please initialize the agent's needs and parameters based on the profile above. Return the values in JSON format with the following structure:
|
26
|
+
|
27
|
+
1. Current needs satisfaction levels (0-1 float values, lower means less satisfied):
|
28
|
+
- hungry: Hunger satisfaction level (Normally, the agent will be more hungry at eating time)
|
29
|
+
- tired: Fatigue level (Normally, at night, the agent will be more tired)
|
30
|
+
- safe: Safety satisfaction level (Normally, the agent will be more safe when they have high income and currency)
|
31
|
+
- social: Social satisfaction level
|
32
|
+
|
33
|
+
2. Natural decay rates per hour (0-1 float values):
|
34
|
+
- alpha_H: Hunger satisfaction decay rate
|
35
|
+
- alpha_D: Fatigue decay rate
|
36
|
+
- alpha_P: Safety satisfaction decay rate
|
37
|
+
- alpha_C: Social satisfaction decay rate
|
38
|
+
|
39
|
+
3. Threshold values (0-1 float values, below which the agent will try to improve):
|
40
|
+
- T_H: Hunger satisfaction threshold
|
41
|
+
- T_D: Fatigue threshold
|
42
|
+
- T_S: Safety threshold
|
43
|
+
- T_C: Social threshold
|
44
|
+
|
45
|
+
Example response format (Do not return any other text):
|
46
|
+
{{
|
47
|
+
"current_needs": {{
|
48
|
+
"hungry": 0.8,
|
49
|
+
"tired": 0.7,
|
50
|
+
"safe": 0.9,
|
51
|
+
"social": 0.6
|
52
|
+
}},
|
53
|
+
"decay_rates": {{
|
54
|
+
"alpha_H": 0.2,
|
55
|
+
"alpha_D": 0.08,
|
56
|
+
"alpha_P": 0.05,
|
57
|
+
"alpha_C": 0.03
|
58
|
+
}},
|
59
|
+
"thresholds": {{
|
60
|
+
"T_H": 0.4,
|
61
|
+
"T_D": 0.2,
|
62
|
+
"T_S": 0.2,
|
63
|
+
"T_C": 0.2
|
64
|
+
}}
|
65
|
+
}}
|
66
|
+
"""
|
67
|
+
|
68
|
+
EVALUATION_PROMPT = """You are an evaluation system for an intelligent agent. The agent has performed the following actions to satisfy the {current_need} need:
|
69
|
+
|
70
|
+
Goal: {plan_target}
|
71
|
+
Execution steps:
|
72
|
+
{evaluation_results}
|
73
|
+
|
74
|
+
Current needs status: {current_needs}
|
75
|
+
|
76
|
+
Please evaluate and adjust the value of {current_need} need based on the execution results above.
|
77
|
+
|
78
|
+
Notes:
|
79
|
+
1. Need values range from 0-1, where:
|
80
|
+
- 1 means the need is fully satisfied
|
81
|
+
- 0 means the need is completely unsatisfied
|
82
|
+
- Higher values indicate greater need satisfaction
|
83
|
+
2. If the current need is not "whatever", only return the new value for the current need. Otherwise, return both safe and social need values.
|
84
|
+
3. Ensure the return value is in valid JSON format, examples below:
|
85
|
+
|
86
|
+
Example response format for specific need (hungry here) adjustment (Do not return any other text):
|
87
|
+
{{
|
88
|
+
"hungry": new_need_value
|
89
|
+
}}
|
90
|
+
|
91
|
+
Example response format for whatever need adjustment (Do not return any other text):
|
92
|
+
{{
|
93
|
+
"safe": new_safe_value,
|
94
|
+
"social": new_social_value
|
95
|
+
}}
|
96
|
+
"""
|
97
|
+
|
98
|
+
class NeedsBlock(Block):
    """
    Generate needs.

    Maintains four need levels in agent memory ("hungry", "tired", "safe",
    "social"), decays them over simulated time, re-evaluates them with the
    LLM when a plan completes, and selects the agent's `current_need` by
    fixed priority (hungry > tired > safe > social > whatever).
    """

    def __init__(self, llm: LLM, memory: Memory, simulator: Simulator):
        super().__init__("NeedsBlock", llm, memory, simulator)
        # Prompts for post-plan need re-scoring and one-time initialization.
        self.evaluation_prompt = FormatPrompt(EVALUATION_PROMPT)
        self.initial_prompt = FormatPrompt(INITIAL_NEEDS_PROMPT)
        # Simulator timestamp of the previous forward() pass; None until first call.
        self.last_evaluation_time = None
        # Bookkeeping: number of forward() calls and LLM tokens consumed by this block.
        self.trigger_time = 0
        self.token_consumption = 0
        self.initialized = False
        # Default natural decay rates per hour for hunger/fatigue/safety/social;
        # overwritten by the LLM response in initialize().
        self.alpha_H, self.alpha_D, self.alpha_P, self.alpha_C = 0.2, 0.08, 0.05, 0.03
        # Default thresholds below which a need becomes the current need.
        self.T_H, self.T_D, self.T_P, self.T_C = 0.4, 0.2, 0.2, 0.2

    async def initialize(self):
        """Ask the LLM to seed need levels, decay rates and thresholds from the profile."""
        self.initial_prompt.format(
            gender=await self.memory.get("gender"),
            education=await self.memory.get("education"),
            consumption=await self.memory.get("consumption"),
            occupation=await self.memory.get("occupation"),
            age=await self.memory.get("age"),
            income=await self.memory.get("income"),
            race=await self.memory.get("race"),
            religion=await self.memory.get("religion"),
            skill=await self.memory.get("skill"),
            now_time=await self.simulator.get_time(format_time=True)
        )
        response = await self.llm.atext_request(
            self.initial_prompt.to_dialog()
        )
        response = self.clean_json_response(response)
        logger.info(f"Needs Initialization: {response}")
        try:
            needs = json.loads(response)
            await self.memory.update("needs", needs["current_needs"])
            # NOTE(review): unpacking .values() assumes the LLM returns keys in
            # the exact order shown in the prompt example (alpha_H, alpha_D,
            # alpha_P, alpha_C / T_H, T_D, T_S, T_C) — dict order follows JSON
            # key order, so a reordered response would silently swap values.
            self.alpha_H, self.alpha_D, self.alpha_P, self.alpha_C = needs["decay_rates"].values()
            self.T_H, self.T_D, self.T_P, self.T_C = needs["thresholds"].values()
        except json.JSONDecodeError:
            logger.warning(f"初始化响应不是有效的JSON格式: {response}")
        # Marked initialized even if parsing failed: the defaults above are kept.
        self.initialized = True

    async def forward(self):
        """One simulation tick: decay needs, settle finished plans, pick current need."""
        self.trigger_time += 1
        consumption_start = self.llm.prompt_tokens_used + self.llm.completion_tokens_used

        if not self.initialized:
            await self.initialize()

        # Compute elapsed simulated time since the previous pass (in hours).
        time_now = await self.simulator.get_time()
        if self.last_evaluation_time is None:
            self.last_evaluation_time = time_now
            time_diff = 0
        else:
            time_diff = (time_now - self.last_evaluation_time)/3600
            self.last_evaluation_time = time_now

        # Fetch current need satisfaction levels.
        needs = await self.memory.get("needs")

        # Apply natural decay proportional to elapsed time, clamped at 0.
        hungry_decay = self.alpha_H * time_diff
        tired_decay = self.alpha_D * time_diff
        safe_decay = self.alpha_P * time_diff
        social_decay = self.alpha_C * time_diff
        hungry = max(0, needs["hungry"] - hungry_decay)
        tired = max(0, needs["tired"] - tired_decay)
        safe = max(0, needs["safe"] - safe_decay)
        social = max(0, needs["social"] - social_decay)
        needs["hungry"] = hungry
        needs["tired"] = tired
        needs["safe"] = safe
        needs["social"] = social

        await self.memory.update("needs", needs)

        # Check whether a plan is currently being executed.
        current_plan = await self.memory.get("current_plan")
        if current_plan and current_plan.get("completed"):
            # Plan finished: re-score the need it served, archive it, reset state.
            await self.evaluate_and_adjust_needs(current_plan)
            # Append the completed plan to the history log.
            history = await self.memory.get("plan_history")
            history.append(current_plan)
            await self.memory.update("plan_history", history)
            await self.memory.update("current_plan", None)
            await self.memory.update("current_step", {"intention": "", "type": ""})
            await self.memory.update("execution_context", {})

        # Re-read needs: evaluate_and_adjust_needs may have updated them.
        needs = await self.memory.get("needs")
        hungry = needs["hungry"]
        tired = needs["tired"]
        safe = needs["safe"]
        social = needs["social"]
        logger.info(f"Time elapsed: {time_diff:.2f} hours")
        logger.info(f"Current state - Hungry: {hungry:.2f}, Tired: {tired:.2f}, Safe: {safe:.2f}, Social: {social:.2f}")

        # Adjust the current need if necessary: when no plan is active (or the
        # plan just completed) pick freely; otherwise only preempt for a
        # strictly higher-priority need.
        current_need = await self.memory.get("current_need")

        # No active plan (or plan completed): check needs in priority order.
        if not current_plan or current_plan.get("completed"):
            # Priority order: hungry > tired > safe > social > whatever.
            if hungry <= self.T_H:
                await self.memory.update("current_need", "hungry")
                logger.info("Needs adjusted: Hungry")
            elif tired <= self.T_D:
                await self.memory.update("current_need", "tired")
                logger.info("Needs adjusted: Tired")
            elif safe <= self.T_P:
                await self.memory.update("current_need", "safe")
                logger.info("Needs adjusted: Safe")
            elif social <= self.T_C:
                await self.memory.update("current_need", "social")
                logger.info("Needs adjusted: Social")
            else:
                await self.memory.update("current_need", "whatever")
                logger.info("Needs adjusted: Whatever")
        else:
            # A plan is running: only switch when a higher-priority need drops
            # below its threshold (the `not in` lists encode the priority ranks).
            needs_changed = False
            new_need = None
            if hungry <= self.T_H and current_need not in ["hungry", "tired"]:
                new_need = "hungry"
                logger.info("Higher priority need detected, adjusted to: Hungry")
                needs_changed = True
            elif tired <= self.T_D and current_need not in ["hungry", "tired"]:
                new_need = "tired"
                logger.info("Higher priority need detected, adjusted to: Tired")
                needs_changed = True
            elif safe <= self.T_P and current_need not in ["hungry", "tired", "safe"]:
                new_need = "safe"
                logger.info("Higher priority need detected, adjusted to: Safe")
                needs_changed = True
            elif social <= self.T_C and current_need not in ["hungry", "tired", "safe", "social"]:
                new_need = "social"
                logger.info("Higher priority need detected, adjusted to: Social")
                needs_changed = True

            # If the need changed, interrupt the current plan and archive it.
            if needs_changed:
                await self.evaluate_and_adjust_needs(current_plan)
                history = await self.memory.get("plan_history")
                history.append(current_plan)
                await self.memory.update("current_need", new_need)
                await self.memory.update("plan_history", history)
                await self.memory.update("current_plan", None)
                await self.memory.update("current_step", {"intention": "", "type": ""})
                await self.memory.update("execution_context", {})
                logger.info("----Agent's plan has been interrupted due to need change----")

        consumption_end = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
        self.token_consumption += consumption_end - consumption_start

    async def evaluate_and_adjust_needs(self, completed_plan):
        """Re-score the need served by *completed_plan* via the LLM and store the result."""
        # Collect each step's evaluation text (plans interrupted mid-step have none).
        evaluation_results = []
        for step in completed_plan["steps"]:
            # NOTE(review): this tests for an 'evaluation' key inside
            # step['evaluation'] (a nested dict), not inside step itself —
            # confirm against the plan producer that this nesting is intended.
            if 'evaluation' in step['evaluation']:
                eva_ = step['evaluation']['evaluation']
            else:
                eva_ = 'Plan interrupted, not completed'
            evaluation_results.append(f"- {step['intention']} ({step['type']}): {eva_}")
        evaluation_results = "\n".join(evaluation_results)

        # Ask the LLM to evaluate the outcome.
        current_need = await self.memory.get("current_need")
        self.evaluation_prompt.format(
            current_need=current_need,
            plan_target=completed_plan["target"],
            evaluation_results=evaluation_results,
            current_needs=await self.memory.get("needs")
        )

        response = await self.llm.atext_request(
            self.evaluation_prompt.to_dialog()
        )

        try:
            logger.info("\n=== Needs Evaluation ===")
            logger.info(f"Evaluating need: {current_need}")
            logger.info(f"Executing plan: {completed_plan['target']}")
            logger.info("Execution results:")
            logger.info(evaluation_results)

            new_needs = json.loads(self.clean_json_response(response)) # type: ignore
            # Apply the adjusted values, logging each change.
            needs = await self.memory.get("needs")
            logger.info(f"\nNeeds value adjustment:")
            for need_type, new_value in new_needs.items():
                if need_type in needs:
                    old_value = needs[need_type]
                    needs[need_type] = new_value
                    logger.info(f"- {need_type}: {old_value} -> {new_value}")
            await self.memory.update("needs", needs)
            logger.info("===============\n")
        except json.JSONDecodeError:
            logger.warning(f"Evaluation response is not a valid JSON format: {response}")
        except Exception as e:
            logger.warning(f"Error processing evaluation response: {str(e)}")
            logger.warning(f"Original response: {response}")

    def clean_json_response(self, response: str) -> str:
        """Strip Markdown code fences from an LLM response so it parses as JSON."""
        response = response.replace('```json', '').replace('```', '')
        return response.strip()
|
@@ -0,0 +1,103 @@
|
|
1
|
+
import json
|
2
|
+
import random
|
3
|
+
|
4
|
+
from .dispatcher import BlockDispatcher
|
5
|
+
from pycityagent.llm.llm import LLM
|
6
|
+
from pycityagent.workflow.block import Block
|
7
|
+
from pycityagent.memory import Memory
|
8
|
+
from pycityagent.workflow.prompt import FormatPrompt
|
9
|
+
from .utils import clean_json_response, TIME_ESTIMATE_PROMPT
|
10
|
+
import logging
|
11
|
+
logger = logging.getLogger("pycityagent")
|
12
|
+
|
13
|
+
class SleepBlock(Block):
    """Handle a "sleep" step by asking the LLM how long the sleep takes.

    On any parsing failure the step is still reported as successful, with a
    randomly drawn duration as a best-effort fallback.
    """

    def __init__(self, llm: LLM, memory: Memory):
        super().__init__("SleepBlock", llm, memory)
        self.description = "Sleep"
        self.guidance_prompt = FormatPrompt(template=TIME_ESTIMATE_PROMPT)

    async def forward(self, step, context):
        """Estimate the sleep duration for *step* and return an execution record."""
        # Fill the time-estimation template with the plan and this step's intention.
        self.guidance_prompt.format(plan=context['plan'], intention=step['intention'])
        reply = await self.llm.atext_request(self.guidance_prompt.to_dialog())
        reply = clean_json_response(reply)
        try:
            reply = json.loads(reply)
            return {
                'success': True,
                'evaluation': f'Sleep: {step["intention"]}',
                'consumed_time': reply['time'],
            }
        except Exception as e:
            # Parsing failed: log and fall back to a random 1-10 hour duration (minutes).
            logger.warning(f"解析时间评估响应时发生错误: {str(e)}, 原始结果: {reply}")
            return {
                'success': True,
                'evaluation': f'Sleep: {step["intention"]}',
                'consumed_time': random.randint(1, 10) * 60,
            }
|
40
|
+
|
41
|
+
class OtherNoneBlock(Block):
    """Catch-all no-op handler for steps that match no specialised block.

    Asks the LLM only for a time estimate; falls back to a random duration
    (in minutes) when the response cannot be parsed, still reporting success.
    """

    def __init__(self, llm: LLM, memory: Memory):
        super().__init__("OtherNoneBlock", llm, memory)
        self.description = "Used to handle other cases"
        self.guidance_prompt = FormatPrompt(template=TIME_ESTIMATE_PROMPT)

    async def forward(self, step, context):
        """Estimate the duration of *step* and return an execution record."""
        # Fill the time-estimation template with the plan and this step's intention.
        self.guidance_prompt.format(plan=context['plan'], intention=step['intention'])
        reply = await self.llm.atext_request(self.guidance_prompt.to_dialog())
        reply = clean_json_response(reply)
        try:
            reply = json.loads(reply)
            return {
                'success': True,
                'evaluation': f'Finished executing {step["intention"]}',
                'consumed_time': reply['time'],
            }
        except Exception as e:
            # Parsing failed: log and fall back to a random 1-180 minute duration.
            logger.warning(f"解析时间评估响应时发生错误: {str(e)}, 原始结果: {reply}")
            return {
                'success': True,
                'evaluation': f'Finished executing {step["intention"]}',
                'consumed_time': random.randint(1, 180),
            }
|
72
|
+
|
73
|
+
|
74
|
+
class OtherBlock(Block):
    """Router for miscellaneous steps: delegates to SleepBlock or OtherNoneBlock.

    Tracks how often it is triggered and how many LLM tokens each dispatch
    (selection + sub-block execution) consumes.
    """

    sleep_block: SleepBlock
    other_none_block: OtherNoneBlock

    def __init__(self, llm: LLM, memory: Memory):
        super().__init__("OtherBlock", llm, memory)
        # Instantiate the sub-blocks this router can delegate to.
        self.sleep_block = SleepBlock(llm, memory)
        self.other_none_block = OtherNoneBlock(llm, memory)
        # Usage counters.
        self.trigger_time = 0
        self.token_consumption = 0
        # Dispatcher chooses the sub-block matching a step's intention.
        self.dispatcher = BlockDispatcher(llm)
        self.dispatcher.register_blocks([self.sleep_block, self.other_none_block])

    async def forward(self, step, context):
        """Dispatch *step* to the matching sub-block and return its result."""
        self.trigger_time += 1
        tokens_before = self.llm.prompt_tokens_used + self.llm.completion_tokens_used

        # Pick the sub-block, then run it.
        handler = await self.dispatcher.dispatch(step)
        outcome = await handler.forward(step, context)  # type: ignore

        # Charge all tokens used during dispatch + execution to this block.
        tokens_after = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
        self.token_consumption += tokens_after - tokens_before

        return outcome
|