pycityagent 2.0.0a47__cp312-cp312-macosx_11_0_arm64.whl → 2.0.0a48__cp312-cp312-macosx_11_0_arm64.whl

Files changed (33)
  1. pycityagent/__init__.py +3 -2
  2. pycityagent/agent.py +109 -4
  3. pycityagent/cityagent/__init__.py +20 -0
  4. pycityagent/cityagent/bankagent.py +54 -0
  5. pycityagent/cityagent/blocks/__init__.py +20 -0
  6. pycityagent/cityagent/blocks/cognition_block.py +304 -0
  7. pycityagent/cityagent/blocks/dispatcher.py +78 -0
  8. pycityagent/cityagent/blocks/economy_block.py +356 -0
  9. pycityagent/cityagent/blocks/mobility_block.py +258 -0
  10. pycityagent/cityagent/blocks/needs_block.py +305 -0
  11. pycityagent/cityagent/blocks/other_block.py +103 -0
  12. pycityagent/cityagent/blocks/plan_block.py +309 -0
  13. pycityagent/cityagent/blocks/social_block.py +345 -0
  14. pycityagent/cityagent/blocks/time_block.py +116 -0
  15. pycityagent/cityagent/blocks/utils.py +66 -0
  16. pycityagent/cityagent/firmagent.py +75 -0
  17. pycityagent/cityagent/governmentagent.py +60 -0
  18. pycityagent/cityagent/initial.py +98 -0
  19. pycityagent/cityagent/memory_config.py +202 -0
  20. pycityagent/cityagent/nbsagent.py +92 -0
  21. pycityagent/cityagent/societyagent.py +291 -0
  22. pycityagent/memory/memory.py +0 -18
  23. pycityagent/message/messager.py +6 -3
  24. pycityagent/simulation/agentgroup.py +118 -37
  25. pycityagent/simulation/simulation.py +311 -316
  26. pycityagent/workflow/block.py +66 -1
  27. pycityagent/workflow/tool.py +15 -11
  28. {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a48.dist-info}/METADATA +2 -2
  29. {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a48.dist-info}/RECORD +33 -14
  30. {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a48.dist-info}/LICENSE +0 -0
  31. {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a48.dist-info}/WHEEL +0 -0
  32. {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a48.dist-info}/entry_points.txt +0 -0
  33. {pycityagent-2.0.0a47.dist-info → pycityagent-2.0.0a48.dist-info}/top_level.txt +0 -0
pycityagent/cityagent/blocks/plan_block.py
@@ -0,0 +1,309 @@
+ import json
+ import random
+ from typing import Dict, List
+ from pycityagent.environment.simulator import Simulator
+ from pycityagent.workflow import Block
+ from pycityagent.llm import LLM
+ from pycityagent.memory import Memory
+ from pycityagent.workflow.prompt import FormatPrompt
+ import logging
+
+ logger = logging.getLogger("pycityagent")
+
+ GUIDANCE_SELECTION_PROMPT = """As an intelligent agent's decision system, please select the most suitable option from the following choices to satisfy the current need.
+ The Environment will influence the choice of steps.
+
+ Current need: Need to satisfy {current_need}
+ Available options: {options}
+ Current location: {current_location}
+ Current time: {current_time}
+ Current Environment: {environment}
+
+ Please evaluate and select the most appropriate option based on these three dimensions:
+ 1. Attitude: Personal preference and evaluation of the option
+ 2. Subjective Norm: Social environment and others' views on this behavior
+ 3. Perceived Control: Difficulty and controllability of executing this option
+
+ Please return the evaluation results in JSON format (Do not return any other text):
+ {{
+     "selected_option": "Select the most suitable option from available choices",
+     "evaluation": {{
+         "attitude": "Attitude score for the option (0-1)",
+         "subjective_norm": "Subjective norm score (0-1)",
+         "perceived_control": "Perceived control score (0-1)",
+         "reasoning": "Specific reasons for selecting this option"
+     }}
+ }}
+ """
+
+ DETAILED_PLAN_PROMPT = """Generate specific execution steps based on the selected guidance plan. The Environment will influence the choice of steps.
+
+ Selected plan: {selected_option}
+ Current location: {current_location}
+ Current time: {current_time}
+ Current Environment: {environment}
+
+ Please generate specific execution steps and return in JSON format:
+ {{
+     "plan": {{
+         "target": "Specific goal",
+         "steps": [
+             {{
+                 "intention": "Specific intention",
+                 "type": "Step type"
+             }}
+         ]
+     }}
+ }}
+
+ Notes:
+ 1. type can only be one of these four: mobility, social, economy, other
+     1.1 mobility: Decisions or behaviors related to large-scale spatial movement, such as location selection, going to a place, etc.
+     1.2 social: Decisions or behaviors related to social interaction, such as finding contacts, chatting with friends, etc.
+     1.3 economy: Decisions or behaviors related to shopping, work, etc.
+     1.4 other: Other types of decisions or behaviors, such as small-scale activities, learning, resting, entertainment, etc.
+ 2. steps should only include steps necessary to fulfill the target (limited to {max_plan_steps} steps)
+ 3. intention in each step should be concise and clear
+
+ Example outputs (Do not return any other text):
+ {{
+     "plan": {{
+         "target": "Eat at home",
+         "steps": [
+             {{
+                 "intention": "Return home from current location",
+                 "type": "mobility"
+             }},
+             {{
+                 "intention": "Cook food",
+                 "type": "other"
+             }},
+             {{
+                 "intention": "Have meal",
+                 "type": "other"
+             }}
+         ]
+     }}
+ }}
+
+ {{
+     "plan": {{
+         "target": "Eat outside",
+         "steps": [
+             {{
+                 "intention": "Select restaurant",
+                 "type": "mobility"
+             }},
+             {{
+                 "intention": "Go to restaurant",
+                 "type": "mobility"
+             }},
+             {{
+                 "intention": "Order food",
+                 "type": "economy"
+             }},
+             {{
+                 "intention": "Have meal",
+                 "type": "other"
+             }}
+         ]
+     }}
+ }}
+
+ {{
+     "plan": {{
+         "target": "Offline social",
+         "steps": [
+             {{
+                 "intention": "Contact friends to arrange meeting place",
+                 "type": "social"
+             }},
+             {{
+                 "intention": "Go to meeting place",
+                 "type": "mobility"
+             }},
+             {{
+                 "intention": "Chat with friends",
+                 "type": "social"
+             }}
+         ]
+     }}
+ }}
+
+ {{
+     "plan": {{
+         "target": "Work",
+         "steps": [
+             {{
+                 "intention": "Go to workplace",
+                 "type": "mobility"
+             }},
+             {{
+                 "intention": "Work",
+                 "type": "other"
+             }}
+         ]
+     }}
+ }}
+ """
+
+ class PlanBlock(Block):
+     configurable_fields: List[str] = ["guidance_options", "max_plan_steps"]
+     default_values = {
+         "guidance_options": {
+             "hungry": ['Eat at home', 'Eat outside'],
+             "tired": ['Sleep', 'Take a nap'],
+             "safe": ['Work'],
+             "social": ['Online social', 'Shopping'],
+             "whatever": ['Learning', 'Entertainment', 'Hang out', 'Exercise']
+         },
+         "max_plan_steps": 6
+     }
+
+     def __init__(self, llm: LLM, memory: Memory, simulator: Simulator):
+         super().__init__("PlanBlock", llm, memory, simulator)
+         self.guidance_prompt = FormatPrompt(template=GUIDANCE_SELECTION_PROMPT)
+         self.detail_prompt = FormatPrompt(template=DETAILED_PLAN_PROMPT)
+         self.trigger_time = 0
+         self.token_consumption = 0
+
+         # configurable fields
+         self.guidance_options = {
+             "hungry": ['Eat at home', 'Eat outside'],
+             "tired": ['Sleep', 'Take a nap'],
+             "safe": ['Work'],
+             "social": ['Online social', 'Shopping'],
+             "whatever": ['Learning', 'Entertainment', 'Hang out', 'Exercise']
+         }
+         self.max_plan_steps = 6
+
+     async def select_guidance(self, current_need: str) -> Dict:
+         """Select a guidance option for the current need."""
+         options = self.guidance_options.get(current_need, [])
+         if not options:
+             return None  # type: ignore
+
+         position_now = await self.memory.get("position")
+         home_location = await self.memory.get("home")
+         work_location = await self.memory.get("work")
+         current_location = "Out"
+         if 'aoi_position' in position_now and position_now['aoi_position'] == home_location['aoi_position']:
+             current_location = "At home"
+         elif 'aoi_position' in position_now and position_now['aoi_position'] == work_location['aoi_position']:
+             current_location = "At workplace"
+         current_time = await self.simulator.get_time(format_time=True)
+         environment = await self.memory.get("environment")
+         self.guidance_prompt.format(
+             current_need=current_need,
+             options=options,
+             current_location=current_location,
+             current_time=current_time,
+             environment=environment
+         )
+
+         response = await self.llm.atext_request(
+             self.guidance_prompt.to_dialog()
+         )  # type: ignore
+
+         try:
+             result = json.loads(self.clean_json_response(response))  # type: ignore
+             if result['selected_option'] not in options:
+                 result['selected_option'] = random.choice(options)
+             logger.info("\n=== Plan Selection ===")
+             logger.info(f"Selected Plan: {result['selected_option']}")
+             return result
+         except Exception as e:
+             logger.warning(f"Error parsing guidance selection response: {str(e)}")
+             return None  # type: ignore
+
+     async def generate_detailed_plan(self, current_need: str, selected_option: str) -> Dict:
+         """Generate a concrete execution plan for the selected option."""
+         position_now = await self.memory.get("position")
+         home_location = await self.memory.get("home")
+         work_location = await self.memory.get("work")
+         current_location = "Out"
+         if 'aoi_position' in position_now and position_now['aoi_position'] == home_location['aoi_position']:
+             current_location = "At home"
+         elif 'aoi_position' in position_now and position_now['aoi_position'] == work_location['aoi_position']:
+             current_location = "At workplace"
+         current_time = await self.simulator.get_time(format_time=True)
+         environment = await self.memory.get("environment")
+         self.detail_prompt.format(
+             selected_option=selected_option,
+             current_location=current_location,
+             current_time=current_time,
+             environment=environment,
+             max_plan_steps=self.max_plan_steps
+         )
+
+         response = await self.llm.atext_request(
+             self.detail_prompt.to_dialog()
+         )
+
+         try:
+             result = json.loads(self.clean_json_response(response))  # type: ignore
+             return result
+         except Exception as e:
+             logger.warning(f"Error parsing detailed plan: {str(e)}")
+             return None  # type: ignore
+
+     async def forward(self):
+         self.trigger_time += 1
+         consumption_start = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+
+         current_need = await self.memory.get("current_need")
+         if current_need == "none":
+             await self.memory.update("current_plan", [])
+             await self.memory.update("current_step", {"intention": "", "type": ""})
+             return
+
+         # Step 1: select a guidance option
+         guidance_result = await self.select_guidance(current_need)
+         if not guidance_result:
+             return
+
+         # Step 2: generate the detailed plan
+         detailed_plan = await self.generate_detailed_plan(
+             current_need,
+             guidance_result["selected_option"]
+         )
+
+         if not detailed_plan or "plan" not in detailed_plan:
+             await self.memory.update("current_plan", [])
+             await self.memory.update("current_step", {"intention": "", "type": ""})
+             return
+         logger.info("\n=== Plan Generation ===")
+         logger.info(f"Target: {detailed_plan['plan']['target']}")
+         logger.info("\nExecution Steps:")
+         for i, step in enumerate(detailed_plan['plan']['steps'], 1):
+             logger.info(f"{i}. {step['intention']} ({step['type']})")
+         logger.info("===============\n")
+
+         # Update the plan and the current step
+         steps = detailed_plan["plan"]["steps"]
+         for step in steps:
+             step["evaluation"] = {"status": "pending", "details": ""}
+
+         plan = {
+             "target": detailed_plan["plan"]["target"],
+             "steps": steps,
+             "completed": False,
+             "guidance": guidance_result  # keep the evaluation result from guidance selection
+         }
+         formated_steps = "\n".join([f"{i}. {step['intention']}" for i, step in enumerate(plan['steps'], 1)])
+         formated_plan = f"""
+         Overall Target: {plan['target']}
+         Execution Steps: \n{formated_steps}
+         """
+         plan['start_time'] = await self.simulator.get_time(format_time=True)
+         await self.memory.update("current_plan", plan)
+         await self.memory.update("current_step", steps[0] if steps else {"intention": "", "type": ""})
+         await self.memory.update("execution_context", {'plan': formated_plan})
+
+         consumption_end = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+         self.token_consumption += consumption_end - consumption_start
+
+     def clean_json_response(self, response: str) -> str:
+         """Clean special characters (Markdown code fences) out of the LLM response."""
+         response = response.replace('```json', '').replace('```', '')
+         return response.strip()
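
Usage note (not part of the diff): a minimal sketch of how the new PlanBlock might be driven. Only PlanBlock's constructor signature and forward() behavior come from the code above; the llm, memory, and simulator objects are assumed to be already-initialized pycityagent instances supplied by the running simulation, and plan_once is a hypothetical helper name.

from pycityagent.cityagent.blocks.plan_block import PlanBlock

async def plan_once(llm, memory, simulator):
    # forward() reads "current_need" from memory and, when planning succeeds,
    # writes "current_plan", "current_step", and "execution_context" back.
    block = PlanBlock(llm, memory, simulator)
    await block.forward()
    return await memory.get("current_plan")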
pycityagent/cityagent/blocks/social_block.py
@@ -0,0 +1,345 @@
+ # Due to current limitations of the simulator, the Dispatcher only contains NoneBlock, MessageBlock, and FindPersonBlock.
+
+ import random
+ import json
+ from typing import Dict, Any, List, Optional
+ from pycityagent.llm.llm import LLM
+ from pycityagent.workflow.block import Block
+ from pycityagent.memory import Memory
+ from pycityagent.environment.simulator import Simulator
+ from pycityagent.workflow.prompt import FormatPrompt
+ from .dispatcher import BlockDispatcher
+ from .utils import clean_json_response, TIME_ESTIMATE_PROMPT
+ import logging
+
+ logger = logging.getLogger("pycityagent")
+
+ class MessagePromptManager:
+     def __init__(self, template: str, to_discuss: List[str]):
+         self.template = template
+         self.format_prompt = FormatPrompt(self.template)
+         self.to_discuss = to_discuss
+
+     async def get_prompt(self, memory, step: Dict[str, Any], target: str) -> str:
+         """Adjust the data fed into the message template here."""
+         # Fetch data
+         relationships = await memory.get("relationships") or {}
+         chat_histories = await memory.get("chat_histories") or {}
+
+         # Build the discussion-topic constraint
+         discussion_constraint = ""
+         if self.to_discuss:
+             topics = ", ".join(f'"{topic}"' for topic in self.to_discuss)
+             discussion_constraint = f"Limit your discussion to the following topics: {topics}."
+
+         # Format the prompt
+         self.format_prompt.format(
+             gender=await memory.get("gender") or "",
+             education=await memory.get("education") or "",
+             personality=await memory.get("personality") or "",
+             occupation=await memory.get("occupation") or "",
+             relationship_score=relationships.get(target, 50),
+             intention=step.get("intention", ""),
+             chat_history=chat_histories.get(target, "") if isinstance(chat_histories, dict) else "",
+             discussion_constraint=discussion_constraint
+         )
+
+         return self.format_prompt.to_dialog()
+
+ class SocialNoneBlock(Block):
+     """
+     No-op social operation
+     NoneBlock
+     """
+     def __init__(self, llm: LLM, memory: Memory):
+         super().__init__("NoneBlock", llm, memory)
+         self.description = "Handle all other cases"
+         self.guidance_prompt = FormatPrompt(template=TIME_ESTIMATE_PROMPT)
+
+     async def forward(self, step, context):
+         self.guidance_prompt.format(intention=step['intention'])
+         result = await self.llm.atext_request(self.guidance_prompt.to_dialog())
+         result = clean_json_response(result)
+         try:
+             result = json.loads(result)
+             return {
+                 'success': True,
+                 'evaluation': f'Finished executing {step["intention"]}',
+                 'consumed_time': result['time']
+             }
+         except Exception as e:
+             logger.warning(f"Error parsing time-estimate response: {str(e)}, original result: {result}")
+             return {
+                 'success': False,
+                 'evaluation': f'Finished executing {step["intention"]}',
+                 'consumed_time': random.randint(1, 100)
+             }
+
+ class FindPersonBlock(Block):
+     """Find a suitable person to socialize with."""
+     def __init__(self, llm: LLM, memory: Memory, simulator: Simulator):
+         super().__init__("FindPersonBlock", llm, memory, simulator)
+         self.description = "Find a suitable person to socialize with"
+
+         self.prompt = """
+         Based on the following information, help me select the most suitable friend to interact with:
+
+         1. My Profile:
+            - Gender: {gender}
+            - Education: {education}
+            - Personality: {personality}
+            - Occupation: {occupation}
+
+         2. My Current Intention: {intention}
+
+         3. My Friends List (shown as index-to-relationship pairs):
+            {friend_info}
+            Note: For each friend, the relationship strength (0-100) indicates how close we are
+
+         Please analyze and select:
+         1. The most appropriate friend based on relationship strength and my current intention
+         2. Whether we should meet online or offline
+
+         Requirements:
+         - You must respond in this exact format: [mode, friend_index]
+         - mode must be either 'online' or 'offline'
+         - friend_index must be an integer representing the friend's position in the list (starting from 0)
+
+         Example valid outputs:
+         ['online', 0] - means meet the first friend online
+         ['offline', 2] - means meet the third friend offline
+         """
+
+     async def forward(self, step: Dict[str, Any], context: Optional[Dict] = None) -> Dict[str, Any]:
+         try:
+             # Get the agent's profile
+             profile = {
+                 "gender": await self.memory.get("gender"),
+                 "education": await self.memory.get("education"),
+                 "personality": await self.memory.get("personality"),
+                 "occupation": await self.memory.get("occupation")
+             }
+
+             # Get the friends list and relationship strengths
+             friends = await self.memory.get("friends") or []
+             relationships = await self.memory.get("relationships") or {}
+
+             if not friends:
+                 return {
+                     'success': False,
+                     'evaluation': 'No friends found in social network',
+                     'consumed_time': 5
+                 }
+
+             # Build a friend list carrying all the needed information
+             friend_info = []
+             index_to_uuid = {}
+
+             for i, friend_id in enumerate(friends):
+                 relationship_strength = relationships.get(friend_id, 0)
+                 friend_info.append({
+                     'index': i,
+                     'relationship_strength': relationship_strength
+                 })
+                 index_to_uuid[i] = friend_id
+
+             # Reformat the friend info into a more readable structure
+             formatted_friend_info = {
+                 i: {'relationship_strength': info['relationship_strength']}
+                 for i, info in enumerate(friend_info)
+             }
+
+             # Format the prompt
+             formatted_prompt = FormatPrompt(self.prompt)
+             formatted_prompt.format(
+                 gender=str(await self.memory.get("gender")),
+                 education=str(await self.memory.get("education")),
+                 personality=str(await self.memory.get("personality")),
+                 occupation=str(await self.memory.get("occupation")),
+                 intention=str(step.get("intention", "socialize")),
+                 friend_info=str(formatted_friend_info)
+             )
+
+             # Get the LLM response
+             response = await self.llm.atext_request(formatted_prompt.to_dialog(), timeout=300)
+
+             try:
+                 # Parse the response
+                 mode, friend_index = eval(response)
+
+                 # Validate the response format
+                 if not isinstance(mode, str) or mode not in ['online', 'offline']:
+                     raise ValueError("Invalid mode")
+                 if not isinstance(friend_index, int) or friend_index not in index_to_uuid:
+                     raise ValueError("Invalid friend index")
+
+                 # Convert the index back to a UUID
+                 target = index_to_uuid[friend_index]
+                 context['target'] = target
+             except Exception as e:
+                 # If parsing fails, fall back to the friend with the strongest relationship
+                 target = max(relationships.items(), key=lambda x: x[1])[0] if relationships else friends[0]
+                 mode = 'online'
+
+             return {
+                 'success': True,
+                 'evaluation': f'Selected friend {target} for {mode} interaction',
+                 'consumed_time': 15,
+                 'mode': mode,
+                 'target': target
+             }
+
+         except Exception as e:
+             return {
+                 'success': False,
+                 'evaluation': f'Error in finding person: {str(e)}',
+                 'consumed_time': 5
+             }
+
+ class MessageBlock(Block):
+     """Generate and send a message."""
+     configurable_fields: List[str] = ["default_message_template", "to_discuss"]
+     default_values = {
+         "default_message_template": """
+         As a {gender} {occupation} with {education} education and {personality} personality,
+         generate a message for a friend (relationship strength: {relationship_score}/100)
+         about {intention}.
+         """,
+         "to_discuss": []
+     }
+
+     def __init__(self, agent, llm: LLM, memory: Memory, simulator: Simulator):
+         super().__init__("MessageBlock", llm, memory, simulator)
+         self.agent = agent
+         self.description = "Generate and send a message to someone"
+         self.find_person_block = FindPersonBlock(llm, memory, simulator)
+
+         # configurable fields
+         self.default_message_template = """
+         As a {gender} {occupation} with {education} education and {personality} personality,
+         generate a message for a friend (relationship strength: {relationship_score}/100)
+         about {intention}.
+
+         Previous chat history:
+         {chat_history}
+
+         Generate a natural and contextually appropriate message.
+         Keep it under 100 characters.
+         The message should reflect my personality and background.
+         {discussion_constraint}
+         """
+         self.to_discuss = []
+
+         self.prompt_manager = MessagePromptManager(self.default_message_template, self.to_discuss)
+
+     def _serialize_message(self, message: str, propagation_count: int) -> str:
+         try:
+             return json.dumps({
+                 "content": message,
+                 "propagation_count": propagation_count
+             }, ensure_ascii=False)
+         except Exception as e:
+             logger.warning(f"Error serializing message: {e}")
+             return message
+
+     async def forward(self, step: Dict[str, Any], context: Optional[Dict] = None) -> Dict[str, Any]:
+         try:
+             # Get target from context or find one
+             target = context.get('target') if context else None
+             if not target:
+                 result = await self.find_person_block.forward(step, context)
+                 if not result['success']:
+                     return {
+                         'success': False,
+                         'evaluation': 'Could not find target for message',
+                         'consumed_time': 5
+                     }
+                 target = result['target']
+
+             # Use the prompt manager to get the formatted prompt
+             formatted_prompt = await self.prompt_manager.get_prompt(
+                 self.memory,
+                 step,
+                 target
+             )
+
+             # Generate message
+             message = await self.llm.atext_request(formatted_prompt, timeout=300)
+             if not message:
+                 message = "Hello! How are you?"
+
+             # Update chat history with proper format
+             chat_histories = await self.memory.get("chat_histories") or {}
+             if not isinstance(chat_histories, dict):
+                 chat_histories = {}
+             if target not in chat_histories:
+                 chat_histories[target] = ""
+             if chat_histories[target]:
+                 chat_histories[target] += ","
+             chat_histories[target] += f"me: {message}"
+
+             await self.memory.update("chat_histories", chat_histories)
+
+             # Send message
+             serialized_message = self._serialize_message(message, 1)
+             return {
+                 'success': True,
+                 'evaluation': f'Sent message to {target}: {message}',
+                 'consumed_time': 10,
+                 'message': message,
+                 'target': target
+             }
+
+         except Exception as e:
+             return {
+                 'success': False,
+                 'evaluation': f'Error in sending message: {str(e)}',
+                 'consumed_time': 5
+             }
+
+ class SocialBlock(Block):
+     """Main social module."""
+     find_person_block: FindPersonBlock
+     message_block: MessageBlock
+     noneblock: SocialNoneBlock
+
+     def __init__(self, agent, llm: LLM, memory: Memory, simulator: Simulator):
+         super().__init__("SocialBlock", llm, memory, simulator)
+         self.find_person_block = FindPersonBlock(llm, memory, simulator)
+         self.message_block = MessageBlock(agent, llm, memory, simulator)
+         self.noneblock = SocialNoneBlock(llm, memory)
+         self.dispatcher = BlockDispatcher(llm)
+
+         self.trigger_time = 0
+         self.token_consumption = 0
+
+         self.dispatcher.register_blocks([
+             self.find_person_block,
+             self.message_block,
+             self.noneblock
+         ])
+
+     async def forward(self, step: Dict[str, Any], context: Optional[Dict] = None) -> Dict[str, Any]:
+         try:
+             self.trigger_time += 1
+             consumption_start = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+
+             # Select the appropriate sub-block using dispatcher
+             selected_block = await self.dispatcher.dispatch(step)
+
+             # Execute the selected sub-block and get the result
+             result = await selected_block.forward(step, context)
+
+             consumption_end = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+             self.token_consumption += consumption_end - consumption_start
+
+             return result
+
+         except Exception:
+             consumption_end = self.llm.prompt_tokens_used + self.llm.completion_tokens_used
+             self.token_consumption += consumption_end - consumption_start
+             return {
+                 'success': True,
+                 'evaluation': 'Completed social interaction with default behavior',
+                 'consumed_time': 15
+             }
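
Usage note (not part of the diff): a minimal sketch of routing a single plan step through the new SocialBlock. The step and context shapes mirror the dicts used above; agent, llm, memory, and simulator are assumed to be already-initialized pycityagent objects, and socialize_once is a hypothetical helper name.

from pycityagent.cityagent.blocks.social_block import SocialBlock

async def socialize_once(agent, llm, memory, simulator):
    block = SocialBlock(agent, llm, memory, simulator)
    # The dispatcher selects FindPersonBlock, MessageBlock, or SocialNoneBlock
    # for the step; FindPersonBlock records its choice under context["target"].
    step = {"intention": "Chat with friends", "type": "social"}
    context = {}
    result = await block.forward(step, context)
    return result  # dict with at least "success", "evaluation", "consumed_time"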