pycityagent 2.0.0a6__py3-none-any.whl → 2.0.0a8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pycityagent/agent.py +29 -5
- pycityagent/environment/interact/interact.py +86 -29
- pycityagent/environment/sence/static.py +3 -2
- pycityagent/environment/sim/aoi_service.py +1 -1
- pycityagent/environment/sim/economy_services.py +1 -1
- pycityagent/environment/sim/road_service.py +1 -1
- pycityagent/environment/sim/social_service.py +1 -1
- pycityagent/environment/simulator.py +6 -4
- pycityagent/environment/utils/__init__.py +5 -1
- pycityagent/llm/__init__.py +1 -1
- pycityagent/llm/embedding.py +36 -35
- pycityagent/llm/llm.py +197 -161
- pycityagent/llm/llmconfig.py +7 -9
- pycityagent/llm/utils.py +2 -2
- pycityagent/memory/memory.py +1 -2
- pycityagent/memory/memory_base.py +1 -2
- pycityagent/memory/profile.py +1 -2
- pycityagent/memory/self_define.py +1 -2
- pycityagent/memory/state.py +1 -2
- pycityagent/message/__init__.py +1 -1
- pycityagent/message/messager.py +11 -4
- pycityagent/simulation/__init__.py +1 -1
- pycityagent/simulation/agentgroup.py +39 -11
- pycityagent/simulation/interview.py +9 -5
- pycityagent/simulation/simulation.py +181 -61
- pycityagent/simulation/survey/__init__.py +1 -6
- pycityagent/simulation/survey/manager.py +22 -21
- pycityagent/simulation/survey/models.py +8 -5
- pycityagent/utils/decorators.py +14 -4
- pycityagent/utils/parsers/__init__.py +2 -1
- pycityagent/workflow/block.py +4 -3
- pycityagent/workflow/prompt.py +16 -9
- pycityagent/workflow/tool.py +1 -2
- pycityagent/workflow/trigger.py +36 -23
- {pycityagent-2.0.0a6.dist-info → pycityagent-2.0.0a8.dist-info}/METADATA +1 -1
- pycityagent-2.0.0a8.dist-info/RECORD +70 -0
- pycityagent-2.0.0a6.dist-info/RECORD +0 -70
- {pycityagent-2.0.0a6.dist-info → pycityagent-2.0.0a8.dist-info}/WHEEL +0 -0
pycityagent/llm/llm.py
CHANGED
@@ -4,6 +4,7 @@ import json
 from openai import OpenAI, AsyncOpenAI, APIConnectionError, OpenAIError
 from zhipuai import ZhipuAI
 import logging
+
 logging.getLogger("zhipuai").setLevel(logging.WARNING)
 
 import asyncio
@@ -19,29 +20,38 @@ from .llmconfig import *
 from .utils import *
 
 import os
+
 os.environ["GRPC_VERBOSITY"] = "ERROR"
 
+
 class LLM:
     """
     大语言模型对象
     The LLM Object used by Agent(Soul)
     """
+
     def __init__(self, config: LLMConfig) -> None:
         self.config = config
-        if config.text[…
+        if config.text["request_type"] not in ["openai", "deepseek", "qwen", "zhipuai"]:
             raise ValueError("Invalid request type for text request")
         self.prompt_tokens_used = 0
         self.completion_tokens_used = 0
         self.request_number = 0
         self.semaphore = None
-        if self.config.text[…
-            self._aclient = AsyncOpenAI(
-            …
-        elif self.config.text[…
-            self._aclient = …
+        if self.config.text["request_type"] == "openai":
+            self._aclient = AsyncOpenAI(
+                api_key=self.config.text["api_key"], timeout=300
+            )
+        elif self.config.text["request_type"] == "deepseek":
+            self._aclient = AsyncOpenAI(
+                api_key=self.config.text["api_key"],
+                base_url="https://api.deepseek.com/beta",
+                timeout=300,
+            )
+        elif self.config.text["request_type"] == "zhipuai":
+            self._aclient = ZhipuAI(api_key=self.config.text["api_key"], timeout=300)
 
-    def set_semaphore(self, number_of_coroutine:int):
+    def set_semaphore(self, number_of_coroutine: int):
         self.semaphore = asyncio.Semaphore(number_of_coroutine)
 
     def clear_semaphore(self):
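Note: `__init__` now builds the async client up front, keyed on `request_type`: `openai` and `deepseek` get an `AsyncOpenAI` client (DeepSeek via its beta base URL), `zhipuai` gets a `ZhipuAI` client, and `qwen` is handled per-request further down. A minimal construction sketch against the config schema this diff reads; the import paths, model names, and API-key values are placeholders inferred from the diff, not documented API:

# Sketch only: config keys mirror what __init__ and LLMConfig read in this
# diff; model names and keys are placeholders.
from pycityagent.llm.llmconfig import LLMConfig
from pycityagent.llm.llm import LLM

config = LLMConfig(
    {
        "text_request": {
            "request_type": "deepseek",  # openai | deepseek | qwen | zhipuai
            "api_key": "sk-...",
            "model": "deepseek-chat",  # placeholder model name
        },
        "img_understand_request": {"request_type": "openai", "api_key": "sk-...", "model": "gpt-4o"},
        "img_generate_request": {"api_key": "sk-...", "model": "wanx-v1"},
    }
)
llm = LLM(config)  # builds AsyncOpenAI here; "qwen" defers to dashscope/aiohttp per request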
@@ -56,19 +66,21 @@ class LLM:
         self.completion_tokens_used = 0
         self.request_number = 0
 
-    def show_consumption(…
+    def show_consumption(
+        self, input_price: Optional[float] = None, output_price: Optional[float] = None
+    ):
         """
         if you give the input and output price of using model, this function will also calculate the consumption for you
         """
         total_token = self.prompt_tokens_used + self.completion_tokens_used
         if self.completion_tokens_used != 0:
-            rate = self.prompt_tokens_used/self.completion_tokens_used
+            rate = self.prompt_tokens_used / self.completion_tokens_used
         else:
-            rate = …
+            rate = "nan"
         if self.request_number != 0:
-            TcA = total_token/self.request_number
+            TcA = total_token / self.request_number
         else:
-            TcA = …
+            TcA = "nan"
         out = f"""Request Number: {self.request_number}
 Token Usage:
     - Total tokens: {total_token}
@@ -77,23 +89,30 @@ Token Usage:
     - Token per request: {TcA}
     - Prompt:Completion ratio: {rate}:1"""
         if input_price != None and output_price != None:
-            consumption = …
+            consumption = (
+                self.prompt_tokens_used / 1000000 * input_price
+                + self.completion_tokens_used / 1000000 * output_price
+            )
             out += f"\n    - Cost Estimation: {consumption}"
         print(out)
-        return {…
-            …
+        return {
+            "total": total_token,
+            "prompt": self.prompt_tokens_used,
+            "completion": self.completion_tokens_used,
+            "ratio": rate,
+        }
 
     def text_request(
-        …
+        self,
+        dialog: Any,
+        temperature: float = 1,
+        max_tokens: Optional[int] = None,
+        top_p: Optional[float] = None,
+        frequency_penalty: Optional[float] = None,
+        presence_penalty: Optional[float] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Dict[str, Any]] = None,
+    ) -> Optional[str]:
         """
         文本相关请求
         Text request
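Note: the reflowed `consumption` expression prices tokens per million. A worked instance of the same arithmetic, with hypothetical prices:

# Hypothetical per-1M-token prices; same formula as the hunk above.
prompt_tokens_used = 200_000
completion_tokens_used = 50_000
input_price, output_price = 0.5, 1.5  # USD per 1,000,000 tokens

consumption = (
    prompt_tokens_used / 1000000 * input_price
    + completion_tokens_used / 1000000 * output_price
)
print(consumption)  # 0.2 * 0.5 + 0.05 * 1.5 = 0.175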
@@ -109,17 +128,17 @@ Token Usage:
         Returns:
         - (str): the response content
         """
-        if …
-            api_base = self.config.text[…
+        if "api_base" in self.config.text.keys():
+            api_base = self.config.text["api_base"]
         else:
             api_base = None
-        if self.config.text[…
+        if self.config.text["request_type"] == "openai":
             client = OpenAI(
-                api_key=self.config.text[…
+                api_key=self.config.text["api_key"],
                 base_url=api_base,
             )
             response = client.chat.completions.create(
-                model=self.config.text[…
+                model=self.config.text["model"],
                 messages=dialog,
                 temperature=temperature,
                 max_tokens=max_tokens,
@@ -127,33 +146,33 @@ Token Usage:
                 frequency_penalty=frequency_penalty,
                 presence_penalty=presence_penalty,
                 tools=tools,
-                tool_choice=tool_choice
+                tool_choice=tool_choice,
             )
-            self.prompt_tokens_used += response.usage.prompt_tokens
-            self.completion_tokens_used += response.usage.completion_tokens
+            self.prompt_tokens_used += response.usage.prompt_tokens  # type: ignore
+            self.completion_tokens_used += response.usage.completion_tokens  # type: ignore
             self.request_number += 1
             if tools != None:
                 return response.tool_calls[0].function.arguments
             else:
                 return response.choices[0].message.content
-        elif self.config.text[…
+        elif self.config.text["request_type"] == "qwen":
             response = dashscope.Generation.call(
-                model=self.config.text[…
-                api_key=self.config.text[…
+                model=self.config.text["model"],
+                api_key=self.config.text["api_key"],
                 messages=dialog,
-                result_format=…
+                result_format="message",
             )
-            if response.status_code == HTTPStatus.OK:
-                return response.output.choices[0][…
+            if response.status_code == HTTPStatus.OK:  # type: ignore
+                return response.output.choices[0]["message"]["content"]  # type: ignore
             else:
-                return "Error: {}, {}".format(response.status_code, response.message)
-        elif self.config.text[…
+                return "Error: {}, {}".format(response.status_code, response.message)  # type: ignore
+        elif self.config.text["request_type"] == "deepseek":
             client = OpenAI(
-                api_key=self.config.text[…
+                api_key=self.config.text["api_key"],
                 base_url="https://api.deepseek.com/beta",
             )
             response = client.chat.completions.create(
-                model=self.config.text[…
+                model=self.config.text["model"],
                 messages=dialog,
                 temperature=temperature,
                 max_tokens=max_tokens,
@@ -162,63 +181,66 @@ Token Usage:
                 presence_penalty=presence_penalty,
                 stream=False,
             )
-            self.prompt_tokens_used += response.usage.prompt_tokens
-            self.completion_tokens_used += response.usage.completion_tokens
+            self.prompt_tokens_used += response.usage.prompt_tokens  # type: ignore
+            self.completion_tokens_used += response.usage.completion_tokens  # type: ignore
             self.request_number += 1
             return response.choices[0].message.content
-        elif self.config.text[…
-            client = ZhipuAI(api_key=self.config.text[…
+        elif self.config.text["request_type"] == "zhipuai":
+            client = ZhipuAI(api_key=self.config.text["api_key"])
             response = client.chat.completions.create(
-                model=self.config.text[…
+                model=self.config.text["model"],
                 messages=dialog,
                 temperature=temperature,
                 top_p=top_p,
-                stream=False
+                stream=False,
             )
-            self.prompt_tokens_used += response.usage.prompt_tokens
-            self.completion_tokens_used += response.usage.completion_tokens
+            self.prompt_tokens_used += response.usage.prompt_tokens  # type: ignore
+            self.completion_tokens_used += response.usage.completion_tokens  # type: ignore
             self.request_number += 1
-            return response.choices[0].message.content
+            return response.choices[0].message.content  # type: ignore
         else:
             print("ERROR: Wrong Config")
             return "wrong config"
-
+
     async def atext_request(
-        …
+        self,
+        dialog: Any,
+        temperature: float = 1,
+        max_tokens: Optional[int] = None,
+        top_p: Optional[float] = None,
+        frequency_penalty: Optional[float] = None,
+        presence_penalty: Optional[float] = None,
+        timeout: int = 300,
+        retries=3,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Dict[str, Any]] = None,
+    ):
         """
         异步版文本请求
         """
-        if …
+        if (
+            self.config.text["request_type"] == "openai"
+            or self.config.text["request_type"] == "deepseek"
+        ):
             for attempt in range(retries):
                 try:
                     if self.semaphore != None:
                         async with self.semaphore:
                             response = await self._aclient.chat.completions.create(
-                                model=self.config.text[…
+                                model=self.config.text["model"],
                                 messages=dialog,
                                 temperature=temperature,
                                 max_tokens=max_tokens,
                                 top_p=top_p,
-                                frequency_penalty=frequency_penalty,
-                                presence_penalty=presence_penalty,
+                                frequency_penalty=frequency_penalty,  # type: ignore
+                                presence_penalty=presence_penalty,  # type: ignore
                                 stream=False,
                                 timeout=timeout,
                                 tools=tools,
-                                tool_choice=tool_choice
-                            )
-                            self.prompt_tokens_used += response.usage.prompt_tokens
-                            self.completion_tokens_used += response.usage.completion_tokens
+                                tool_choice=tool_choice,
+                            )  # type: ignore
+                            self.prompt_tokens_used += response.usage.prompt_tokens  # type: ignore
+                            self.completion_tokens_used += response.usage.completion_tokens  # type: ignore
                             self.request_number += 1
                             if tools != None:
                                 return response.tool_calls[0].function.arguments
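Note: `atext_request` only enters `async with self.semaphore` when `set_semaphore` was called beforehand; otherwise the unguarded branch in the next hunk runs. A hedged fan-out sketch (the `llm` object and dialogs are placeholders):

import asyncio

async def ask_all(llm, dialogs):
    # Cap concurrency at 8 in-flight completions; without this call,
    # atext_request takes the unguarded else-branch below.
    llm.set_semaphore(8)
    try:
        return await asyncio.gather(
            *(llm.atext_request(d, temperature=0.7) for d in dialogs)
        )
    finally:
        llm.clear_semaphore()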
@@ -226,20 +248,20 @@ Token Usage:
                             return response.choices[0].message.content
                     else:
                         response = await self._aclient.chat.completions.create(
-                            model=self.config.text[…
+                            model=self.config.text["model"],
                             messages=dialog,
                             temperature=temperature,
                             max_tokens=max_tokens,
                             top_p=top_p,
-                            frequency_penalty=frequency_penalty,
-                            presence_penalty=presence_penalty,
+                            frequency_penalty=frequency_penalty,  # type: ignore
+                            presence_penalty=presence_penalty,  # type: ignore
                             stream=False,
                             timeout=timeout,
                             tools=tools,
-                            tool_choice=tool_choice
-                        )
-                        self.prompt_tokens_used += response.usage.prompt_tokens
-                        self.completion_tokens_used += response.usage.completion_tokens
+                            tool_choice=tool_choice,
+                        )  # type: ignore
+                        self.prompt_tokens_used += response.usage.prompt_tokens  # type: ignore
+                        self.completion_tokens_used += response.usage.completion_tokens  # type: ignore
                         self.request_number += 1
                         if tools != None:
                             return response.tool_calls[0].function.arguments
@@ -248,77 +270,89 @@ Token Usage:
                 except APIConnectionError as e:
                     print("API connection error:", e)
                     if attempt < retries - 1:
-                        await asyncio.sleep(2…
+                        await asyncio.sleep(2**attempt)
                     else:
                         raise e
                 except OpenAIError as e:
-                    if hasattr(e, …
-                        print(f"HTTP status code: {e.http_status}")
+                    if hasattr(e, "http_status"):
+                        print(f"HTTP status code: {e.http_status}")  # type: ignore
                     else:
                         print("An error occurred:", e)
                     if attempt < retries - 1:
-                        await asyncio.sleep(2…
+                        await asyncio.sleep(2**attempt)
                     else:
                         raise e
-        elif self.config.text[…
+        elif self.config.text["request_type"] == "zhipuai":
             for attempt in range(retries):
                 try:
-                    response = self._aclient.chat.asyncCompletions.create(
-                        model=self.config.text[…
+                    response = self._aclient.chat.asyncCompletions.create(  # type: ignore
+                        model=self.config.text["model"],
                         messages=dialog,
                         temperature=temperature,
                         top_p=top_p,
                         timeout=timeout,
                         tools=tools,
-                        tool_choice=tool_choice
+                        tool_choice=tool_choice,
                     )
                     task_id = response.id
-                    task_status = …
+                    task_status = ""
                     get_cnt = 0
-                    cnt_threshold = int(timeout/0.5)
-                    while …
-                        …
+                    cnt_threshold = int(timeout / 0.5)
+                    while (
+                        task_status != "SUCCESS"
+                        and task_status != "FAILED"
+                        and get_cnt <= cnt_threshold
+                    ):
+                        result_response = self._aclient.chat.asyncCompletions.retrieve_completion_result(id=task_id)  # type: ignore
                         task_status = result_response.task_status
                         await asyncio.sleep(0.5)
                         get_cnt += 1
-                    if task_status != …
+                    if task_status != "SUCCESS":
                         raise Exception(f"Task failed with status: {task_status}")
 
-                    self.prompt_tokens_used += result_response.usage.prompt_tokens
-                    self.completion_tokens_used += result_response.usage.completion_tokens
+                    self.prompt_tokens_used += result_response.usage.prompt_tokens  # type: ignore
+                    self.completion_tokens_used += result_response.usage.completion_tokens  # type: ignore
                     self.request_number += 1
                     if tools and result_response.choices[0].message.tool_calls:
-                        return json.loads(…
+                        return json.loads(
+                            result_response.choices[0]
+                            .message.tool_calls[0]
+                            .function.arguments
+                        )
                     else:
-                        return result_response.choices[0].message.content
+                        return result_response.choices[0].message.content  # type: ignore
                 except APIConnectionError as e:
                     print("API connection error:", e)
                     if attempt < retries - 1:
-                        await asyncio.sleep(2…
+                        await asyncio.sleep(2**attempt)
                     else:
                         raise e
-        elif self.config.text[…
+        elif self.config.text["request_type"] == "qwen":
             async with aiohttp.ClientSession() as session:
                 api_url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
-                headers = {…
+                headers = {
+                    "Content-Type": "application/json",
+                    "Authorization": f"{self.config.text['api_key']}",
+                }
                 payload = {
-                    …
-                    …
-                    'messages': dialog
-                }
+                    "model": self.config.text["model"],
+                    "input": {"messages": dialog},
                 }
                 async with session.post(api_url, json=payload, headers=headers) as resp:
                     response_json = await resp.json()
-                    if …
-                        raise Exception(…
+                    if "code" in response_json.keys():
+                        raise Exception(
+                            f"Error: {response_json['code']}, {response_json['message']}"
+                        )
                     else:
-                        return response_json[…
+                        return response_json["output"]["text"]
         else:
             print("ERROR: Wrong Config")
             return "wrong config"
-
 
-    async def img_understand(…
+    async def img_understand(
+        self, img_path: Union[str, list[str]], prompt: Optional[str] = None
+    ) -> str:
         """
         图像理解
         Image understanding
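Note: both retry loops back off for `2**attempt` seconds after a failed attempt and re-raise once `retries` is exhausted. The same pattern in isolation, with `ConnectionError` standing in for the SDK exceptions used above:

import asyncio

async def with_backoff(make_request, retries: int = 3):
    # Stand-in sketch for the APIConnectionError/OpenAIError handling above.
    for attempt in range(retries):
        try:
            return await make_request()
        except ConnectionError as e:
            if attempt < retries - 1:
                await asyncio.sleep(2**attempt)  # 1s, 2s, ... between attempts
            else:
                raise e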
@@ -333,73 +367,73 @@ Token Usage:
         ppt = "如何理解这幅图像?"
         if prompt != None:
             ppt = prompt
-        if self.config.image_u[…
-            if …
-                api_base = self.config.image_u[…
+        if self.config.image_u["request_type"] == "openai":
+            if "api_base" in self.config.image_u.keys():
+                api_base = self.config.image_u["api_base"]
             else:
                 api_base = None
             client = OpenAI(
-                api_key=self.config.text[…
+                api_key=self.config.text["api_key"],
                 base_url=api_base,
             )
             content = []
-            content.append({…
+            content.append({"type": "text", "text": ppt})
             if isinstance(img_path, str):
                 base64_image = encode_image(img_path)
-                content.append(…
-                …
-                …
-                …
-                …
-            elif isinstance(img_path, list) and all(…
+                content.append(
+                    {
+                        "type": "image_url",
+                        "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
+                    }
+                )
+            elif isinstance(img_path, list) and all(
+                isinstance(item, str) for item in img_path
+            ):
                 for item in img_path:
                     base64_image = encode_image(item)
-                    content.append(…
-                    …
-                    …
+                    content.append(
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": f"data:image/jpeg;base64,{base64_image}"
+                            },
                         }
-                    …
+                    )
             response = client.chat.completions.create(
-                model=self.config.image_u[…
-                messages=[{…
-                    'role': 'user',
-                    'content': content
-                }]
+                model=self.config.image_u["model"],
+                messages=[{"role": "user", "content": content}],
             )
-            return response.choices[0].message.content
-        elif self.config.image_u[…
+            return response.choices[0].message.content  # type: ignore
+        elif self.config.image_u["request_type"] == "qwen":
             content = []
             if isinstance(img_path, str):
-                content.append({…
-                content.append({…
-            elif isinstance(img_path, list) and all(…
+                content.append({"image": "file://" + img_path})
+                content.append({"text": ppt})
+            elif isinstance(img_path, list) and all(
+                isinstance(item, str) for item in img_path
+            ):
                 for item in img_path:
-                    content.append({…
-                    …
-                    })
-                content.append({'text': ppt})
+                    content.append({"image": "file://" + item})
+                content.append({"text": ppt})
 
-            dialog = [{…
-                'role': 'user',
-                'content': content
-            }]
+            dialog = [{"role": "user", "content": content}]
             response = dashscope.MultiModalConversation.call(
-                …
-                …
-                …
-                …
-            if response.status_code == HTTPStatus.OK:
-                return response.output.choices[0][…
+                model=self.config.image_u["model"],
+                api_key=self.config.image_u["api_key"],
+                messages=dialog,
+            )
+            if response.status_code == HTTPStatus.OK:  # type: ignore
+                return response.output.choices[0]["message"]["content"]  # type: ignore
             else:
                 print(response.code)  # type: ignore # The error code.
                 return "Error"
         else:
-            print(…
+            print(
+                "ERROR: wrong image understanding type, only 'openai' and 'openai' is available"
+            )
             return "Error"
 
-    async def img_generate(self, prompt:str, size:str=…
+    async def img_generate(self, prompt: str, size: str = "512*512", quantity: int = 1):
         """
         图像生成
         Image generation
@@ -413,11 +447,11 @@ Token Usage:
         - (list[PIL.Image.Image]): 生成的图像列表. The list of generated Images.
         """
         rsp = ImageSynthesis.call(
-            …
-            …
-            …
-            …
-            …
+            model=self.config.image_g["model"],
+            api_key=self.config.image_g["api_key"],
+            prompt=prompt,
+            n=quantity,
+            size=size,
        )
         if rsp.status_code == HTTPStatus.OK:
             res = []
@@ -425,6 +459,8 @@ Token Usage:
                 res.append(Image.open(BytesIO(requests.get(result.url).content)))
             return res
         else:
-            print(…
-            …
-            …
+            print(
+                "Failed, status_code: %s, code: %s, message: %s"
+                % (rsp.status_code, rsp.code, rsp.message)
+            )
+            return None
pycityagent/llm/llmconfig.py
CHANGED
@@ -3,13 +3,11 @@ class LLMConfig:
     大语言模型相关配置
     The config of LLM
     """
-    def __init__(
-        self,
-        config: dict
-    ) -> None:
+
+    def __init__(self, config: dict) -> None:
         self.config = config
-        self.text = config[…
-        if …
-            self.text[…
-        self.image_u = config[…
-        self.image_g = config[…
+        self.text = config["text_request"]
+        if "api_base" in self.text.keys() and self.text["api_base"] == "None":
+            self.text["api_base"] = None
+        self.image_u = config["img_understand_request"]
+        self.image_g = config["img_generate_request"]
pycityagent/llm/utils.py
CHANGED
pycityagent/memory/memory.py
CHANGED
@@ -2,8 +2,7 @@ import asyncio
 import logging
 from copy import deepcopy
 from datetime import datetime
-from typing import (Any, Callable, Dict, List, Literal, Optional, Sequence,
-                    Tuple, Union)
+from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union
 
 import numpy as np
 from pyparsing import deque
pycityagent/memory/memory_base.py
CHANGED
@@ -6,8 +6,7 @@ import asyncio
 import logging
 import time
 from abc import ABC, abstractmethod
-from typing import (Any, Callable, Dict, Iterable, List, Optional, Sequence,
-                    Tuple, Union)
+from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
 
 from .const import *
 