aient 1.1.16-py3-none-any.whl → 1.1.18-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aient/core/request.py +18 -5
- aient/core/response.py +23 -3
- aient/core/utils.py +23 -4
- aient/plugins/excute_command.py +3 -0
- {aient-1.1.16.dist-info → aient-1.1.18.dist-info}/METADATA +1 -1
- {aient-1.1.16.dist-info → aient-1.1.18.dist-info}/RECORD +9 -9
- {aient-1.1.16.dist-info → aient-1.1.18.dist-info}/WHEEL +1 -1
- {aient-1.1.16.dist-info → aient-1.1.18.dist-info}/licenses/LICENSE +0 -0
- {aient-1.1.16.dist-info → aient-1.1.18.dist-info}/top_level.txt +0 -0
aient/core/request.py
CHANGED
@@ -4,7 +4,7 @@ import httpx
 import base64
 import urllib.parse
 
-from .models import RequestModel
+from .models import RequestModel, Message
 from .utils import (
     c3s,
     c3o,
@@ -50,7 +50,12 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
     systemInstruction = None
     system_prompt = ""
     function_arguments = None
-    for msg in request.messages:
+
+    try:
+        request_messages = [Message(role="user", content=request.prompt)]
+    except:
+        request_messages = request.messages
+    for msg in request_messages:
         if msg.role == "assistant":
             msg.role = "model"
         tool_calls = None
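The try/except above lets an images-style request (one that carries a `prompt` attribute) reuse the chat pipeline as a single user message, while ordinary chat requests fall back to `request.messages`. A minimal sketch of that fallback, using stand-in classes rather than the real RequestModel/Message from aient.core.models:

# Stand-in classes; the real Message lives in aient.core.models.
class Message:
    def __init__(self, role, content):
        self.role, self.content = role, content

class ChatRequest:
    messages = [Message("user", "hi")]   # chat-style request: no .prompt

class ImageRequest:
    prompt = "a cat"                     # images-style request: no .messages

def normalize(request):
    try:
        # An images-style request exposes .prompt; touching it on a chat
        # request raises AttributeError and we fall back to .messages.
        return [Message(role="user", content=request.prompt)]
    except AttributeError:
        return request.messages

assert normalize(ImageRequest())[0].content == "a cat"
assert normalize(ChatRequest())[0].content == "hi"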
@@ -104,9 +109,10 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         elif msg.role == "system":
             content[0]["text"] = re.sub(r"_+", "_", content[0]["text"])
             system_prompt = system_prompt + "\n\n" + content[0]["text"]
-            systemInstruction = {"parts": [{"text": system_prompt}]}
+    if system_prompt.strip():
+        systemInstruction = {"parts": [{"text": system_prompt}]}
 
-    if any(off_model in original_model for off_model in gemini_max_token_65k_models):
+    if any(off_model in original_model for off_model in gemini_max_token_65k_models) or original_model == "gemini-2.0-flash-preview-image-generation":
         safety_settings = "OFF"
     else:
         safety_settings = "BLOCK_NONE"
@@ -160,6 +166,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         'top_logprobs',
         'response_format',
         'stream_options',
+        'prompt',
     ]
     generation_config = {}
 
@@ -214,6 +221,12 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
     else:
         payload["generationConfig"]["maxOutputTokens"] = 8192
 
+    if original_model == "gemini-2.0-flash-preview-image-generation":
+        payload["generationConfig"]["response_modalities"] = [
+            "Text",
+            "Image",
+        ]
+
     if "gemini-2.5" in original_model:
         payload["generationConfig"]["thinkingConfig"] = {
             "includeThoughts": True,
@@ -241,7 +254,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         if key == request.model:
             for k, v in value.items():
                 payload[k] = v
-        elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
+        elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude", "deepseek"]) and "-" not in key:
            payload[key] = value
 
     return url, headers, payload
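The last hunk tightens the provider-settings pass-through: loose keys are now copied only for models outside the gemini/gpt/claude/deepseek families, and keys containing "-" (which typically name other models) are skipped. An illustrative reduction of that loop, with made-up setting names rather than the package's real provider config:

# Illustrative reduction of the tightened pass-through guard.
request_model = "qwen-long"
settings = {"temperature": 0.3, "gemini-2.5-pro": {"maxOutputTokens": 65536}}

payload = {}
for key, value in settings.items():
    if key == request_model:
        for k, v in value.items():
            payload[k] = v
    # New guard: copy loose keys only for models outside the known families,
    # and skip keys that look like model names (they contain "-").
    elif all(m not in request_model.lower()
             for m in ["gemini", "gpt", "claude", "deepseek"]) and "-" not in key:
        payload[key] = value

print(payload)   # {'temperature': 0.3}

With the old condition, the `gemini-2.5-pro` sub-dict would have been copied into the payload of an unrelated model; the new `"-" not in key` guard filters it out.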
aient/core/response.py
CHANGED
@@ -36,6 +36,7 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
     candidatesTokenCount = 0
     totalTokenCount = 0
     parts_json = ""
+    image_base64 = ""
     # line_index = 0
     # last_text_line = 0
     # if "thinking" in model:
@@ -67,17 +68,25 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
         if (line and '"parts": [' in line or parts_json != "") and is_finish == False:
             parts_json += line
             if parts_json != "" and line and '],' == line.strip():
-                tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{") + "}]}"
+                # tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{") + "}]}"
+                tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{")
+                if "inlineData" in tmp_parts_json:
+                    tmp_parts_json = tmp_parts_json + "}}]}"
+                else:
+                    tmp_parts_json = tmp_parts_json + "}]}"
                 try:
                     json_data = json.loads(tmp_parts_json)
 
                     content = safe_get(json_data, "parts", 0, "text", default="")
+                    b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
+                    if b64_json:
+                        image_base64 = b64_json
 
                     is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
                     if is_thinking:
                         sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
                         yield sse_string
-                    else:
+                    elif not image_base64:
                         sse_string = await generate_sse_response(timestamp, model, content=content)
                         yield sse_string
                 except json.JSONDecodeError:
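The hand-closed JSON here is subtle: the parser accumulates raw lines of the `"parts": [...]` block, strips the trailing brackets, and re-closes the fragment itself. A text part needs `}]}`, but an `inlineData` part nests one object deeper, hence the new `}}]}` branch. A self-contained check of both closings, assuming fragments shaped the way the code's string surgery implies:

import json

# A text part re-closes with "}]}"; an inlineData part nests one level
# deeper ({"inlineData": {...}}), so it needs the extra brace in "}}]}".
text_fragment = '{"parts": [{"text": "hello"'
assert json.loads(text_fragment + "}]}")["parts"][0]["text"] == "hello"

image_fragment = '{"parts": [{"inlineData": {"mimeType": "image/png", "data": "aGk="'
parsed = json.loads(image_fragment + "}}]}")
assert parsed["parts"][0]["inlineData"]["data"] == "aGk="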
@@ -93,6 +102,10 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
 
            function_full_response += line
 
+    if image_base64:
+        yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=totalTokenCount, prompt_tokens=promptTokenCount, completion_tokens=candidatesTokenCount, image_base64=image_base64)
+        return
+
     if need_function_call:
         function_call = json.loads(function_full_response)
         function_call_name = function_call["functionCall"]["name"]
@@ -102,6 +115,9 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
         sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
         yield sse_string
 
+    sse_string = await generate_sse_response(timestamp, model, stop="stop")
+    yield sse_string
+
     sse_string = await generate_sse_response(timestamp, model, None, None, None, None, None, totalTokenCount, promptTokenCount, candidatesTokenCount)
     yield sse_string
 
@@ -535,9 +551,13 @@ async def fetch_response(client, url, headers, payload, engine, model):
         # print("parsed_data", json.dumps(parsed_data, indent=4, ensure_ascii=False))
         content = ""
         reasoning_content = ""
+        image_base64 = ""
         parts_list = safe_get(parsed_data, 0, "candidates", 0, "content", "parts", default=[])
         for item in parts_list:
             chunk = safe_get(item, "text")
+            b64_json = safe_get(item, "inlineData", "data", default="")
+            if b64_json:
+                image_base64 = b64_json
             is_think = safe_get(item, "thought", default=False)
             # logger.info(f"chunk: {repr(chunk)}")
             if chunk:
@@ -571,7 +591,7 @@ async def fetch_response(client, url, headers, payload, engine, model):
         function_call_content = safe_get(parsed_data, -1, "candidates", 0, "content", "parts", 0, "functionCall", "args", default=None)
 
         timestamp = int(datetime.timestamp(datetime.now()))
-        yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=function_call_name, function_call_content=function_call_content, role=role, total_tokens=total_tokens, prompt_tokens=prompt_tokens, completion_tokens=candidates_tokens, reasoning_content=reasoning_content)
+        yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=function_call_name, function_call_content=function_call_content, role=role, total_tokens=total_tokens, prompt_tokens=prompt_tokens, completion_tokens=candidates_tokens, reasoning_content=reasoning_content, image_base64=image_base64)
 
     elif engine == "claude":
         response_json = response.json()
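The non-stream path now also harvests `inlineData.data` from each part. A sketch of that walk against a representative Gemini response body; `safe_get` here is a stand-in that only mimics the nested-lookup-with-default behaviour the real helper in aient.core.utils provides:

# Stand-in for aient's safe_get: walk nested keys/indices, default on miss.
def safe_get(data, *keys, default=None):
    for key in keys:
        try:
            data = data[key]
        except (KeyError, IndexError, TypeError):
            return default
    return data

parsed_data = [{"candidates": [{"content": {"parts": [
    {"text": "here is your image"},
    {"inlineData": {"mimeType": "image/png", "data": "aGk="}},
]}}]}]

image_base64 = ""
for item in safe_get(parsed_data, 0, "candidates", 0, "content", "parts", default=[]):
    b64_json = safe_get(item, "inlineData", "data", default="")
    if b64_json:
        image_base64 = b64_json

print(image_base64)   # aGk=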
aient/core/utils.py
CHANGED
@@ -112,7 +112,7 @@ def get_engine(provider, endpoint=None, original_model=""):
     if provider.get("engine"):
         engine = provider["engine"]
 
-    if endpoint == "/v1/images/generations" or "stable-diffusion" in original_model:
+    if engine != "gemini" and (endpoint == "/v1/images/generations" or "stable-diffusion" in original_model):
         engine = "dalle"
         stream = False
 
@@ -449,7 +449,7 @@ end_of_line = "\n\n"
 
 import random
 import string
-async def generate_sse_response(timestamp, model, content=None, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=0, prompt_tokens=0, completion_tokens=0, reasoning_content=None):
+async def generate_sse_response(timestamp, model, content=None, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=0, prompt_tokens=0, completion_tokens=0, reasoning_content=None, stop=None):
     random.seed(timestamp)
     random_str = ''.join(random.choices(string.ascii_letters + string.digits, k=29))
 
@@ -467,7 +467,7 @@ async def generate_sse_response(timestamp, model, content=None, tools_id=None, f
                 "index": 0,
                 "delta": delta_content,
                 "logprobs": None,
-                "finish_reason": None if content else "stop"
+                "finish_reason": None if content or reasoning_content else "stop"
             }
         ],
         "usage": None,
@@ -484,14 +484,19 @@ async def generate_sse_response(timestamp, model, content=None, tools_id=None, f
         total_tokens = prompt_tokens + completion_tokens
         sample_data["usage"] = {"prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}
         sample_data["choices"] = []
+    if stop:
+        sample_data["choices"][0]["delta"] = {}
+        sample_data["choices"][0]["finish_reason"] = stop
+
     json_data = json.dumps(sample_data, ensure_ascii=False)
+    # print("json_data", json.dumps(sample_data, indent=4, ensure_ascii=False))
 
     # 构建SSE响应 (build the SSE response)
     sse_response = f"data: {json_data}" + end_of_line
 
     return sse_response
 
-async def generate_no_stream_response(timestamp, model, content=None, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=0, prompt_tokens=0, completion_tokens=0, reasoning_content=None):
+async def generate_no_stream_response(timestamp, model, content=None, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=0, prompt_tokens=0, completion_tokens=0, reasoning_content=None, image_base64=None):
     random.seed(timestamp)
     random_str = ''.join(random.choices(string.ascii_letters + string.digits, k=29))
     message = {
@@ -554,11 +559,25 @@ async def generate_no_stream_response(timestamp, model, content=None, tools_id=N
         "system_fingerprint": "fp_4691090a87"
     }
 
+    if image_base64:
+        sample_data = {
+            "created": timestamp,
+            "data": [{
+                "b64_json": image_base64
+            }],
+            # "usage": {
+            #     "total_tokens": 100,
+            #     "input_tokens": 50,
+            #     "output_tokens": 50,
+            # }
+        }
+
     if total_tokens:
         total_tokens = prompt_tokens + completion_tokens
         sample_data["usage"] = {"prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}
 
     json_data = json.dumps(sample_data, ensure_ascii=False)
+    # print("json_data", json.dumps(sample_data, indent=4, ensure_ascii=False))
 
     return json_data
 
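Two behaviours fall out of these utils changes. First, `finish_reason` is no longer forced to "stop" on reasoning-only chunks; second, an explicit `stop="stop"` call emits a dedicated chunk with an empty delta, and `image_base64=...` switches the non-stream body to an images-API-style `{"created": ..., "data": [{"b64_json": ...}]}` shape. A toy reduction of just the finish_reason rule:

# Toy reduction of the finish_reason fix: reasoning-only chunks must not be
# marked finished; only a truly empty chunk closes the stream implicitly.
def finish_reason(content, reasoning_content):
    return None if content or reasoning_content else "stop"

assert finish_reason("hi", None) is None
assert finish_reason(None, "thinking...") is None   # fixed: was "stop" before
assert finish_reason(None, None) == "stop"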
aient/plugins/excute_command.py
CHANGED
@@ -185,6 +185,9 @@ def excute_command(command):
            # print(f"is_same: {is_same}", flush=True)
            # print(f"\n\n\n", flush=True)
            new_output_lines.append(line)
+    # 限制输出行数 (limit the number of output lines)
+    if len(new_output_lines) > 500:
+        new_output_lines = new_output_lines[:250] + new_output_lines[-250:]
     final_output_log = "\n".join(new_output_lines)
     # print(f"output_lines: {len(new_output_lines)}")
 
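The cap keeps captured command output bounded: anything past 500 lines collapses to the first and last 250. A quick check of the slicing:

# The new cap keeps the first and last 250 lines once output exceeds 500.
new_output_lines = [f"line {i}" for i in range(1000)]
if len(new_output_lines) > 500:
    new_output_lines = new_output_lines[:250] + new_output_lines[-250:]

assert len(new_output_lines) == 500
assert new_output_lines[0] == "line 0" and new_output_lines[-1] == "line 999"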
{aient-1.1.16.dist-info → aient-1.1.18.dist-info}/RECORD
CHANGED
@@ -4,9 +4,9 @@ aient/core/.gitignore,sha256=5JRRlYYsqt_yt6iFvvzhbqh2FTUQMqwo6WwIuFzlGR8,13
 aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 aient/core/models.py,sha256=kF-HLi1I2k_G5r153ZHuiGH8_NmpTlFMfK0_myB28YQ,7366
-aient/core/request.py,sha256=
-aient/core/response.py,sha256=
-aient/core/utils.py,sha256
+aient/core/request.py,sha256=nvF_V71svezQ0-UbnC9RB_pXo_wV6QC7WE_SANwQzxE,66195
+aient/core/response.py,sha256=YphzhA9jtQKzWb3L4XGTp9xJZ2FOzHr1aAMTsi896FQ,33201
+aient/core/utils.py,sha256=VQ9uutGRR_JOvECOrjeoRBO2aA6w-pGwoXnnS2UvfPU,27263
 aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
 aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
@@ -23,7 +23,7 @@ aient/models/vertex.py,sha256=qVD5l1Q538xXUPulxG4nmDjXE1VoV4yuAkTCpIeJVw0,16795
 aient/plugins/__init__.py,sha256=p3KO6Aa3Lupos4i2SjzLQw1hzQTigOAfEHngsldrsyk,986
 aient/plugins/arXiv.py,sha256=yHjb6PS3GUWazpOYRMKMzghKJlxnZ5TX8z9F6UtUVow,1461
 aient/plugins/config.py,sha256=Vp6CG9ocdC_FAlCMEGtKj45xamir76DFxdJVvURNtog,6539
-aient/plugins/excute_command.py,sha256=
+aient/plugins/excute_command.py,sha256=9t7RB8ikltZZRv8NcjoxfaD8FkPuWbJTZ2Vwk7hEwTA,10683
 aient/plugins/get_time.py,sha256=Ih5XIW5SDAIhrZ9W4Qe5Hs1k4ieKPUc_LAd6ySNyqZk,654
 aient/plugins/image.py,sha256=ZElCIaZznE06TN9xW3DrSukS7U3A5_cjk1Jge4NzPxw,2072
 aient/plugins/list_directory.py,sha256=JZVuImecMSfEv6jLqii-0uQJ1UCsrpMNmYlwW3PEDg4,1374
@@ -38,8 +38,8 @@ aient/prompt/agent.py,sha256=ZNsbgXRyvYzAFTRRziAnNVqcTyAnxrGcsGfGrt72j6k,25427
 aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
 aient/utils/scripts.py,sha256=LD8adnfuRrJoY2tWKseXOPJXaxbrUmz4czsnUvHswNY,29096
-aient-1.1.16.dist-info/licenses/LICENSE,sha256=
-aient-1.1.16.dist-info/METADATA,sha256=
-aient-1.1.16.dist-info/WHEEL,sha256=
-aient-1.1.16.dist-info/top_level.txt,sha256=
-aient-1.1.16.dist-info/RECORD,,
+aient-1.1.18.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
+aient-1.1.18.dist-info/METADATA,sha256=pw57vV-QR6cvJTm40eK65Yq0cri4JgypKrGtVecRYYM,4968
+aient-1.1.18.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+aient-1.1.18.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
+aient-1.1.18.dist-info/RECORD,,
{aient-1.1.16.dist-info → aient-1.1.18.dist-info}/licenses/LICENSE
File without changes
{aient-1.1.16.dist-info → aient-1.1.18.dist-info}/top_level.txt
File without changes