aient-1.0.30-py3-none-any.whl → aient-1.0.32-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aient/core/response.py +12 -18
- aient/models/chatgpt.py +4 -0
- {aient-1.0.30.dist-info → aient-1.0.32.dist-info}/METADATA +1 -1
- {aient-1.0.30.dist-info → aient-1.0.32.dist-info}/RECORD +7 -7
- {aient-1.0.30.dist-info → aient-1.0.32.dist-info}/WHEEL +0 -0
- {aient-1.0.30.dist-info → aient-1.0.32.dist-info}/licenses/LICENSE +0 -0
- {aient-1.0.30.dist-info → aient-1.0.32.dist-info}/top_level.txt +0 -0
aient/core/response.py
CHANGED
@@ -85,7 +85,7 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
|
|
85
85
|
function_full_response = json.dumps(function_call["functionCall"]["args"])
|
86
86
|
sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
|
87
87
|
yield sse_string
|
88
|
-
|
88
|
+
yield "data: [DONE]" + end_of_line
|
89
89
|
|
90
90
|
async def fetch_vertex_claude_response_stream(client, url, headers, payload, model):
|
91
91
|
timestamp = int(datetime.timestamp(datetime.now()))
|
@@ -132,7 +132,7 @@ async def fetch_vertex_claude_response_stream(client, url, headers, payload, mod
|
|
132
132
|
function_full_response = json.dumps(function_call["input"])
|
133
133
|
sse_string = await generate_sse_response(timestamp, model, content=None, tools_id=function_call_id, function_call_name=None, function_call_content=function_full_response)
|
134
134
|
yield sse_string
|
135
|
-
|
135
|
+
yield "data: [DONE]" + end_of_line
|
136
136
|
|
137
137
|
async def fetch_gpt_response_stream(client, url, headers, payload):
|
138
138
|
timestamp = int(datetime.timestamp(datetime.now()))
|
@@ -156,8 +156,7 @@ async def fetch_gpt_response_stream(client, url, headers, payload):
|
|
156
156
|
# logger.info("line: %s", repr(line))
|
157
157
|
if line and line != "data: " and line != "data:" and not line.startswith(": ") and (result:=line.lstrip("data: ").strip()):
|
158
158
|
if result.strip() == "[DONE]":
|
159
|
-
|
160
|
-
return
|
159
|
+
break
|
161
160
|
line = json.loads(result)
|
162
161
|
line['id'] = f"chatcmpl-{random_str}"
|
163
162
|
|
@@ -242,6 +241,7 @@ async def fetch_gpt_response_stream(client, url, headers, payload):
|
|
242
241
|
if no_stream_content:
|
243
242
|
del line["choices"][0]["message"]
|
244
243
|
yield "data: " + json.dumps(line).strip() + end_of_line
|
244
|
+
yield "data: [DONE]" + end_of_line
|
245
245
|
|
246
246
|
async def fetch_azure_response_stream(client, url, headers, payload):
|
247
247
|
timestamp = int(datetime.timestamp(datetime.now()))
|
@@ -264,8 +264,7 @@ async def fetch_azure_response_stream(client, url, headers, payload):
|
|
264
264
|
if line and line != "data: " and line != "data:" and not line.startswith(": "):
|
265
265
|
result = line.lstrip("data: ")
|
266
266
|
if result.strip() == "[DONE]":
|
267
|
-
|
268
|
-
return
|
267
|
+
break
|
269
268
|
line = json.loads(result)
|
270
269
|
no_stream_content = safe_get(line, "choices", 0, "message", "content", default="")
|
271
270
|
content = safe_get(line, "choices", 0, "delta", "content", default="")
|
@@ -296,7 +295,7 @@ async def fetch_azure_response_stream(client, url, headers, payload):
|
|
296
295
|
if no_stream_content:
|
297
296
|
del line["choices"][0]["message"]
|
298
297
|
yield "data: " + json.dumps(line).strip() + end_of_line
|
299
|
-
|
298
|
+
yield "data: [DONE]" + end_of_line
|
300
299
|
|
301
300
|
async def fetch_cloudflare_response_stream(client, url, headers, payload, model):
|
302
301
|
timestamp = int(datetime.timestamp(datetime.now()))
|
@@ -315,13 +314,13 @@ async def fetch_cloudflare_response_stream(client, url, headers, payload, model)
|
|
315
314
|
if line.startswith("data:"):
|
316
315
|
line = line.lstrip("data: ")
|
317
316
|
if line == "[DONE]":
|
318
|
-
|
319
|
-
return
|
317
|
+
break
|
320
318
|
resp: dict = json.loads(line)
|
321
319
|
message = resp.get("response")
|
322
320
|
if message:
|
323
321
|
sse_string = await generate_sse_response(timestamp, model, content=message)
|
324
322
|
yield sse_string
|
323
|
+
yield "data: [DONE]" + end_of_line
|
325
324
|
|
326
325
|
async def fetch_cohere_response_stream(client, url, headers, payload, model):
|
327
326
|
timestamp = int(datetime.timestamp(datetime.now()))
|
@@ -339,12 +338,12 @@ async def fetch_cohere_response_stream(client, url, headers, payload, model):
|
|
339
338
|
# logger.info("line: %s", repr(line))
|
340
339
|
resp: dict = json.loads(line)
|
341
340
|
if resp.get("is_finished") == True:
|
342
|
-
|
343
|
-
return
|
341
|
+
break
|
344
342
|
if resp.get("event_type") == "text-generation":
|
345
343
|
message = resp.get("text")
|
346
344
|
sse_string = await generate_sse_response(timestamp, model, content=message)
|
347
345
|
yield sse_string
|
346
|
+
yield "data: [DONE]" + end_of_line
|
348
347
|
|
349
348
|
async def fetch_claude_response_stream(client, url, headers, payload, model):
|
350
349
|
timestamp = int(datetime.timestamp(datetime.now()))
|
@@ -409,7 +408,7 @@ async def fetch_claude_response_stream(client, url, headers, payload, model):
|
|
409
408
|
function_call_content = delta["partial_json"]
|
410
409
|
sse_string = await generate_sse_response(timestamp, model, None, None, None, function_call_content)
|
411
410
|
yield sse_string
|
412
|
-
|
411
|
+
yield "data: [DONE]" + end_of_line
|
413
412
|
|
414
413
|
async def fetch_response(client, url, headers, payload, engine, model):
|
415
414
|
response = None
|
@@ -501,7 +500,6 @@ async def fetch_response(client, url, headers, payload, engine, model):
|
|
501
500
|
yield response_json
|
502
501
|
|
503
502
|
async def fetch_response_stream(client, url, headers, payload, engine, model):
|
504
|
-
# try:
|
505
503
|
if engine == "gemini" or engine == "vertex-gemini":
|
506
504
|
async for chunk in fetch_gemini_response_stream(client, url, headers, payload, model):
|
507
505
|
yield chunk
|
@@ -524,8 +522,4 @@ async def fetch_response_stream(client, url, headers, payload, engine, model):
|
|
524
522
|
async for chunk in fetch_cohere_response_stream(client, url, headers, payload, model):
|
525
523
|
yield chunk
|
526
524
|
else:
|
527
|
-
raise ValueError("Unknown response")
|
528
|
-
# except httpx.ConnectError as e:
|
529
|
-
# yield {"error": f"500", "details": "fetch_response_stream Connect Error"}
|
530
|
-
# except httpx.ReadTimeout as e:
|
531
|
-
# yield {"error": f"500", "details": "fetch_response_stream Read Response Timeout"}
|
525
|
+
raise ValueError("Unknown response")
|
aient/models/chatgpt.py
CHANGED
@@ -560,6 +560,8 @@ class chatgpt(BaseLLM):
|
|
560
560
|
except requests.exceptions.ReadTimeout:
|
561
561
|
print("请求超时,请检查网络连接或增加超时时间。")
|
562
562
|
return
|
563
|
+
except httpx.RemoteProtocolError:
|
564
|
+
continue
|
563
565
|
except Exception as e:
|
564
566
|
print(f"发生了未预料的错误:{e}")
|
565
567
|
if "Invalid URL" in str(e):
|
@@ -651,6 +653,8 @@ class chatgpt(BaseLLM):
|
|
651
653
|
|
652
654
|
# 成功处理,跳出重试循环
|
653
655
|
break
|
656
|
+
except httpx.RemoteProtocolError:
|
657
|
+
continue
|
654
658
|
except Exception as e:
|
655
659
|
print(f"发生了未预料的错误:{e}")
|
656
660
|
import traceback
|
@@ -4,7 +4,7 @@ aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
|
|
4
4
|
aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
|
5
5
|
aient/core/models.py,sha256=8MsuiYHBHVR5UMQ_cNLkvntoxalS7NpVwaNwHA0iZmk,7379
|
6
6
|
aient/core/request.py,sha256=Tk8ylLBHPsrA4C_fb2XUEz_ZM7tR4691mlIxn7x8LUU,48249
|
7
|
-
aient/core/response.py,sha256=
|
7
|
+
aient/core/response.py,sha256=7s1Jil0E5nnbL9xQldcjHIqSp0MFeWQo9mNX_iAuvSk,25954
|
8
8
|
aient/core/utils.py,sha256=i9ZwyywBLIhRM0fNmFSD3jF3dBL5QqVMOtSlG_ddv-I,24101
|
9
9
|
aient/core/test/test_base_api.py,sha256=CjfFzMG26r8C4xCPoVkKb3Ac6pp9gy5NUCbZJHoSSsM,393
|
10
10
|
aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
|
@@ -12,7 +12,7 @@ aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9XkdsuuKFGrwFhF
|
|
12
12
|
aient/models/__init__.py,sha256=ouNDNvoBBpIFrLsk09Q_sq23HR0GbLAKfGLIFmfEuXE,219
|
13
13
|
aient/models/audio.py,sha256=kRd-8-WXzv4vwvsTGwnstK-WR8--vr9CdfCZzu8y9LA,1934
|
14
14
|
aient/models/base.py,sha256=Loyt2F2WrDMBbK-sdmTtgkLVtdUXxK5tg4qoI6nc0Xo,7527
|
15
|
-
aient/models/chatgpt.py,sha256
|
15
|
+
aient/models/chatgpt.py,sha256=d7ZGc2WTrpwevXCFzIX4HU0zZ0T_mwfYkrStZ8yW5v0,37063
|
16
16
|
aient/models/claude.py,sha256=thK9P8qkaaoUN3OOJ9Shw4KDs-pAGKPoX4FOPGFXva8,28597
|
17
17
|
aient/models/duckduckgo.py,sha256=1l7vYCs9SG5SWPCbcl7q6pCcB5AUF_r-a4l9frz3Ogo,8115
|
18
18
|
aient/models/gemini.py,sha256=chGLc-8G_DAOxr10HPoOhvVFW1RvMgHd6mt--VyAW98,14730
|
@@ -29,8 +29,8 @@ aient/plugins/websearch.py,sha256=k23xymhf3xxdc3hc0_0FFZTI9OaThCYM6csYsygCHs8,15
|
|
29
29
|
aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
30
30
|
aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
|
31
31
|
aient/utils/scripts.py,sha256=O-0IXN3mezPauFs6fw83WDDgklpXTDvcbJBNTDrsIG0,8201
|
32
|
-
aient-1.0.
|
33
|
-
aient-1.0.
|
34
|
-
aient-1.0.
|
35
|
-
aient-1.0.
|
36
|
-
aient-1.0.
|
32
|
+
aient-1.0.32.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
|
33
|
+
aient-1.0.32.dist-info/METADATA,sha256=l2oZ7hWojO3bq4hWyjpxkxVvhuWAZAqiYwHdj5y0Dd0,4986
|
34
|
+
aient-1.0.32.dist-info/WHEEL,sha256=1tXe9gY0PYatrMPMDd6jXqjfpz_B-Wqm32CPfRC58XU,91
|
35
|
+
aient-1.0.32.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
|
36
|
+
aient-1.0.32.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|