aient 1.0.31-py3-none-any.whl → 1.0.32-py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in the public registry.
- aient/core/response.py +8 -17
- aient/models/chatgpt.py +4 -0
- {aient-1.0.31.dist-info → aient-1.0.32.dist-info}/METADATA +1 -1
- {aient-1.0.31.dist-info → aient-1.0.32.dist-info}/RECORD +7 -7
- {aient-1.0.31.dist-info → aient-1.0.32.dist-info}/WHEEL +0 -0
- {aient-1.0.31.dist-info → aient-1.0.32.dist-info}/licenses/LICENSE +0 -0
- {aient-1.0.31.dist-info → aient-1.0.32.dist-info}/top_level.txt +0 -0
aient/core/response.py
CHANGED
@@ -85,7 +85,7 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
 function_full_response = json.dumps(function_call["functionCall"]["args"])
 sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
 yield sse_string
-
+yield "data: [DONE]" + end_of_line

 async def fetch_vertex_claude_response_stream(client, url, headers, payload, model):
 timestamp = int(datetime.timestamp(datetime.now()))

@@ -132,7 +132,7 @@ async def fetch_vertex_claude_response_stream(client, url, headers, payload, model):
 function_full_response = json.dumps(function_call["input"])
 sse_string = await generate_sse_response(timestamp, model, content=None, tools_id=function_call_id, function_call_name=None, function_call_content=function_full_response)
 yield sse_string
-
+yield "data: [DONE]" + end_of_line

 async def fetch_gpt_response_stream(client, url, headers, payload):
 timestamp = int(datetime.timestamp(datetime.now()))

@@ -156,7 +156,6 @@ async def fetch_gpt_response_stream(client, url, headers, payload):
 # logger.info("line: %s", repr(line))
 if line and line != "data: " and line != "data:" and not line.startswith(": ") and (result:=line.lstrip("data: ").strip()):
 if result.strip() == "[DONE]":
-# yield "data: [DONE]" + end_of_line
 break
 line = json.loads(result)
 line['id'] = f"chatcmpl-{random_str}"

@@ -242,7 +241,7 @@ async def fetch_gpt_response_stream(client, url, headers, payload):
 if no_stream_content:
 del line["choices"][0]["message"]
 yield "data: " + json.dumps(line).strip() + end_of_line
-
+yield "data: [DONE]" + end_of_line

 async def fetch_azure_response_stream(client, url, headers, payload):
 timestamp = int(datetime.timestamp(datetime.now()))

@@ -265,7 +264,6 @@ async def fetch_azure_response_stream(client, url, headers, payload):
 if line and line != "data: " and line != "data:" and not line.startswith(": "):
 result = line.lstrip("data: ")
 if result.strip() == "[DONE]":
-# yield "data: [DONE]" + end_of_line
 break
 line = json.loads(result)
 no_stream_content = safe_get(line, "choices", 0, "message", "content", default="")

@@ -297,7 +295,7 @@ async def fetch_azure_response_stream(client, url, headers, payload):
 if no_stream_content:
 del line["choices"][0]["message"]
 yield "data: " + json.dumps(line).strip() + end_of_line
-
+yield "data: [DONE]" + end_of_line

 async def fetch_cloudflare_response_stream(client, url, headers, payload, model):
 timestamp = int(datetime.timestamp(datetime.now()))

@@ -316,14 +314,13 @@ async def fetch_cloudflare_response_stream(client, url, headers, payload, model):
 if line.startswith("data:"):
 line = line.lstrip("data: ")
 if line == "[DONE]":
-# yield "data: [DONE]" + end_of_line
 break
 resp: dict = json.loads(line)
 message = resp.get("response")
 if message:
 sse_string = await generate_sse_response(timestamp, model, content=message)
 yield sse_string
-
+yield "data: [DONE]" + end_of_line

 async def fetch_cohere_response_stream(client, url, headers, payload, model):
 timestamp = int(datetime.timestamp(datetime.now()))

@@ -341,13 +338,12 @@ async def fetch_cohere_response_stream(client, url, headers, payload, model):
 # logger.info("line: %s", repr(line))
 resp: dict = json.loads(line)
 if resp.get("is_finished") == True:
-# yield "data: [DONE]" + end_of_line
 break
 if resp.get("event_type") == "text-generation":
 message = resp.get("text")
 sse_string = await generate_sse_response(timestamp, model, content=message)
 yield sse_string
-
+yield "data: [DONE]" + end_of_line

 async def fetch_claude_response_stream(client, url, headers, payload, model):
 timestamp = int(datetime.timestamp(datetime.now()))

@@ -412,7 +408,7 @@ async def fetch_claude_response_stream(client, url, headers, payload, model):
 function_call_content = delta["partial_json"]
 sse_string = await generate_sse_response(timestamp, model, None, None, None, function_call_content)
 yield sse_string
-
+yield "data: [DONE]" + end_of_line

 async def fetch_response(client, url, headers, payload, engine, model):
 response = None

@@ -504,7 +500,6 @@ async def fetch_response(client, url, headers, payload, engine, model):
 yield response_json

 async def fetch_response_stream(client, url, headers, payload, engine, model):
-# try:
 if engine == "gemini" or engine == "vertex-gemini":
 async for chunk in fetch_gemini_response_stream(client, url, headers, payload, model):
 yield chunk

@@ -527,8 +522,4 @@ async def fetch_response_stream(client, url, headers, payload, engine, model):
 async for chunk in fetch_cohere_response_stream(client, url, headers, payload, model):
 yield chunk
 else:
-raise ValueError("Unknown response")
-# except httpx.ConnectError as e:
-# yield {"error": f"500", "details": "fetch_response_stream Connect Error"}
-# except httpx.ReadTimeout as e:
-# yield {"error": f"500", "details": "fetch_response_stream Read Response Timeout"}
+raise ValueError("Unknown response")
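The response.py change is the same in every streaming backend: the commented-out `# yield "data: [DONE]"` lines inside the read loops are dropped, each `fetch_*_response_stream` generator now emits the OpenAI-style `data: [DONE]` sentinel exactly once after its upstream stream is exhausted, and the commented-out try/except scaffolding in `fetch_response_stream` is removed. The sketch below illustrates only that termination pattern; `fake_upstream`, `fetch_demo_response_stream`, and the simplified chunk format are hypothetical stand-ins, not the package's actual helpers.

```python
import asyncio
import json
from typing import AsyncIterator

end_of_line = "\n\n"  # SSE events are separated by a blank line


async def fake_upstream() -> AsyncIterator[str]:
    # Hypothetical stand-in for a provider's streaming HTTP response.
    for text in ("Hello", ", ", "world"):
        yield json.dumps({"response": text})


async def fetch_demo_response_stream() -> AsyncIterator[str]:
    async for raw in fake_upstream():
        message = json.loads(raw).get("response", "")
        if message:
            yield "data: " + json.dumps({"content": message}) + end_of_line
    # As of 1.0.32, the sentinel is emitted once, after the read loop, so
    # every backend ends its SSE stream the same way regardless of how the
    # upstream signals completion.
    yield "data: [DONE]" + end_of_line


async def main() -> None:
    async for event in fetch_demo_response_stream():
        print(repr(event))


if __name__ == "__main__":
    asyncio.run(main())
```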
aient/models/chatgpt.py
CHANGED
@@ -560,6 +560,8 @@ class chatgpt(BaseLLM):
 except requests.exceptions.ReadTimeout:
 print("请求超时,请检查网络连接或增加超时时间。")
 return
+except httpx.RemoteProtocolError:
+continue
 except Exception as e:
 print(f"发生了未预料的错误:{e}")
 if "Invalid URL" in str(e):

@@ -651,6 +653,8 @@ class chatgpt(BaseLLM):

 # 成功处理,跳出重试循环
 break
+except httpx.RemoteProtocolError:
+continue
 except Exception as e:
 print(f"发生了未预料的错误:{e}")
 import traceback
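Both chatgpt.py hunks add the same branch to an existing retry loop: when httpx raises RemoteProtocolError (typically the server closing the connection before the response is complete), the attempt is retried via `continue` instead of falling through to the generic exception handler. A minimal sketch of that pattern follows; `ask_with_retry` and its request logic are hypothetical and only show where the new except clause sits relative to the others.

```python
import httpx


def ask_with_retry(url: str, retries: int = 3) -> str:
    # Hypothetical helper showing the pattern added in chatgpt.py: a
    # RemoteProtocolError simply triggers the next attempt, while other
    # exceptions are reported and stop the loop.
    with httpx.Client(timeout=10.0) as client:
        for _ in range(retries):
            try:
                response = client.get(url)
                response.raise_for_status()
                return response.text  # success: leave the retry loop
            except httpx.RemoteProtocolError:
                # Server dropped the connection mid-response; retry.
                continue
            except Exception as e:
                print(f"Unexpected error: {e}")
                break
    return ""


# Example: print(ask_with_retry("https://example.com"))
```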
{aient-1.0.31.dist-info → aient-1.0.32.dist-info}/RECORD
CHANGED
@@ -4,7 +4,7 @@ aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 aient/core/models.py,sha256=8MsuiYHBHVR5UMQ_cNLkvntoxalS7NpVwaNwHA0iZmk,7379
 aient/core/request.py,sha256=Tk8ylLBHPsrA4C_fb2XUEz_ZM7tR4691mlIxn7x8LUU,48249
-aient/core/response.py,sha256=
+aient/core/response.py,sha256=7s1Jil0E5nnbL9xQldcjHIqSp0MFeWQo9mNX_iAuvSk,25954
 aient/core/utils.py,sha256=i9ZwyywBLIhRM0fNmFSD3jF3dBL5QqVMOtSlG_ddv-I,24101
 aient/core/test/test_base_api.py,sha256=CjfFzMG26r8C4xCPoVkKb3Ac6pp9gy5NUCbZJHoSSsM,393
 aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319

@@ -12,7 +12,7 @@ aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9XkdsuuKFGrwFhF
 aient/models/__init__.py,sha256=ouNDNvoBBpIFrLsk09Q_sq23HR0GbLAKfGLIFmfEuXE,219
 aient/models/audio.py,sha256=kRd-8-WXzv4vwvsTGwnstK-WR8--vr9CdfCZzu8y9LA,1934
 aient/models/base.py,sha256=Loyt2F2WrDMBbK-sdmTtgkLVtdUXxK5tg4qoI6nc0Xo,7527
-aient/models/chatgpt.py,sha256
+aient/models/chatgpt.py,sha256=d7ZGc2WTrpwevXCFzIX4HU0zZ0T_mwfYkrStZ8yW5v0,37063
 aient/models/claude.py,sha256=thK9P8qkaaoUN3OOJ9Shw4KDs-pAGKPoX4FOPGFXva8,28597
 aient/models/duckduckgo.py,sha256=1l7vYCs9SG5SWPCbcl7q6pCcB5AUF_r-a4l9frz3Ogo,8115
 aient/models/gemini.py,sha256=chGLc-8G_DAOxr10HPoOhvVFW1RvMgHd6mt--VyAW98,14730

@@ -29,8 +29,8 @@ aient/plugins/websearch.py,sha256=k23xymhf3xxdc3hc0_0FFZTI9OaThCYM6csYsygCHs8,15
 aient/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 aient/utils/prompt.py,sha256=UcSzKkFE4-h_1b6NofI6xgk3GoleqALRKY8VBaXLjmI,11311
 aient/utils/scripts.py,sha256=O-0IXN3mezPauFs6fw83WDDgklpXTDvcbJBNTDrsIG0,8201
-aient-1.0.
-aient-1.0.
-aient-1.0.
-aient-1.0.
-aient-1.0.
+aient-1.0.32.dist-info/licenses/LICENSE,sha256=XNdbcWldt0yaNXXWB_Bakoqnxb3OVhUft4MgMA_71ds,1051
+aient-1.0.32.dist-info/METADATA,sha256=l2oZ7hWojO3bq4hWyjpxkxVvhuWAZAqiYwHdj5y0Dd0,4986
+aient-1.0.32.dist-info/WHEEL,sha256=1tXe9gY0PYatrMPMDd6jXqjfpz_B-Wqm32CPfRC58XU,91
+aient-1.0.32.dist-info/top_level.txt,sha256=3oXzrP5sAVvyyqabpeq8A2_vfMtY554r4bVE-OHBrZk,6
+aient-1.0.32.dist-info/RECORD,,
{aient-1.0.31.dist-info → aient-1.0.32.dist-info}/WHEEL
File without changes

{aient-1.0.31.dist-info → aient-1.0.32.dist-info}/licenses/LICENSE
File without changes

{aient-1.0.31.dist-info → aient-1.0.32.dist-info}/top_level.txt
File without changes