aient 1.1.52__tar.gz → 1.1.54__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {aient-1.1.52/src/aient.egg-info → aient-1.1.54}/PKG-INFO +1 -1
- {aient-1.1.52 → aient-1.1.54}/setup.py +1 -1
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/request.py +26 -9
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/response.py +58 -106
- {aient-1.1.52 → aient-1.1.54}/src/aient/models/chatgpt.py +4 -3
- {aient-1.1.52 → aient-1.1.54/src/aient.egg-info}/PKG-INFO +1 -1
- {aient-1.1.52 → aient-1.1.54}/LICENSE +0 -0
- {aient-1.1.52 → aient-1.1.54}/MANIFEST.in +0 -0
- {aient-1.1.52 → aient-1.1.54}/README.md +0 -0
- {aient-1.1.52 → aient-1.1.54}/setup.cfg +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/__init__.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/.git +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/.gitignore +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/__init__.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/log_config.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/models.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/test/test_base_api.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/test/test_geminimask.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/test/test_image.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/test/test_payload.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/core/utils.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/models/__init__.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/models/audio.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/models/base.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/models/claude.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/models/duckduckgo.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/models/gemini.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/models/groq.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/models/vertex.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/__init__.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/arXiv.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/config.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/excute_command.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/get_time.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/image.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/list_directory.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/read_file.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/read_image.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/readonly.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/registry.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/run_python.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/websearch.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/plugins/write_file.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/utils/__init__.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/utils/prompt.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient/utils/scripts.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient.egg-info/SOURCES.txt +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient.egg-info/dependency_links.txt +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient.egg-info/requires.txt +0 -0
- {aient-1.1.52 → aient-1.1.54}/src/aient.egg-info/top_level.txt +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_API.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_Deepbricks.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_Web_crawler.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_aiwaves.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_aiwaves_arxiv.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_ask_gemini.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_class.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_claude.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_claude_zh_char.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_ddg_search.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_download_pdf.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_gemini.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_get_token_dict.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_google_search.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_jieba.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_json.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_logging.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_ollama.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_plugin.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_py_run.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_requests.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_search.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_tikitoken.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_token.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_url.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_whisper.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_wildcard.py +0 -0
- {aient-1.1.52 → aient-1.1.54}/test/test_yjh.py +0 -0
{aient-1.1.52 → aient-1.1.54}/setup.py

```diff
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="aient",
-    version="1.1.52",
+    version="1.1.54",
     description="Aient: The Awakening of Agent.",
     long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
     long_description_content_type="text/markdown",
```
{aient-1.1.52 → aient-1.1.54}/src/aient/core/request.py

```diff
@@ -74,9 +74,8 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
                 content.append(image_message)
         elif msg.content:
             content = [{"text": msg.content}]
-            tool_calls = msg.tool_calls
         elif msg.content is None:
-
+            tool_calls = msg.tool_calls
 
         if tool_calls:
             tool_call = tool_calls[0]
```
```diff
@@ -110,7 +109,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
                     }]
                 }
             )
-        elif msg.role != "system":
+        elif msg.role != "system" and content:
             messages.append({"role": msg.role, "parts": content})
         elif msg.role == "system":
             content[0]["text"] = re.sub(r"_+", "_", content[0]["text"])
```
```diff
@@ -409,8 +408,9 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
                 elif item.type == "image_url" and provider.get("image", True):
                     image_message = await get_image_message(item.image_url.url, engine)
                     content.append(image_message)
-
+        elif msg.content:
             content = [{"text": msg.content}]
+        elif msg.content is None:
             tool_calls = msg.tool_calls
 
         if tool_calls:
```
```diff
@@ -445,7 +445,7 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
                     }]
                 }
             )
-        elif msg.role != "system":
+        elif msg.role != "system" and content:
             messages.append({"role": msg.role, "parts": content})
         elif msg.role == "system":
             system_prompt = system_prompt + "\n\n" + content[0]["text"]
```
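Both `get_gemini_payload` and `get_vertex_gemini_payload` now require a non-empty `content` before appending a non-system message. A minimal standalone sketch (hypothetical data, not package code) of the case the guard filters out:

```python
# In 1.1.52 a message with no usable content (e.g. content=None, only
# tool_calls) could reach the append with content == [] and produce
# {"role": ..., "parts": []} -- empty "parts" is invalid for the Gemini API.
content = []          # nothing usable was collected for this message

messages = []
if content:           # the added "and content" guard
    messages.append({"role": "user", "parts": content})

print(messages)       # [] -- the empty-parts entry is never built
```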
```diff
@@ -1354,7 +1354,8 @@ async def get_openrouter_payload(request, engine, provider, api_key=None):
 
     messages = []
     for msg in request.messages:
-
+        tool_calls = None
+        tool_call_id = None
         if isinstance(msg.content, list):
             content = []
             for item in msg.content:
```
```diff
@@ -1366,9 +1367,25 @@ async def get_openrouter_payload(request, engine, provider, api_key=None):
                     content.append(image_message)
         else:
             content = msg.content
-
-
-
+        tool_calls = msg.tool_calls
+        tool_call_id = msg.tool_call_id
+
+        if tool_calls:
+            tool_calls_list = []
+            for tool_call in tool_calls:
+                tool_calls_list.append({
+                    "id": tool_call.id,
+                    "type": tool_call.type,
+                    "function": {
+                        "name": tool_call.function.name,
+                        "arguments": tool_call.function.arguments
+                    }
+                })
+            if provider.get("tools"):
+                messages.append({"role": msg.role, "tool_calls": tool_calls_list})
+        elif tool_call_id:
+            if provider.get("tools"):
+                messages.append({"role": msg.role, "tool_call_id": tool_call_id, "content": content})
         else:
             # print("content", content)
             if isinstance(content, list):
```
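The new branch rebuilds the OpenAI-style tool-call objects as plain dicts so they serialize cleanly into the payload. Roughly, with invented values, the two message shapes it emits look like this:

```python
# Shape of the messages the new branch produces (values invented):
assistant_msg = {
    "role": "assistant",
    "tool_calls": [{
        "id": "call_abc123",
        "type": "function",
        "function": {"name": "get_time", "arguments": "{}"},
    }],
}
# The follow-up tool-result message is keyed by the same call id:
tool_msg = {"role": "tool", "tool_call_id": "call_abc123", "content": "12:00"}
```

Note that both are only appended when `provider.get("tools")` is truthy, so providers without tool support never receive them.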
{aient-1.1.52 → aient-1.1.54}/src/aient/core/response.py

```diff
@@ -20,6 +20,35 @@ async def check_response(response, error_log):
         return {"error": f"{error_log} HTTP Error", "status_code": response.status_code, "details": error_json}
     return None
 
+def gemini_json_poccess(response_str):
+    promptTokenCount = 0
+    candidatesTokenCount = 0
+    totalTokenCount = 0
+    image_base64 = None
+
+    response_json = json.loads(response_str)
+    json_data = safe_get(response_json, "candidates", 0, "content", default=None)
+    finishReason = safe_get(response_json, "candidates", 0 , "finishReason", default=None)
+    if finishReason:
+        promptTokenCount = safe_get(response_json, "usageMetadata", "promptTokenCount", default=0)
+        candidatesTokenCount = safe_get(response_json, "usageMetadata", "candidatesTokenCount", default=0)
+        totalTokenCount = safe_get(response_json, "usageMetadata", "totalTokenCount", default=0)
+
+    content = safe_get(json_data, "parts", 0, "text", default="")
+    b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
+    if b64_json:
+        image_base64 = b64_json
+
+    is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
+
+    function_call_name = safe_get(json_data, "functionCall", "name", default=None)
+    function_full_response = safe_get(json_data, "functionCall", "args", default="")
+    function_full_response = json.dumps(function_full_response) if function_full_response else None
+
+    blockReason = safe_get(json_data, 0, "promptFeedback", "blockReason", default=None)
+
+    return is_thinking, content, image_base64, function_call_name, function_full_response, finishReason, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount
+
 async def fetch_gemini_response_stream(client, url, headers, payload, model):
     timestamp = int(datetime.timestamp(datetime.now()))
     async with client.stream('POST', url, headers=headers, json=payload) as response:
```
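For reference, a hand-written example of a Gemini `streamGenerateContent` chunk (field shapes from the public API docs, values invented), annotated with what `gemini_json_poccess` extracts from it:

```python
chunk = {
    "candidates": [{
        "content": {
            "parts": [
                {"text": "Hello"},                 # parts[0]["text"]    -> content
                # {"thought": True, "text": "..."} # parts[0]["thought"] -> is_thinking
                # {"inlineData": {"data": "..."}}  # base64 image        -> image_base64
            ]
        },
        "finishReason": "STOP",                    # -> finishReason
    }],
    "usageMetadata": {
        "promptTokenCount": 8,                     # -> promptTokenCount
        "candidatesTokenCount": 2,                 # -> candidatesTokenCount
        "totalTokenCount": 10,                     # -> totalTokenCount
    },
}
```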
```diff
@@ -28,131 +57,54 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
             yield error_message
             return
         buffer = ""
-        cache_buffer = ""
-        revicing_function_call = False
-        function_full_response = "{"
-        need_function_call = False
-        is_finish = False
         promptTokenCount = 0
         candidatesTokenCount = 0
         totalTokenCount = 0
         parts_json = ""
-        image_base64 = ""
-        # line_index = 0
-        # last_text_line = 0
-        # if "thinking" in model:
-        #     is_thinking = True
-        # else:
-        #     is_thinking = False
         async for chunk in response.aiter_text():
             buffer += chunk
-            cache_buffer += chunk
 
             while "\n" in buffer:
                 line, buffer = buffer.split("\n", 1)
-                # line_index += 1
                 if line.startswith("data: "):
-
-
-                    json_data = safe_get(response_json, "candidates", 0, "content", default=None)
-                    finishReason = safe_get(response_json, "candidates", 0 , "finishReason", default=None)
-                    if finishReason:
-                        promptTokenCount = safe_get(response_json, "usageMetadata", "promptTokenCount", default=0)
-                        candidatesTokenCount = safe_get(response_json, "usageMetadata", "candidatesTokenCount", default=0)
-                        totalTokenCount = safe_get(response_json, "usageMetadata", "totalTokenCount", default=0)
-
-                    content = safe_get(json_data, "parts", 0, "text", default="")
-                    b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
-                    if b64_json:
-                        image_base64 = b64_json
-
-                    is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
-                    if is_thinking:
-                        sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
-                        yield sse_string
-                    elif not image_base64 and content:
-                        sse_string = await generate_sse_response(timestamp, model, content=content)
-                        yield sse_string
-
-                    continue
-
-                # https://ai.google.dev/api/generate-content?hl=zh-cn#FinishReason
-                if line and '\"finishReason\": \"' in line:
-                    if "stop" not in line.lower():
-                        logger.error(f"finishReason: {line}")
-                    is_finish = True
-                if is_finish and '\"promptTokenCount\": ' in line:
-                    json_data = parse_json_safely( "{" + line + "}")
-                    promptTokenCount = json_data.get('promptTokenCount', 0)
-                if is_finish and '\"candidatesTokenCount\": ' in line:
-                    json_data = parse_json_safely( "{" + line + "}")
-                    candidatesTokenCount = json_data.get('candidatesTokenCount', 0)
-                if is_finish and '\"totalTokenCount\": ' in line:
-                    json_data = parse_json_safely( "{" + line + "}")
-                    totalTokenCount = json_data.get('totalTokenCount', 0)
-
-                if (line and '"parts": [' in line or parts_json != "") and is_finish == False:
+                    parts_json = line.lstrip("data: ").strip()
+                else:
                     parts_json += line
-
-                    # tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{") + "}]}"
-                    tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{")
-                    if "inlineData" in tmp_parts_json:
-                        tmp_parts_json = tmp_parts_json + "}}]}"
-                    else:
-                        tmp_parts_json = tmp_parts_json + "}]}"
+                    parts_json = parts_json.lstrip("[,")
                     try:
-
-
-                        content = safe_get(json_data, "parts", 0, "text", default="")
-                        b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
-                        if b64_json:
-                            image_base64 = b64_json
-
-                        is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
-                        if is_thinking:
-                            sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
-                            yield sse_string
-                        elif not image_base64 and content:
-                            sse_string = await generate_sse_response(timestamp, model, content=content)
-                            yield sse_string
+                        json.loads(parts_json)
                     except json.JSONDecodeError:
-                        logger.error(f"无法解析JSON: {parts_json}")
-                        parts_json = ""
-
-                if line and ('\"functionCall\": {' in line or revicing_function_call):
-                    revicing_function_call = True
-                    need_function_call = True
-                    if ']' in line:
-                        revicing_function_call = False
                         continue
 
-
+                # https://ai.google.dev/api/generate-content?hl=zh-cn#FinishReason
+                is_thinking, content, image_base64, function_call_name, function_full_response, finishReason, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount = gemini_json_poccess(parts_json)
 
-
-
-
+                if is_thinking:
+                    sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
+                    yield sse_string
+                elif not image_base64 and content:
+                    sse_string = await generate_sse_response(timestamp, model, content=content)
+                    yield sse_string
 
-
-
-                    function_call_name = function_call["functionCall"]["name"]
-                    sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=function_call_name)
-                    yield sse_string
-                    function_full_response = json.dumps(function_call["functionCall"]["args"])
-                    sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
-                    yield sse_string
+                if image_base64:
+                    yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=totalTokenCount, prompt_tokens=promptTokenCount, completion_tokens=candidatesTokenCount, image_base64=image_base64)
 
-
-
-
-
-
+                if function_call_name:
+                    sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=function_call_name)
+                    yield sse_string
+                if function_full_response:
+                    sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
+                    yield sse_string
 
-
-
-
-
-
-
+                if parts_json == "[]" or blockReason == "PROHIBITED_CONTENT":
+                    sse_string = await generate_sse_response(timestamp, model, stop="PROHIBITED_CONTENT")
+                    yield sse_string
+                elif finishReason:
+                    sse_string = await generate_sse_response(timestamp, model, stop="stop")
+                    yield sse_string
+                    break
+
+                parts_json = ""
 
         sse_string = await generate_sse_response(timestamp, model, None, None, None, None, None, totalTokenCount, promptTokenCount, candidatesTokenCount)
         yield sse_string
```
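The net effect of the two response.py hunks: roughly a hundred lines of brace-counting and substring heuristics (`tmp_parts_json`, `revicing_function_call`, `is_finish`) collapse into one rule, buffer incoming lines until they parse as JSON, then let `gemini_json_poccess` pull out the fields. A standalone sketch of that accumulation idea (simplified, and the names here are mine, not the package's):

```python
import json

def iter_json_objects(lines):
    """Yield each complete JSON object from an SSE / pretty-printed JSON-array stream."""
    pending = ""
    for line in lines:
        if line.startswith("data: "):
            pending = line[len("data: "):].strip()  # fresh SSE event
        else:
            pending += line                         # continuation of a multi-line object
            pending = pending.lstrip("[,")          # drop JSON-array framing
        try:
            obj = json.loads(pending)
        except json.JSONDecodeError:
            continue                                # object not complete yet, keep buffering
        pending = ""
        yield obj

# A pretty-printed array arriving line by line:
stream = ['[{', '"a": 1', '}', ',{', '"a": 2', '}']
print(list(iter_json_objects(stream)))  # [{'a': 1}, {'a': 2}]
```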
{aient-1.1.52 → aient-1.1.54}/src/aient/models/chatgpt.py

```diff
@@ -187,7 +187,8 @@ class chatgpt(BaseLLM):
         # print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
         while message_index < conversation_len:
             if self.conversation[convo_id][message_index]["role"] == self.conversation[convo_id][message_index + 1]["role"]:
-                if self.conversation[convo_id][message_index].get("content") and self.conversation[convo_id][message_index + 1].get("content"):
+                if self.conversation[convo_id][message_index].get("content") and self.conversation[convo_id][message_index + 1].get("content") \
+                and self.conversation[convo_id][message_index].get("content") != self.conversation[convo_id][message_index + 1].get("content"):
                     if type(self.conversation[convo_id][message_index + 1]["content"]) == str \
                     and type(self.conversation[convo_id][message_index]["content"]) == list:
                         self.conversation[convo_id][message_index + 1]["content"] = [{"type": "text", "text": self.conversation[convo_id][message_index + 1]["content"]}]
```
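The added inequality means two consecutive same-role messages are merged only when their contents actually differ, so an exact duplicate no longer enters the merge branch. Illustratively (hypothetical data, not package code):

```python
prev = {"role": "user", "content": "hello"}
curr = {"role": "user", "content": "hello"}

# 1.1.52 merged whenever both contents were truthy; 1.1.54 also requires them to differ:
merge = bool(prev.get("content") and curr.get("content")
             and prev.get("content") != curr.get("content"))
print(merge)  # False -- an identical duplicate skips the merge logic
```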
```diff
@@ -754,8 +755,8 @@ class chatgpt(BaseLLM):
 
         # 打印日志
         if self.print_log:
-            print("api_url", kwargs.get('api_url', self.api_url.chat_url) == url)
-            print("api_url", kwargs.get('api_url', self.api_url.chat_url))
+            # print("api_url", kwargs.get('api_url', self.api_url.chat_url) == url)
+            # print("api_url", kwargs.get('api_url', self.api_url.chat_url))
             print("api_url", url)
             # print("headers", headers)
             print("api_key", kwargs.get('api_key', self.api_key))
```