aient 1.2.41__tar.gz → 1.2.43__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. {aient-1.2.41 → aient-1.2.43}/PKG-INFO +1 -1
  2. {aient-1.2.41 → aient-1.2.43}/aient/core/request.py +29 -7
  3. {aient-1.2.41 → aient-1.2.43}/aient/core/response.py +30 -1
  4. {aient-1.2.41 → aient-1.2.43}/aient/core/utils.py +5 -1
  5. {aient-1.2.41 → aient-1.2.43}/aient/models/chatgpt.py +3 -10
  6. {aient-1.2.41 → aient-1.2.43}/aient.egg-info/PKG-INFO +1 -1
  7. {aient-1.2.41 → aient-1.2.43}/pyproject.toml +1 -1
  8. {aient-1.2.41 → aient-1.2.43}/LICENSE +0 -0
  9. {aient-1.2.41 → aient-1.2.43}/README.md +0 -0
  10. {aient-1.2.41 → aient-1.2.43}/aient/__init__.py +0 -0
  11. {aient-1.2.41 → aient-1.2.43}/aient/architext/architext/__init__.py +0 -0
  12. {aient-1.2.41 → aient-1.2.43}/aient/architext/architext/core.py +0 -0
  13. {aient-1.2.41 → aient-1.2.43}/aient/architext/test/openai_client.py +0 -0
  14. {aient-1.2.41 → aient-1.2.43}/aient/architext/test/test.py +0 -0
  15. {aient-1.2.41 → aient-1.2.43}/aient/architext/test/test_save_load.py +0 -0
  16. {aient-1.2.41 → aient-1.2.43}/aient/core/__init__.py +0 -0
  17. {aient-1.2.41 → aient-1.2.43}/aient/core/log_config.py +0 -0
  18. {aient-1.2.41 → aient-1.2.43}/aient/core/models.py +0 -0
  19. {aient-1.2.41 → aient-1.2.43}/aient/core/test/test_base_api.py +0 -0
  20. {aient-1.2.41 → aient-1.2.43}/aient/core/test/test_geminimask.py +0 -0
  21. {aient-1.2.41 → aient-1.2.43}/aient/core/test/test_image.py +0 -0
  22. {aient-1.2.41 → aient-1.2.43}/aient/core/test/test_payload.py +0 -0
  23. {aient-1.2.41 → aient-1.2.43}/aient/models/__init__.py +0 -0
  24. {aient-1.2.41 → aient-1.2.43}/aient/models/audio.py +0 -0
  25. {aient-1.2.41 → aient-1.2.43}/aient/models/base.py +0 -0
  26. {aient-1.2.41 → aient-1.2.43}/aient/plugins/__init__.py +0 -0
  27. {aient-1.2.41 → aient-1.2.43}/aient/plugins/arXiv.py +0 -0
  28. {aient-1.2.41 → aient-1.2.43}/aient/plugins/config.py +0 -0
  29. {aient-1.2.41 → aient-1.2.43}/aient/plugins/excute_command.py +0 -0
  30. {aient-1.2.41 → aient-1.2.43}/aient/plugins/get_time.py +0 -0
  31. {aient-1.2.41 → aient-1.2.43}/aient/plugins/image.py +0 -0
  32. {aient-1.2.41 → aient-1.2.43}/aient/plugins/list_directory.py +0 -0
  33. {aient-1.2.41 → aient-1.2.43}/aient/plugins/read_image.py +0 -0
  34. {aient-1.2.41 → aient-1.2.43}/aient/plugins/readonly.py +0 -0
  35. {aient-1.2.41 → aient-1.2.43}/aient/plugins/registry.py +0 -0
  36. {aient-1.2.41 → aient-1.2.43}/aient/plugins/run_python.py +0 -0
  37. {aient-1.2.41 → aient-1.2.43}/aient/plugins/websearch.py +0 -0
  38. {aient-1.2.41 → aient-1.2.43}/aient/utils/__init__.py +0 -0
  39. {aient-1.2.41 → aient-1.2.43}/aient/utils/prompt.py +0 -0
  40. {aient-1.2.41 → aient-1.2.43}/aient/utils/scripts.py +0 -0
  41. {aient-1.2.41 → aient-1.2.43}/aient.egg-info/SOURCES.txt +0 -0
  42. {aient-1.2.41 → aient-1.2.43}/aient.egg-info/dependency_links.txt +0 -0
  43. {aient-1.2.41 → aient-1.2.43}/aient.egg-info/requires.txt +0 -0
  44. {aient-1.2.41 → aient-1.2.43}/aient.egg-info/top_level.txt +0 -0
  45. {aient-1.2.41 → aient-1.2.43}/setup.cfg +0 -0
  46. {aient-1.2.41 → aient-1.2.43}/test/test_Web_crawler.py +0 -0
  47. {aient-1.2.41 → aient-1.2.43}/test/test_ddg_search.py +0 -0
  48. {aient-1.2.41 → aient-1.2.43}/test/test_google_search.py +0 -0
  49. {aient-1.2.41 → aient-1.2.43}/test/test_ollama.py +0 -0
  50. {aient-1.2.41 → aient-1.2.43}/test/test_plugin.py +0 -0
  51. {aient-1.2.41 → aient-1.2.43}/test/test_url.py +0 -0
  52. {aient-1.2.41 → aient-1.2.43}/test/test_whisper.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: aient
3
- Version: 1.2.41
3
+ Version: 1.2.43
4
4
  Summary: Aient: The Awakening of Agent.
5
5
  Requires-Python: >=3.11
6
6
  Description-Content-Type: text/markdown
@@ -1015,9 +1015,16 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
1015
1015
  for item in msg.content:
1016
1016
  if item.type == "text":
1017
1017
  text_message = await get_text_message(item.text, engine)
1018
+ if "v1/responses" in url:
1019
+ text_message["type"] = "input_text"
1018
1020
  content.append(text_message)
1019
1021
  elif item.type == "image_url" and provider.get("image", True) and "o1-mini" not in original_model:
1020
1022
  image_message = await get_image_message(item.image_url.url, engine)
1023
+ if "v1/responses" in url:
1024
+ image_message = {
1025
+ "type": "input_image",
1026
+ "image_url": image_message["image_url"]["url"]
1027
+ }
1021
1028
  content.append(image_message)
1022
1029
  else:
1023
1030
  content = msg.content
@@ -1049,10 +1056,16 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
1049
1056
  system_msg = messages.pop(0)
1050
1057
  messages[0]["content"] = system_msg["content"] + messages[0]["content"]
1051
1058
 
1052
- payload = {
1053
- "model": original_model,
1054
- "messages": messages,
1055
- }
1059
+ if "v1/responses" in url:
1060
+ payload = {
1061
+ "model": original_model,
1062
+ "input": messages,
1063
+ }
1064
+ else:
1065
+ payload = {
1066
+ "model": original_model,
1067
+ "messages": messages,
1068
+ }
1056
1069
 
1057
1070
  miss_fields = [
1058
1071
  'model',
@@ -1085,11 +1098,20 @@ async def get_gpt_payload(request, engine, provider, api_key=None):
1085
1098
  "o3" in original_model or "o4" in original_model or \
1086
1099
  "gpt-oss" in original_model or "gpt-5" in original_model:
1087
1100
  if request.model.endswith("high"):
1088
- payload["reasoning_effort"] = "high"
1101
+ if "v1/responses" in url:
1102
+ payload["reasoning"] = {"effort": "high"}
1103
+ else:
1104
+ payload["reasoning_effort"] = "high"
1089
1105
  elif request.model.endswith("low"):
1090
- payload["reasoning_effort"] = "low"
1106
+ if "v1/responses" in url:
1107
+ payload["reasoning"] = {"effort": "low"}
1108
+ else:
1109
+ payload["reasoning_effort"] = "low"
1091
1110
  else:
1092
- payload["reasoning_effort"] = "medium"
1111
+ if "v1/responses" in url:
1112
+ payload["reasoning"] = {"effort": "medium"}
1113
+ else:
1114
+ payload["reasoning_effort"] = "medium"
1093
1115
 
1094
1116
  if "temperature" in payload:
1095
1117
  payload.pop("temperature")
@@ -213,6 +213,10 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
213
213
 
214
214
  buffer = ""
215
215
  enter_buffer = ""
216
+
217
+ input_tokens = 0
218
+ output_tokens = 0
219
+
216
220
  async for chunk in response.aiter_text():
217
221
  buffer += chunk
218
222
  while "\n" in buffer:
@@ -221,12 +225,32 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
221
225
  if line.startswith(": keepalive"):
222
226
  yield line + end_of_line
223
227
  continue
224
- if line and not line.startswith(":") and (result:=line.lstrip("data: ").strip()):
228
+ if line and not line.startswith(":") and (result:=line.lstrip("data: ").strip()) and not line.startswith("event: "):
225
229
  if result.strip() == "[DONE]":
226
230
  break
227
231
  line = await asyncio.to_thread(json.loads, result)
228
232
  line['id'] = f"chatcmpl-{random_str}"
229
233
 
234
+ # v1/responses
235
+ if line.get("type") == "response.reasoning_summary_text.delta" and line.get("delta"):
236
+ sse_string = await generate_sse_response(timestamp, payload["model"], reasoning_content=line.get("delta"))
237
+ yield sse_string
238
+ continue
239
+ elif line.get("type") == "response.output_text.delta" and line.get("delta"):
240
+ sse_string = await generate_sse_response(timestamp, payload["model"], content=line.get("delta"))
241
+ yield sse_string
242
+ continue
243
+ elif line.get("type") == "response.output_text.done":
244
+ sse_string = await generate_sse_response(timestamp, payload["model"], stop="stop")
245
+ yield sse_string
246
+ continue
247
+ elif line.get("type") == "response.completed":
248
+ input_tokens = safe_get(line, "response", "usage", "input_tokens", default=0)
249
+ output_tokens = safe_get(line, "response", "usage", "output_tokens", default=0)
250
+ continue
251
+ elif line.get("type", "").startswith("response."):
252
+ continue
253
+
230
254
  # 处理 <think> 标签
231
255
  content = safe_get(line, "choices", 0, "delta", "content", default="")
232
256
  if "<think>" in content:
@@ -322,6 +346,11 @@ async def fetch_gpt_response_stream(client, url, headers, payload, timeout):
322
346
  del line["choices"][0]["message"]
323
347
  json_line = await asyncio.to_thread(json.dumps, line)
324
348
  yield "data: " + json_line.strip() + end_of_line
349
+
350
+ if input_tokens and output_tokens:
351
+ sse_string = await generate_sse_response(timestamp, payload["model"], None, None, None, None, None, total_tokens=input_tokens + output_tokens, prompt_tokens=input_tokens, completion_tokens=output_tokens)
352
+ yield sse_string
353
+
325
354
  yield "data: [DONE]" + end_of_line
326
355
 
327
356
  async def fetch_azure_response_stream(client, url, headers, payload, timeout):
@@ -50,7 +50,11 @@ class BaseAPI:
50
50
  self.v1_models: str = urlunparse(parsed_url[:2] + ("v1/models",) + ("",) * 3)
51
51
  else:
52
52
  self.v1_models: str = urlunparse(parsed_url[:2] + (before_v1 + "models",) + ("",) * 3)
53
- self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
53
+
54
+ if "v1/responses" in parsed_url.path:
55
+ self.chat_url: str = urlunparse(parsed_url[:2] + ("v1/responses",) + ("",) * 3)
56
+ else:
57
+ self.chat_url: str = urlunparse(parsed_url[:2] + (before_v1 + "chat/completions",) + ("",) * 3)
54
58
  self.image_url: str = urlunparse(parsed_url[:2] + (before_v1 + "images/generations",) + ("",) * 3)
55
59
  if parsed_url.hostname == "dashscope.aliyuncs.com":
56
60
  self.audio_transcriptions: str = urlunparse(parsed_url[:2] + ("/api/v1/services/aigc/multimodal-generation/generation",) + ("",) * 3)
@@ -12,7 +12,7 @@ from typing import Union, Optional, Callable
12
12
  from .base import BaseLLM
13
13
  from ..plugins.registry import registry
14
14
  from ..plugins import PLUGINS, get_tools_result_async, function_call_list, update_tools_config
15
- from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content, find_most_frequent_phrase
15
+ from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
16
16
  from ..core.request import prepare_request_payload
17
17
  from ..core.response import fetch_response_stream, fetch_response
18
18
  from ..architext.architext import Messages, SystemMessage, UserMessage, AssistantMessage, ToolCalls, ToolResults, Texts, RoleMessage, Images, Files
@@ -80,7 +80,7 @@ class TaskComplete(Exception):
80
80
  self.completion_message = message
81
81
  super().__init__(f"Task completed with message: {message}")
82
82
 
83
-
83
+ # 结尾重复响应错误
84
84
  class RepetitiveResponseError(Exception):
85
85
  """Custom exception for detecting repetitive and meaningless generated strings."""
86
86
  def __init__(self, message, phrase, count):
@@ -446,13 +446,6 @@ class chatgpt(BaseLLM):
446
446
 
447
447
  if not full_response.strip() and not need_function_call:
448
448
  raise EmptyResponseError("Response is empty")
449
- most_frequent_phrase, most_frequent_phrase_count = find_most_frequent_phrase(full_response)
450
- if most_frequent_phrase_count > 100:
451
- raise RepetitiveResponseError(
452
- f"Detected repetitive and meaningless content. The phrase '{most_frequent_phrase}' appeared {most_frequent_phrase_count} times.",
453
- most_frequent_phrase,
454
- most_frequent_phrase_count
455
- )
456
449
 
457
450
  if self.print_log:
458
451
  self.logger.info(f"total_tokens: {total_tokens}")
@@ -630,7 +623,7 @@ class chatgpt(BaseLLM):
630
623
  elif tool_name.endswith("goal"):
631
624
  goal_provider = self.conversation[convo_id].provider("goal")
632
625
  if goal_provider:
633
- goal_provider += tool_response
626
+ goal_provider += f"\n\n<{tool_name}>{tool_response}</{tool_name}>"
634
627
  final_tool_response = "Get goal successfully! The goal has been updated in the tag <goal>."
635
628
  elif tool_name == "write_to_file":
636
629
  tool_args = None
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: aient
3
- Version: 1.2.41
3
+ Version: 1.2.43
4
4
  Summary: Aient: The Awakening of Agent.
5
5
  Requires-Python: >=3.11
6
6
  Description-Content-Type: text/markdown
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "aient"
3
- version = "1.2.41"
3
+ version = "1.2.43"
4
4
  description = "Aient: The Awakening of Agent."
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.11"
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes