aient 1.2.28__tar.gz → 1.2.31__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. {aient-1.2.28 → aient-1.2.31}/PKG-INFO +1 -1
  2. {aient-1.2.28 → aient-1.2.31}/aient/architext/architext/core.py +35 -2
  3. {aient-1.2.28 → aient-1.2.31}/aient/architext/test/test.py +124 -0
  4. {aient-1.2.28 → aient-1.2.31}/aient/core/request.py +4 -2
  5. {aient-1.2.28 → aient-1.2.31}/aient/core/utils.py +6 -1
  6. {aient-1.2.28 → aient-1.2.31}/aient/models/audio.py +2 -2
  7. {aient-1.2.28 → aient-1.2.31}/aient/models/base.py +2 -2
  8. {aient-1.2.28 → aient-1.2.31}/aient/models/chatgpt.py +39 -23
  9. {aient-1.2.28 → aient-1.2.31}/aient/plugins/image.py +2 -2
  10. {aient-1.2.28 → aient-1.2.31}/aient/utils/prompt.py +0 -4
  11. {aient-1.2.28 → aient-1.2.31}/aient/utils/scripts.py +0 -8
  12. {aient-1.2.28 → aient-1.2.31}/aient.egg-info/PKG-INFO +1 -1
  13. {aient-1.2.28 → aient-1.2.31}/aient.egg-info/SOURCES.txt +1 -3
  14. {aient-1.2.28 → aient-1.2.31}/pyproject.toml +1 -1
  15. {aient-1.2.28 → aient-1.2.31}/test/test_whisper.py +1 -1
  16. aient-1.2.28/test/test_search.py +0 -18
  17. aient-1.2.28/test/test_yjh.py +0 -21
  18. {aient-1.2.28 → aient-1.2.31}/LICENSE +0 -0
  19. {aient-1.2.28 → aient-1.2.31}/README.md +0 -0
  20. {aient-1.2.28 → aient-1.2.31}/aient/__init__.py +0 -0
  21. {aient-1.2.28 → aient-1.2.31}/aient/architext/architext/__init__.py +0 -0
  22. {aient-1.2.28 → aient-1.2.31}/aient/architext/test/openai_client.py +0 -0
  23. {aient-1.2.28 → aient-1.2.31}/aient/architext/test/test_save_load.py +0 -0
  24. {aient-1.2.28 → aient-1.2.31}/aient/core/__init__.py +0 -0
  25. {aient-1.2.28 → aient-1.2.31}/aient/core/log_config.py +0 -0
  26. {aient-1.2.28 → aient-1.2.31}/aient/core/models.py +0 -0
  27. {aient-1.2.28 → aient-1.2.31}/aient/core/response.py +0 -0
  28. {aient-1.2.28 → aient-1.2.31}/aient/core/test/test_base_api.py +0 -0
  29. {aient-1.2.28 → aient-1.2.31}/aient/core/test/test_geminimask.py +0 -0
  30. {aient-1.2.28 → aient-1.2.31}/aient/core/test/test_image.py +0 -0
  31. {aient-1.2.28 → aient-1.2.31}/aient/core/test/test_payload.py +0 -0
  32. {aient-1.2.28 → aient-1.2.31}/aient/models/__init__.py +0 -0
  33. {aient-1.2.28 → aient-1.2.31}/aient/plugins/__init__.py +0 -0
  34. {aient-1.2.28 → aient-1.2.31}/aient/plugins/arXiv.py +0 -0
  35. {aient-1.2.28 → aient-1.2.31}/aient/plugins/config.py +0 -0
  36. {aient-1.2.28 → aient-1.2.31}/aient/plugins/excute_command.py +0 -0
  37. {aient-1.2.28 → aient-1.2.31}/aient/plugins/get_time.py +0 -0
  38. {aient-1.2.28 → aient-1.2.31}/aient/plugins/list_directory.py +0 -0
  39. {aient-1.2.28 → aient-1.2.31}/aient/plugins/read_file.py +0 -0
  40. {aient-1.2.28 → aient-1.2.31}/aient/plugins/read_image.py +0 -0
  41. {aient-1.2.28 → aient-1.2.31}/aient/plugins/readonly.py +0 -0
  42. {aient-1.2.28 → aient-1.2.31}/aient/plugins/registry.py +0 -0
  43. {aient-1.2.28 → aient-1.2.31}/aient/plugins/run_python.py +0 -0
  44. {aient-1.2.28 → aient-1.2.31}/aient/plugins/websearch.py +0 -0
  45. {aient-1.2.28 → aient-1.2.31}/aient/plugins/write_file.py +0 -0
  46. {aient-1.2.28 → aient-1.2.31}/aient/utils/__init__.py +0 -0
  47. {aient-1.2.28 → aient-1.2.31}/aient.egg-info/dependency_links.txt +0 -0
  48. {aient-1.2.28 → aient-1.2.31}/aient.egg-info/requires.txt +0 -0
  49. {aient-1.2.28 → aient-1.2.31}/aient.egg-info/top_level.txt +0 -0
  50. {aient-1.2.28 → aient-1.2.31}/setup.cfg +0 -0
  51. {aient-1.2.28 → aient-1.2.31}/test/test_Web_crawler.py +0 -0
  52. {aient-1.2.28 → aient-1.2.31}/test/test_ddg_search.py +0 -0
  53. {aient-1.2.28 → aient-1.2.31}/test/test_google_search.py +0 -0
  54. {aient-1.2.28 → aient-1.2.31}/test/test_ollama.py +0 -0
  55. {aient-1.2.28 → aient-1.2.31}/test/test_plugin.py +0 -0
  56. {aient-1.2.28 → aient-1.2.31}/test/test_url.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: aient
3
- Version: 1.2.28
3
+ Version: 1.2.31
4
4
  Summary: Aient: The Awakening of Agent.
5
5
  Requires-Python: >=3.11
6
6
  Description-Content-Type: text/markdown
@@ -104,9 +104,10 @@ class ContextProvider(ABC):
104
104
  return NotImplemented
105
105
 
106
106
  class Texts(ContextProvider):
107
- def __init__(self, text: Optional[Union[str, Callable[[], str]]] = None, name: Optional[str] = None, visible: bool = True):
107
+ def __init__(self, text: Optional[Union[str, Callable[[], str]]] = None, name: Optional[str] = None, visible: bool = True, newline: bool = False):
108
108
  if text is None and name is None:
109
109
  raise ValueError("Either 'text' or 'name' must be provided.")
110
+ self.newline = newline
110
111
 
111
112
  # Ensure that non-callable inputs are treated as strings
112
113
  if not callable(text):
@@ -373,8 +374,11 @@ class Message(ABC):
373
374
  for item in self._items:
374
375
  block = item.get_content_block()
375
376
  if block and block.content is not None:
377
+ # Check if it's a Texts provider with newline=True
378
+ # and it's not the very first item with content.
379
+ if isinstance(item, Texts) and hasattr(item, 'newline') and item.newline and final_parts:
380
+ final_parts.append("\n\n")
376
381
  final_parts.append(block.content)
377
-
378
382
  return "".join(final_parts)
379
383
 
380
384
  def pop(self, name: str) -> Optional[ContextProvider]:
@@ -468,6 +472,35 @@ class Message(ABC):
468
472
  # and our custom __eq__ on ContextProvider handles the comparison logic.
469
473
  return item in self._items
470
474
 
475
+ def has(self, provider_type: type) -> bool:
476
+ """Checks if the message contains a provider of a specific type."""
477
+ if not isinstance(provider_type, type) or not issubclass(provider_type, ContextProvider):
478
+ raise TypeError("provider_type must be a subclass of ContextProvider")
479
+ return any(isinstance(p, provider_type) for p in self._items)
480
+
481
+ def lstrip(self, provider_type: type):
482
+ """
483
+ 从消息的左侧(开头)移除所有指定类型的 provider。
484
+ 移除操作会一直持续,直到遇到一个不同类型的 provider 为止。
485
+ """
486
+ while self._items and type(self._items[0]) is provider_type:
487
+ self.pop(self._items[0].name)
488
+
489
+ def rstrip(self, provider_type: type):
490
+ """
491
+ 从消息的右侧(末尾)移除所有指定类型的 provider。
492
+ 移除操作会一直持续,直到遇到一个不同类型的 provider 为止。
493
+ """
494
+ while self._items and type(self._items[-1]) is provider_type:
495
+ self.pop(self._items[-1].name)
496
+
497
+ def strip(self, provider_type: type):
498
+ """
499
+ 从消息的两侧移除所有指定类型的 provider。
500
+ """
501
+ self.lstrip(provider_type)
502
+ self.rstrip(provider_type)
503
+
471
504
  def __bool__(self) -> bool:
472
505
  return bool(self._items)
473
506
  def get(self, key: str, default: Any = None) -> Any:
@@ -1454,6 +1454,130 @@ Files: {Files(visible=True, name="files")}
1454
1454
  self.assertEqual(len(message_mixed.provider()), 1)
1455
1455
  self.assertIsInstance(message_mixed.provider()[0], Texts)
1456
1456
 
1457
+ async def test_zaa_has_method_for_provider_type_check(self):
1458
+ """测试 Message.has(type) 方法是否能正确检查 provider 类型"""
1459
+ # 1. 创建一个混合类型的消息
1460
+ message_with_text = UserMessage(Texts("hi"), Images("url"))
1461
+
1462
+ # 2. 测试存在的情况
1463
+ # This line is expected to fail with an AttributeError before implementation
1464
+ self.assertTrue(message_with_text.has(Texts))
1465
+ self.assertTrue(message_with_text.has(Images))
1466
+
1467
+ # 3. 测试不存在的情况
1468
+ self.assertFalse(message_with_text.has(Tools))
1469
+
1470
+ # 4. 测试空消息
1471
+ empty_message = UserMessage()
1472
+ self.assertFalse(empty_message.has(Texts))
1473
+
1474
+ # 5. 测试传入无效类型
1475
+ with self.assertRaises(TypeError):
1476
+ message_with_text.has(str)
1477
+
1478
+ with self.assertRaises(TypeError):
1479
+ # Also test with a class that is not a subclass of ContextProvider
1480
+ class NotAProvider: pass
1481
+ message_with_text.has(NotAProvider)
1482
+
1483
+ async def test_zab_lstrip_and_rstrip(self):
1484
+ """测试 lstrip, rstrip, 和 strip 方法是否能正确移除两侧的特定类型的 provider"""
1485
+ # 1. 定义一个用于测试的子类
1486
+ class SpecialTexts(Texts):
1487
+ pass
1488
+ url = "data:image/png;base64,FAKE_IMG"
1489
+
1490
+ # 2. 创建一个复杂的测试消息
1491
+ message = UserMessage(
1492
+ Texts("leading1"),
1493
+ Texts("leading2"),
1494
+ Images(url, name="image1"),
1495
+ Texts("middle"),
1496
+ SpecialTexts("special_middle"),
1497
+ Images(url, name="image2"),
1498
+ Texts("trailing1"),
1499
+ SpecialTexts("special_trailing"), # rstrip(Texts) should stop here
1500
+ Texts("trailing2")
1501
+ )
1502
+
1503
+ # 3. 测试 rstrip(Texts)
1504
+ r_stripped_message = UserMessage(*message.provider()) # 创建副本
1505
+ r_stripped_message.rstrip(Texts)
1506
+ # 应移除 "trailing2",但在 "special_trailing" 处停止
1507
+ self.assertEqual(len(r_stripped_message), 8)
1508
+ self.assertIs(type(r_stripped_message[-1]), SpecialTexts)
1509
+
1510
+ # 4. 测试 lstrip(Texts)
1511
+ l_stripped_message = UserMessage(*message.provider()) # 创建副本
1512
+ l_stripped_message.lstrip(Texts)
1513
+ # 应移除 "leading1" 和 "leading2",但在 "image1" 处停止
1514
+ self.assertEqual(len(l_stripped_message), 7)
1515
+ self.assertIs(type(l_stripped_message[0]), Images)
1516
+
1517
+ # 5. 测试 strip(Texts)
1518
+ stripped_message = UserMessage(*message.provider()) # 创建副本
1519
+ stripped_message.strip(Texts)
1520
+ # 应同时移除 "leading1", "leading2", 和 "trailing2"
1521
+ self.assertEqual(len(stripped_message), 6)
1522
+ self.assertIs(type(stripped_message[0]), Images)
1523
+ self.assertIs(type(stripped_message[-1]), SpecialTexts)
1524
+
1525
+ # 6. 测试在一个只包含一种类型的消息上进行剥离
1526
+ only_texts = UserMessage(Texts("a"), Texts("b"))
1527
+ only_texts.strip(Texts)
1528
+ self.assertEqual(len(only_texts), 0)
1529
+
1530
+ # 7. 测试剥离一个不包含目标类型的消息
1531
+ only_images = UserMessage(Images("url1"), Images("url2"))
1532
+ only_images.strip(Texts)
1533
+ self.assertEqual(len(only_images), 2) # 不应改变
1534
+
1535
+ # 8. 测试在一个空消息上进行剥离
1536
+ empty_message = UserMessage()
1537
+ empty_message.strip(Texts)
1538
+ self.assertEqual(len(empty_message), 0)
1539
+
1540
+ # 9. 测试剥离子类
1541
+ message_ending_with_special = UserMessage(Texts("a"), SpecialTexts("b"))
1542
+ message_ending_with_special.rstrip(SpecialTexts)
1543
+ self.assertEqual(len(message_ending_with_special), 1)
1544
+ self.assertIsInstance(message_ending_with_special[0], Texts)
1545
+
1546
+ async def test_zac_texts_join_parameter(self):
1547
+ """测试 Texts provider 是否支持通过参数控制拼接方式"""
1548
+ # 1. 测试默认行为:直接拼接
1549
+ message_default = UserMessage(
1550
+ Texts("First line."),
1551
+ Texts("Second line.")
1552
+ )
1553
+ rendered_default = await message_default.render_latest()
1554
+ self.assertEqual(rendered_default['content'], "First line.Second line.")
1555
+
1556
+ # 2. 测试新功能:使用 \n\n 拼接
1557
+ # 假设新参数为 `newline=True`
1558
+ message_newline = UserMessage(
1559
+ Texts("First paragraph."),
1560
+ Texts("Second paragraph.", newline=True)
1561
+ )
1562
+ rendered_newline = await message_newline.render_latest()
1563
+ self.assertEqual(rendered_newline['content'], "First paragraph.\n\nSecond paragraph.")
1564
+
1565
+ # 3. 测试多个 provider 的情况
1566
+ message_multiple = UserMessage(
1567
+ Texts("First."),
1568
+ Texts("Second.", newline=True),
1569
+ Texts("Third.", newline=True)
1570
+ )
1571
+ rendered_multiple = await message_multiple.render_latest()
1572
+ self.assertEqual(rendered_multiple['content'], "First.\n\nSecond.\n\nThird.")
1573
+
1574
+ # 4. 测试只有一个 provider 的情况
1575
+ message_single = UserMessage(
1576
+ Texts("Only one.", newline=True)
1577
+ )
1578
+ rendered_single = await message_single.render_latest()
1579
+ self.assertEqual(rendered_single['content'], "Only one.")
1580
+
1457
1581
 
1458
1582
  # ==============================================================================
1459
1583
  # 6. 演示
@@ -1,5 +1,6 @@
1
1
  import re
2
2
  import json
3
+ import copy
3
4
  import httpx
4
5
  import base64
5
6
  import asyncio
@@ -57,7 +58,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
57
58
  try:
58
59
  request_messages = [Message(role="user", content=request.prompt)]
59
60
  except:
60
- request_messages = request.messages
61
+ request_messages = copy.deepcopy(request.messages)
61
62
  for msg in request_messages:
62
63
  if msg.role == "assistant":
63
64
  msg.role = "model"
@@ -399,7 +400,8 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
399
400
  systemInstruction = None
400
401
  system_prompt = ""
401
402
  function_arguments = None
402
- for msg in request.messages:
403
+ request_messages = copy.deepcopy(request.messages)
404
+ for msg in request_messages:
403
405
  if msg.role == "assistant":
404
406
  msg.role = "model"
405
407
  tool_calls = None
@@ -228,7 +228,12 @@ async def update_initial_model(provider):
228
228
  def safe_get(data, *keys, default=None):
229
229
  for key in keys:
230
230
  try:
231
- data = data[key] if isinstance(data, (dict, list)) else data.get(key)
231
+ if isinstance(data, (dict, list)):
232
+ data = data[key]
233
+ elif isinstance(key, str) and hasattr(data, key):
234
+ data = getattr(data, key)
235
+ else:
236
+ data = data.get(key)
232
237
  except (KeyError, IndexError, AttributeError, TypeError):
233
238
  return default
234
239
  if not data:
@@ -4,13 +4,13 @@ import json
4
4
  from .base import BaseLLM
5
5
 
6
6
  API = os.environ.get('API', None)
7
- API_URL = os.environ.get('API_URL', None)
7
+ BASE_URL = os.environ.get('BASE_URL', None)
8
8
 
9
9
  class whisper(BaseLLM):
10
10
  def __init__(
11
11
  self,
12
12
  api_key: str,
13
- api_url: str = (os.environ.get("API_URL") or "https://api.openai.com/v1/audio/transcriptions"),
13
+ api_url: str = (os.environ.get("BASE_URL") or "https://api.openai.com/v1/audio/transcriptions"),
14
14
  timeout: float = 20,
15
15
  ):
16
16
  super().__init__(api_key, api_url=api_url, timeout=timeout)
@@ -11,8 +11,8 @@ class BaseLLM:
11
11
  def __init__(
12
12
  self,
13
13
  api_key: str = None,
14
- engine: str = os.environ.get("GPT_ENGINE") or "gpt-3.5-turbo",
15
- api_url: str = (os.environ.get("API_URL", None) or "https://api.openai.com/v1/chat/completions"),
14
+ engine: str = os.environ.get("MODEL") or "gpt-3.5-turbo",
15
+ api_url: str = (os.environ.get("BASE_URL", None) or "https://api.openai.com/v1/chat/completions"),
16
16
  system_prompt: str = prompt.chatgpt_system_prompt,
17
17
  proxy: str = None,
18
18
  timeout: float = 600,
@@ -17,6 +17,21 @@ from ..core.request import prepare_request_payload
17
17
  from ..core.response import fetch_response_stream, fetch_response
18
18
  from ..architext.architext import Messages, SystemMessage, UserMessage, AssistantMessage, ToolCalls, ToolResults, Texts, RoleMessage, Images, Files
19
19
 
20
+ class ToolResult(Texts):
21
+ def __init__(self, tool_name: str, tool_args: str, tool_response: str, name: Optional[str] = None, visible: bool = True, newline: bool = True):
22
+ super().__init__(text=tool_response, name=name or f"tool_result_{tool_name}", visible=visible, newline=newline)
23
+ self.tool_name = tool_name
24
+ self.tool_args = tool_args
25
+
26
+ async def render(self) -> Optional[str]:
27
+ tool_response = await super().render()
28
+ if tool_response is None:
29
+ tool_response = ""
30
+ if self.tool_args:
31
+ return f"[{self.tool_name}({self.tool_args}) Result]:\n\n{tool_response}"
32
+ else:
33
+ return f"[{self.tool_name} Result]:\n\n{tool_response}"
34
+
20
35
  class APITimeoutError(Exception):
21
36
  """Custom exception for API timeout errors."""
22
37
  pass
@@ -74,8 +89,8 @@ class chatgpt(BaseLLM):
74
89
  def __init__(
75
90
  self,
76
91
  api_key: str = None,
77
- engine: str = os.environ.get("GPT_ENGINE") or "gpt-4o",
78
- api_url: str = (os.environ.get("API_URL") or "https://api.openai.com/v1/chat/completions"),
92
+ engine: str = os.environ.get("MODEL") or "gpt-4o",
93
+ api_url: str = (os.environ.get("BASE_URL") or "https://api.openai.com/v1/chat/completions"),
79
94
  system_prompt: str = "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
80
95
  proxy: str = None,
81
96
  timeout: float = 600,
@@ -172,8 +187,8 @@ class chatgpt(BaseLLM):
172
187
  self.conversation[convo_id].append(ToolCalls(tool_calls))
173
188
  self.conversation[convo_id].append(ToolResults(tool_call_id=function_call_id, content=message))
174
189
  else:
175
- last_user_message = self.conversation[convo_id][-1]["content"]
176
- if last_user_message != message:
190
+ last_user_message = self.conversation[convo_id][-1]
191
+ if last_user_message != UserMessage(message):
177
192
  image_message_list = UserMessage()
178
193
  if isinstance(function_arguments, str):
179
194
  functions_list = json.loads(function_arguments)
@@ -564,7 +579,7 @@ class chatgpt(BaseLLM):
564
579
  tool_calls = function_parameter
565
580
 
566
581
  # 处理所有工具调用
567
- all_responses = []
582
+ all_responses = UserMessage()
568
583
 
569
584
  for tool_info in tool_calls:
570
585
  tool_name = tool_info['function_name']
@@ -584,27 +599,28 @@ class chatgpt(BaseLLM):
584
599
  tool_response = chunk.replace("function_response:", "")
585
600
  else:
586
601
  yield chunk
587
- if tool_name == "read_file" and "<tool_error>" not in tool_response:
588
- self.conversation[convo_id].provider("files").update(tool_info['parameter']["file_path"], tool_response)
589
- all_responses.append(f"[{tool_name}({tool_args}) Result]:\n\nRead file successfully! The file content has been updated in the tag <latest_file_content>.")
590
- elif tool_name == "get_knowledge_graph_tree" and "<tool_error>" not in tool_response:
591
- self.conversation[convo_id].provider("knowledge_graph").visible = True
592
- all_responses.append(f"[{tool_name}({tool_args}) Result]:\n\nGet knowledge graph tree successfully! The knowledge graph tree has been updated in the tag <knowledge_graph_tree>.")
593
- elif tool_name == "write_to_file" and "<tool_error>" not in tool_response:
594
- all_responses.append(f"[{tool_name} Result]:\n\n{tool_response}")
595
- elif tool_name == "read_image" and "<tool_error>" not in tool_response:
596
- tool_info["base64_image"] = tool_response
597
- all_responses.append(f"[{tool_name}({tool_args}) Result]:\n\nRead image successfully!")
598
- elif tool_response.startswith("data:image/") and ";base64," in tool_response and "<tool_error>" not in tool_response:
599
- tool_info["base64_image"] = tool_response
600
- all_responses.append(f"[{tool_name}({tool_args}) Result]:\n\nRead image successfully!")
601
- else:
602
- all_responses.append(f"[{tool_name}({tool_args}) Result]:\n\n{tool_response}")
602
+ final_tool_response = tool_response
603
+ if "<tool_error>" not in tool_response:
604
+ if tool_name == "read_file":
605
+ self.conversation[convo_id].provider("files").update(tool_info['parameter']["file_path"], tool_response)
606
+ final_tool_response = "Read file successfully! The file content has been updated in the tag <latest_file_content>."
607
+ elif tool_name == "get_knowledge_graph_tree":
608
+ self.conversation[convo_id].provider("knowledge_graph").visible = True
609
+ final_tool_response = "Get knowledge graph tree successfully! The knowledge graph tree has been updated in the tag <knowledge_graph_tree>."
610
+ elif tool_name == "write_to_file":
611
+ tool_args = None
612
+ elif tool_name == "read_image":
613
+ tool_info["base64_image"] = tool_response
614
+ final_tool_response = "Read image successfully!"
615
+ elif tool_response.startswith("data:image/") and ";base64," in tool_response:
616
+ tool_info["base64_image"] = tool_response
617
+ final_tool_response = "Read image successfully!"
618
+ all_responses.append(ToolResult(tool_name, tool_args, final_tool_response))
603
619
 
604
620
  # 合并所有工具响应
605
- function_response = "\n\n".join(all_responses).strip()
621
+ function_response = all_responses
606
622
  if missing_required_params:
607
- function_response += "\n\n" + "\n\n".join(missing_required_params)
623
+ function_response.append(Texts("\n\n".join(missing_required_params)))
608
624
 
609
625
  # 使用第一个工具的名称和参数作为历史记录
610
626
  function_call_name = tool_calls[0]['function_name']
@@ -5,13 +5,13 @@ from ..models.base import BaseLLM
5
5
  from .registry import register_tool
6
6
 
7
7
  API = os.environ.get('API', None)
8
- API_URL = os.environ.get('API_URL', None)
8
+ BASE_URL = os.environ.get('BASE_URL', None)
9
9
 
10
10
  class dalle3(BaseLLM):
11
11
  def __init__(
12
12
  self,
13
13
  api_key: str,
14
- api_url: str = (os.environ.get("API_URL") or "https://api.openai.com/v1/images/generations"),
14
+ api_url: str = (os.environ.get("BASE_URL") or "https://api.openai.com/v1/images/generations"),
15
15
  timeout: float = 20,
16
16
  ):
17
17
  super().__init__(api_key, api_url=api_url, timeout=timeout)
@@ -90,10 +90,6 @@ chatgpt_system_prompt = (
90
90
  "You are ChatGPT, a large language model trained by OpenAI. Use simple characters to represent mathematical symbols. Do not use LaTeX commands. Respond conversationally"
91
91
  )
92
92
 
93
- claude_system_prompt = (
94
- "You are Claude, a large language model trained by Anthropic. Use simple characters to represent mathematical symbols. Do not use LaTeX commands. Respond conversationally in {}."
95
- )
96
-
97
93
  search_system_prompt = (
98
94
  "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally in {}."
99
95
  "You can break down the task into multiple steps and search the web to answer my questions one by one."
@@ -134,14 +134,6 @@ def is_surrounded_by_chinese(text, index):
134
134
  def replace_char(string, index, new_char):
135
135
  return string[:index] + new_char + string[index+1:]
136
136
 
137
- def claude_replace(text):
138
- Punctuation_mapping = {",": ",", ":": ":", "!": "!", "?": "?", ";": ";"}
139
- key_list = list(Punctuation_mapping.keys())
140
- for i in range(len(text)):
141
- if is_surrounded_by_chinese(text, i) and (text[i] in key_list):
142
- text = replace_char(text, i, Punctuation_mapping[text[i]])
143
- return text
144
-
145
137
  def safe_get(data, *keys, default=None):
146
138
  for key in keys:
147
139
  try:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: aient
3
- Version: 1.2.28
3
+ Version: 1.2.31
4
4
  Summary: Aient: The Awakening of Agent.
5
5
  Requires-Python: >=3.11
6
6
  Description-Content-Type: text/markdown
@@ -48,7 +48,5 @@ test/test_ddg_search.py
48
48
  test/test_google_search.py
49
49
  test/test_ollama.py
50
50
  test/test_plugin.py
51
- test/test_search.py
52
51
  test/test_url.py
53
- test/test_whisper.py
54
- test/test_yjh.py
52
+ test/test_whisper.py
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "aient"
3
- version = "1.2.28"
3
+ version = "1.2.31"
4
4
  description = "Aient: The Awakening of Agent."
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.11"
@@ -10,5 +10,5 @@ files = {
10
10
  'model': (None, 'whisper-1')
11
11
  }
12
12
 
13
- response = requests.post(os.environ.get('API_URL', None), headers=headers, files=files)
13
+ response = requests.post(os.environ.get('BASE_URL', None), headers=headers, files=files)
14
14
  print(response.text)
@@ -1,18 +0,0 @@
1
- import os
2
- from aient.models import chatgpt
3
-
4
- API = os.environ.get('API', None)
5
- API_URL = os.environ.get('API_URL', None)
6
- GPT_ENGINE = os.environ.get('GPT_ENGINE', 'gpt-4o')
7
-
8
- systemprompt = (
9
- "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally"
10
- )
11
- bot = chatgpt(api_key=API, api_url=API_URL, engine=GPT_ENGINE, system_prompt=systemprompt, print_log=True)
12
- for text in bot.ask_stream("搜索上海的天气"):
13
- # for text in bot.ask_stream("我在广州市,想周一去香港,周四早上回来,是去游玩,请你帮我规划整个行程。包括细节,如交通,住宿,餐饮,价格,等等,最好细节到每天各个部分的时间,花费,等等,尽量具体,用户一看就能直接执行的那种"):
14
- # for text in bot.ask_stream("上海有哪些好玩的地方?"):
15
- # for text in bot.ask_stream("just say test"):
16
- # for text in bot.ask_stream("我在上海想去重庆旅游,我只有2000元预算,我想在重庆玩一周,你能帮我规划一下吗?"):
17
- # for text in bot.ask_stream("我在上海想去重庆旅游,我有一天的时间。你能帮我规划一下吗?"):
18
- print(text, end="")
@@ -1,21 +0,0 @@
1
- import os
2
- from datetime import datetime
3
-
4
- from aient.models import chatgpt
5
- from aient.utils import prompt
6
-
7
- API = os.environ.get('API', None)
8
- API_URL = os.environ.get('API_URL', None)
9
- GPT_ENGINE = os.environ.get('GPT_ENGINE', 'gpt-4o')
10
- LANGUAGE = os.environ.get('LANGUAGE', 'Simplified Chinese')
11
-
12
- current_date = datetime.now()
13
- Current_Date = current_date.strftime("%Y-%m-%d")
14
-
15
- systemprompt = os.environ.get('SYSTEMPROMPT', prompt.system_prompt.format(LANGUAGE, Current_Date))
16
-
17
- bot = chatgpt(api_key=API, api_url=API_URL, engine=GPT_ENGINE, system_prompt=systemprompt)
18
- for text in bot.ask_stream("arXiv:2210.10716 这篇文章讲了啥"):
19
- # for text in bot.ask_stream("今天的微博热搜有哪些?"):
20
- # for text in bot.ask_stream("你现在是什么版本?"):
21
- print(text, end="")
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes