beswarm 0.1.36__tar.gz → 0.1.38__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140)
  1. {beswarm-0.1.36 → beswarm-0.1.38}/PKG-INFO +1 -2
  2. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/setup.py +1 -1
  3. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/claude.py +0 -67
  4. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/groq.py +0 -34
  5. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/config.py +3 -12
  6. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/utils/scripts.py +0 -23
  7. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/tools/UIworker.py +1 -1
  8. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm.egg-info/PKG-INFO +1 -2
  9. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm.egg-info/SOURCES.txt +0 -1
  10. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm.egg-info/requires.txt +0 -1
  11. {beswarm-0.1.36 → beswarm-0.1.38}/pyproject.toml +1 -2
  12. beswarm-0.1.36/beswarm/aient/test/test_langchain_search_old.py +0 -235
  13. {beswarm-0.1.36 → beswarm-0.1.38}/MANIFEST.in +0 -0
  14. {beswarm-0.1.36 → beswarm-0.1.38}/README.md +0 -0
  15. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/__init__.py +0 -0
  16. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/main.py +0 -0
  17. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/__init__.py +0 -0
  18. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/core/__init__.py +0 -0
  19. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/core/log_config.py +0 -0
  20. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/core/models.py +0 -0
  21. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/core/request.py +0 -0
  22. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/core/response.py +0 -0
  23. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/core/test/test_base_api.py +0 -0
  24. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/core/test/test_geminimask.py +0 -0
  25. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/core/test/test_image.py +0 -0
  26. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/core/test/test_payload.py +0 -0
  27. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/core/utils.py +0 -0
  28. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/__init__.py +0 -0
  29. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/audio.py +0 -0
  30. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/base.py +0 -0
  31. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/chatgpt.py +0 -0
  32. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/duckduckgo.py +0 -0
  33. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/gemini.py +0 -0
  34. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/vertex.py +0 -0
  35. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/__init__.py +0 -0
  36. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/arXiv.py +0 -0
  37. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/excute_command.py +0 -0
  38. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/get_time.py +0 -0
  39. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/image.py +0 -0
  40. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/list_directory.py +0 -0
  41. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/read_file.py +0 -0
  42. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/registry.py +0 -0
  43. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/run_python.py +0 -0
  44. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/websearch.py +0 -0
  45. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/write_file.py +0 -0
  46. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/prompt/__init__.py +0 -0
  47. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/prompt/agent.py +0 -0
  48. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/utils/__init__.py +0 -0
  49. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/utils/prompt.py +0 -0
  50. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/chatgpt.py +0 -0
  51. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/claude.py +0 -0
  52. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test.py +0 -0
  53. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_API.py +0 -0
  54. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_Deepbricks.py +0 -0
  55. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_Web_crawler.py +0 -0
  56. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_aiwaves.py +0 -0
  57. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_aiwaves_arxiv.py +0 -0
  58. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_ask_gemini.py +0 -0
  59. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_class.py +0 -0
  60. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_claude.py +0 -0
  61. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_claude_zh_char.py +0 -0
  62. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_ddg_search.py +0 -0
  63. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_download_pdf.py +0 -0
  64. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_gemini.py +0 -0
  65. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_get_token_dict.py +0 -0
  66. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_google_search.py +0 -0
  67. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_jieba.py +0 -0
  68. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_json.py +0 -0
  69. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_logging.py +0 -0
  70. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_ollama.py +0 -0
  71. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_plugin.py +0 -0
  72. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_py_run.py +0 -0
  73. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_requests.py +0 -0
  74. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_search.py +0 -0
  75. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_tikitoken.py +0 -0
  76. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_token.py +0 -0
  77. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_url.py +0 -0
  78. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_whisper.py +0 -0
  79. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_wildcard.py +0 -0
  80. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/test/test_yjh.py +0 -0
  81. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/README.md +0 -0
  82. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/arduino-tags.scm +0 -0
  83. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/c-tags.scm +0 -0
  84. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/chatito-tags.scm +0 -0
  85. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/commonlisp-tags.scm +0 -0
  86. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/cpp-tags.scm +0 -0
  87. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/csharp-tags.scm +0 -0
  88. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/d-tags.scm +0 -0
  89. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/dart-tags.scm +0 -0
  90. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/elisp-tags.scm +0 -0
  91. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/elixir-tags.scm +0 -0
  92. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/elm-tags.scm +0 -0
  93. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/gleam-tags.scm +0 -0
  94. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/go-tags.scm +0 -0
  95. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/java-tags.scm +0 -0
  96. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/javascript-tags.scm +0 -0
  97. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/lua-tags.scm +0 -0
  98. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/pony-tags.scm +0 -0
  99. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/properties-tags.scm +0 -0
  100. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/python-tags.scm +0 -0
  101. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/r-tags.scm +0 -0
  102. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/racket-tags.scm +0 -0
  103. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/ruby-tags.scm +0 -0
  104. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/rust-tags.scm +0 -0
  105. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/solidity-tags.scm +0 -0
  106. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/swift-tags.scm +0 -0
  107. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-language-pack/udev-tags.scm +0 -0
  108. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/README.md +0 -0
  109. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/c-tags.scm +0 -0
  110. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/c_sharp-tags.scm +0 -0
  111. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/cpp-tags.scm +0 -0
  112. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/dart-tags.scm +0 -0
  113. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/elisp-tags.scm +0 -0
  114. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/elixir-tags.scm +0 -0
  115. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/elm-tags.scm +0 -0
  116. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/go-tags.scm +0 -0
  117. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/hcl-tags.scm +0 -0
  118. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/java-tags.scm +0 -0
  119. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/javascript-tags.scm +0 -0
  120. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/kotlin-tags.scm +0 -0
  121. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/ocaml-tags.scm +0 -0
  122. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/php-tags.scm +0 -0
  123. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/python-tags.scm +0 -0
  124. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/ql-tags.scm +0 -0
  125. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/ruby-tags.scm +0 -0
  126. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/rust-tags.scm +0 -0
  127. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/scala-tags.scm +0 -0
  128. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/queries/tree-sitter-languages/typescript-tags.scm +0 -0
  129. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/tools/__init__.py +0 -0
  130. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/tools/click.py +0 -0
  131. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/tools/edit_file.py +0 -0
  132. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/tools/planner.py +0 -0
  133. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/tools/repomap.py +0 -0
  134. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/tools/search_arxiv.py +0 -0
  135. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/tools/think.py +0 -0
  136. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/tools/worker.py +0 -0
  137. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm/utils.py +0 -0
  138. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm.egg-info/dependency_links.txt +0 -0
  139. {beswarm-0.1.36 → beswarm-0.1.38}/beswarm.egg-info/top_level.txt +0 -0
  140. {beswarm-0.1.36 → beswarm-0.1.38}/setup.cfg +0 -0
{beswarm-0.1.36 → beswarm-0.1.38}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: beswarm
-Version: 0.1.36
+Version: 0.1.38
 Summary: MAS
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
@@ -27,7 +27,6 @@ Requires-Dist: pyperclip>=1.9.0
 Requires-Dist: pytz>=2025.2
 Requires-Dist: requests>=2.32.3
 Requires-Dist: scipy>=1.15.2
-Requires-Dist: tiktoken==0.6.0
 Requires-Dist: tqdm>=4.67.1
 
 # beswarm
{beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/setup.py
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="aient",
-    version="1.0.93",
+    version="1.0.94",
     description="Aient: The Awakening of Agent.",
     long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
     long_description_content_type="text/markdown",
{beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/claude.py
@@ -2,7 +2,6 @@ import os
 import re
 import json
 import copy
-import tiktoken
 import requests
 
 from .base import BaseLLM
@@ -65,39 +64,6 @@ class claude(BaseLLM):
         self.conversation[convo_id] = claudeConversation()
         self.system_prompt = system_prompt or self.system_prompt
 
-    def __truncate_conversation(self, convo_id: str = "default") -> None:
-        """
-        Truncate the conversation
-        """
-        while True:
-            if (
-                self.get_token_count(convo_id) > self.truncate_limit
-                and len(self.conversation[convo_id]) > 1
-            ):
-                # Don't remove the first message
-                self.conversation[convo_id].pop(1)
-            else:
-                break
-
-    def get_token_count(self, convo_id: str = "default") -> int:
-        """
-        Get token count
-        """
-        tiktoken.model.MODEL_TO_ENCODING["claude-2.1"] = "cl100k_base"
-        encoding = tiktoken.encoding_for_model(self.engine)
-
-        num_tokens = 0
-        for message in self.conversation[convo_id]:
-            # every message follows <im_start>{role/name}\n{content}<im_end>\n
-            num_tokens += 5
-            for key, value in message.items():
-                if value:
-                    num_tokens += len(encoding.encode(value))
-                if key == "name":  # if there's a name, the role is omitted
-                    num_tokens += 5  # role is always required and always 1 token
-        num_tokens += 5  # every reply is primed with <im_start>assistant
-        return num_tokens
-
     def ask_stream(
         self,
         prompt: str,
@@ -267,39 +233,6 @@ class claude3(BaseLLM):
         self.conversation[convo_id] = list()
         self.system_prompt = system_prompt or self.system_prompt
 
-    def __truncate_conversation(self, convo_id: str = "default") -> None:
-        """
-        Truncate the conversation
-        """
-        while True:
-            if (
-                self.get_token_count(convo_id) > self.truncate_limit
-                and len(self.conversation[convo_id]) > 1
-            ):
-                # Don't remove the first message
-                self.conversation[convo_id].pop(1)
-            else:
-                break
-
-    def get_token_count(self, convo_id: str = "default") -> int:
-        """
-        Get token count
-        """
-        tiktoken.model.MODEL_TO_ENCODING["claude-2.1"] = "cl100k_base"
-        encoding = tiktoken.encoding_for_model(self.engine)
-
-        num_tokens = 0
-        for message in self.conversation[convo_id]:
-            # every message follows <im_start>{role/name}\n{content}<im_end>\n
-            num_tokens += 5
-            for key, value in message.items():
-                if value:
-                    num_tokens += len(encoding.encode(value))
-                if key == "name":  # if there's a name, the role is omitted
-                    num_tokens += 5  # role is always required and always 1 token
-        num_tokens += 5  # every reply is primed with <im_start>assistant
-        return num_tokens
-
     def ask_stream(
         self,
         prompt: str,
{beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/models/groq.py
@@ -1,7 +1,6 @@
 import os
 import json
 import requests
-import tiktoken
 
 from .base import BaseLLM
 
@@ -52,39 +51,6 @@ class groq(BaseLLM):
         self.conversation[convo_id] = list()
         self.system_prompt = system_prompt or self.system_prompt
 
-    def __truncate_conversation(self, convo_id: str = "default") -> None:
-        """
-        Truncate the conversation
-        """
-        while True:
-            if (
-                self.get_token_count(convo_id) > self.truncate_limit
-                and len(self.conversation[convo_id]) > 1
-            ):
-                # Don't remove the first message
-                self.conversation[convo_id].pop(1)
-            else:
-                break
-
-    def get_token_count(self, convo_id: str = "default") -> int:
-        """
-        Get token count
-        """
-        # tiktoken.model.MODEL_TO_ENCODING["mixtral-8x7b-32768"] = "cl100k_base"
-        encoding = tiktoken.get_encoding("cl100k_base")
-
-        num_tokens = 0
-        for message in self.conversation[convo_id]:
-            # every message follows <im_start>{role/name}\n{content}<im_end>\n
-            num_tokens += 5
-            for key, value in message.items():
-                if value:
-                    num_tokens += len(encoding.encode(value))
-                if key == "name":  # if there's a name, the role is omitted
-                    num_tokens += 5  # role is always required and always 1 token
-        num_tokens += 5  # every reply is primed with <im_start>assistant
-        return num_tokens
-
     def ask_stream(
         self,
         prompt: str,
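
Both claude.py and groq.py drop the same tiktoken-based helpers, which is what makes the tiktoken==0.6.0 dependency removable below. For reference, a standalone sketch of the heuristic the removed get_token_count implemented (assumes tiktoken is installed and message values are strings; this helper is not part of 0.1.38):

import tiktoken

def approx_token_count(messages: list[dict]) -> int:
    # Mirrors the removed counter: cl100k_base encoding, +5 overhead per
    # message, +5 when a "name" key is present, +5 to prime the reply.
    encoding = tiktoken.get_encoding("cl100k_base")
    num_tokens = 0
    for message in messages:
        num_tokens += 5
        for key, value in message.items():
            if value:
                num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += 5
    num_tokens += 5
    return num_tokens

print(approx_token_count([{"role": "user", "content": "hello"}]))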
{beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/plugins/config.py
@@ -3,8 +3,7 @@ import json
 import inspect
 
 from .registry import registry
-from ..utils.scripts import cut_message
-from ..utils.prompt import search_key_word_prompt, arxiv_doc_user_prompt
+from ..utils.prompt import search_key_word_prompt
 
 async def get_tools_result_async(function_call_name, function_full_response, function_call_max_tokens, engine, robot, api_key, api_url, use_plugins, model, add_message, convo_id, language):
     function_response = ""
@@ -26,10 +25,7 @@ async def get_tools_result_async(function_call_name, function_full_response, fun
             yield chunk
         else:
             function_response = "\n\n".join(chunk)
-            # function_response = yield chunk
-            # function_response = yield from eval(function_call_name)(prompt, keywords)
-            function_call_max_tokens = 32000
-            function_response, text_len = cut_message(function_response, function_call_max_tokens, engine)
+
         if function_response:
             function_response = (
                 f"You need to response the following question: {prompt}. Search results is provided inside <Search_results></Search_results> XML tags. Your task is to think about the question step by step and then answer the above question in {language} based on the Search results provided. Please response in {language} and adopt a style that is logical, in-depth, and detailed. Note: In order to make the answer appear highly professional, you should be an expert in textual analysis, aiming to make the answer precise and comprehensive. Directly response markdown format, without using markdown code blocks. For each sentence quoting search results, a markdown ordered superscript number url link must be used to indicate the source, e.g., [¹](https://www.example.com)"
@@ -40,18 +36,13 @@ async def get_tools_result_async(function_call_name, function_full_response, fun
             ).format(function_response)
         else:
             function_response = "无法找到相关信息,停止使用 tools"
-            # user_prompt = f"You need to response the following question: {prompt}. Search results is provided inside <Search_results></Search_results> XML tags. Your task is to think about the question step by step and then answer the above question in {config.language} based on the Search results provided. Please response in {config.language} and adopt a style that is logical, in-depth, and detailed. Note: In order to make the answer appear highly professional, you should be an expert in textual analysis, aiming to make the answer precise and comprehensive. Directly response markdown format, without using markdown code blocks"
-            # self.add_to_conversation(user_prompt, "user", convo_id=convo_id)
+
     elif function_to_call:
         prompt = json.loads(function_full_response)
         if inspect.iscoroutinefunction(function_to_call):
            function_response = await function_to_call(**prompt)
        else:
            function_response = function_to_call(**prompt)
-        function_response, text_len = cut_message(function_response, function_call_max_tokens, engine)
-
-        # if function_call_name == "download_read_arxiv_pdf":
-        #     add_message(arxiv_doc_user_prompt, "user", convo_id=convo_id)
 
     function_response = (
         f"function_response:{function_response}"
{beswarm-0.1.36 → beswarm-0.1.38}/beswarm/aient/src/aient/utils/scripts.py
@@ -1,33 +1,10 @@
 import os
 import json
-import base64
-import tiktoken
 import requests
 import urllib.parse
 
 from ..core.utils import get_image_message
 
-def get_encode_text(text, model_name):
-    tiktoken.get_encoding("cl100k_base")
-    model_name = "gpt-3.5-turbo"
-    encoding = tiktoken.encoding_for_model(model_name)
-    encode_text = encoding.encode(text, disallowed_special=())
-    return encoding, encode_text
-
-def get_text_token_len(text, model_name):
-    encoding, encode_text = get_encode_text(text, model_name)
-    return len(encode_text)
-
-def cut_message(message: str, max_tokens: int, model_name: str):
-    if type(message) != str:
-        message = str(message)
-    encoding, encode_text = get_encode_text(message, model_name)
-    if len(encode_text) > max_tokens:
-        encode_text = encode_text[:max_tokens]
-        message = encoding.decode(encode_text)
-    encode_text = encoding.encode(message, disallowed_special=())
-    return message, len(encode_text)
-
 def get_doc_from_url(url):
     filename = urllib.parse.unquote(url.split("/")[-1])
     response = requests.get(url, stream=True)
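
The deleted cut_message truncated on token boundaries and reported the resulting length; callers used it roughly like this (a sketch against the removed code above; requires tiktoken):

long_tool_output = "search results " * 5000
message, token_len = cut_message(long_tool_output, 1000, "gpt-3.5-turbo")
# message is decoded back from the first 1000 tokens, so no token is split
assert token_len <= 1000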
{beswarm-0.1.36 → beswarm-0.1.38}/beswarm/tools/UIworker.py
@@ -3,7 +3,6 @@ import io
 import copy
 import base64
 import platform
-import pyautogui
 from datetime import datetime
 from ..aient.src.aient.plugins import register_tool, get_function_call_list
 
@@ -16,6 +15,7 @@ from ..utils import extract_xml_content
 async def get_current_screen_image_message(prompt):
     print("instruction agent 正在截取当前屏幕...")
     try:
+        import pyautogui
         # use pyautogui to capture the screen; returns a PIL Image object
         screenshot = pyautogui.screenshot()
         # img_width, img_height = screenshot.size  # get the screenshot dimensions
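
Moving import pyautogui from module scope into the try block defers the dependency until a screenshot is actually requested, so importing beswarm.tools no longer fails on headless machines where pyautogui cannot find a display. A minimal sketch of the pattern (hypothetical function, not from the package):

def take_screenshot():
    # Lazy import: failure happens at call time, inside the caller's
    # error handling, rather than at module import time.
    import pyautogui
    return pyautogui.screenshot()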
{beswarm-0.1.36 → beswarm-0.1.38}/beswarm.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: beswarm
-Version: 0.1.36
+Version: 0.1.38
 Summary: MAS
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
@@ -27,7 +27,6 @@ Requires-Dist: pyperclip>=1.9.0
 Requires-Dist: pytz>=2025.2
 Requires-Dist: requests>=2.32.3
 Requires-Dist: scipy>=1.15.2
-Requires-Dist: tiktoken==0.6.0
 Requires-Dist: tqdm>=4.67.1
 
 # beswarm
{beswarm-0.1.36 → beswarm-0.1.38}/beswarm.egg-info/SOURCES.txt
@@ -66,7 +66,6 @@ beswarm/aient/test/test_get_token_dict.py
 beswarm/aient/test/test_google_search.py
 beswarm/aient/test/test_jieba.py
 beswarm/aient/test/test_json.py
-beswarm/aient/test/test_langchain_search_old.py
 beswarm/aient/test/test_logging.py
 beswarm/aient/test/test_ollama.py
 beswarm/aient/test/test_plugin.py
{beswarm-0.1.36 → beswarm-0.1.38}/beswarm.egg-info/requires.txt
@@ -21,5 +21,4 @@ pyperclip>=1.9.0
 pytz>=2025.2
 requests>=2.32.3
 scipy>=1.15.2
-tiktoken==0.6.0
 tqdm>=4.67.1
{beswarm-0.1.36 → beswarm-0.1.38}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "beswarm"
-version = "0.1.36"
+version = "0.1.38"
 description = "MAS"
 readme = "README.md"
 requires-python = ">=3.11"
@@ -28,7 +28,6 @@ dependencies = [
     "pytz>=2025.2",
     "requests>=2.32.3",
     "scipy>=1.15.2",
-    "tiktoken==0.6.0",
     "tqdm>=4.67.1",
 ]
 
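One way to confirm that an installed 0.1.38 no longer declares the pin, using only the standard library (the exact strings returned depend on the installed metadata):

from importlib.metadata import requires

deps = requires("beswarm") or []
assert not any(dep.startswith("tiktoken") for dep in deps)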
beswarm-0.1.36/beswarm/aient/test/test_langchain_search_old.py (deleted)
@@ -1,235 +0,0 @@
-import os
-import re
-
-import sys
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import config
-
-from langchain.chat_models import ChatOpenAI
-
-
-from langchain.chains import RetrievalQA, RetrievalQAWithSourcesChain
-
-from langchain.prompts.chat import (
-    ChatPromptTemplate,
-    SystemMessagePromptTemplate,
-    HumanMessagePromptTemplate,
-)
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.vectorstores import Chroma
-from langchain.text_splitter import CharacterTextSplitter
-
-from langchain.document_loaders import UnstructuredPDFLoader
-
-def getmd5(string):
-    import hashlib
-    md5_hash = hashlib.md5()
-    md5_hash.update(string.encode('utf-8'))
-    md5_hex = md5_hash.hexdigest()
-    return md5_hex
-
-from utils.sitemap import SitemapLoader
-async def get_doc_from_sitemap(url):
-    # https://www.langchain.asia/modules/indexes/document_loaders/examples/sitemap#%E8%BF%87%E6%BB%A4%E7%AB%99%E7%82%B9%E5%9C%B0%E5%9B%BE-url-
-    sitemap_loader = SitemapLoader(web_path=url)
-    docs = await sitemap_loader.load()
-    return docs
-
-async def get_doc_from_local(docpath, doctype="md"):
-    from langchain.document_loaders import DirectoryLoader
-    # load all files of the given type from the folder
-    loader = DirectoryLoader(docpath, glob='**/*.' + doctype)
-    # convert the data into document objects; each file becomes one document
-    documents = loader.load()
-    return documents
-
-system_template="""Use the following pieces of context to answer the users question.
-If you don't know the answer, just say "Hmm..., I'm not sure.", don't try to make up an answer.
-ALWAYS return a "Sources" part in your answer.
-The "Sources" part should be a reference to the source of the document from which you got your answer.
-
-Example of your response should be:
-
-```
-The answer is foo
-
-Sources:
-1. abc
-2. xyz
-```
-Begin!
-----------------
-{summaries}
-"""
-messages = [
-    SystemMessagePromptTemplate.from_template(system_template),
-    HumanMessagePromptTemplate.from_template("{question}")
-]
-prompt = ChatPromptTemplate.from_messages(messages)
-
-def get_chain(store, llm):
-    chain_type_kwargs = {"prompt": prompt}
-    chain = RetrievalQAWithSourcesChain.from_chain_type(
-        llm,
-        chain_type="stuff",
-        retriever=store.as_retriever(),
-        chain_type_kwargs=chain_type_kwargs,
-        reduce_k_below_max_tokens=True
-    )
-    return chain
-
-async def docQA(docpath, query_message, persist_db_path="db", model = "gpt-3.5-turbo"):
-    chatllm = ChatOpenAI(temperature=0.5, openai_api_base=config.bot_api_url.v1_url, model_name=model, openai_api_key=config.API)
-    embeddings = OpenAIEmbeddings(openai_api_base=config.bot_api_url.v1_url, openai_api_key=config.API)
-
-    sitemap = "sitemap.xml"
-    match = re.match(r'^(https?|ftp)://[^\s/$.?#].[^\s]*$', docpath)
-    if match:
-        doc_method = get_doc_from_sitemap
-        docpath = os.path.join(docpath, sitemap)
-    else:
-        doc_method = get_doc_from_local
-
-    persist_db_path = getmd5(docpath)
-    if not os.path.exists(persist_db_path):
-        documents = await doc_method(docpath)
-        # initialize the splitter
-        text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=50)
-        # persist the data
-        split_docs = text_splitter.split_documents(documents)
-        vector_store = Chroma.from_documents(split_docs, embeddings, persist_directory=persist_db_path)
-        vector_store.persist()
-    else:
-        # load the data
-        vector_store = Chroma(persist_directory=persist_db_path, embedding_function=embeddings)
-
-    # create the QA object
-    qa = get_chain(vector_store, chatllm)
-    # qa = RetrievalQA.from_chain_type(llm=chatllm, chain_type="stuff", retriever=vector_store.as_retriever(), return_source_documents=True)
-    # run the query
-    result = qa({"question": query_message})
-    return result
-
-
-def persist_emdedding_pdf(docurl, persist_db_path):
-    embeddings = OpenAIEmbeddings(openai_api_base=config.bot_api_url.v1_url, openai_api_key=os.environ.get('API', None))
-    filename = get_doc_from_url(docurl)
-    docpath = os.getcwd() + "/" + filename
-    loader = UnstructuredPDFLoader(docpath)
-    documents = loader.load()
-    # initialize the splitter
-    text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=25)
-    # split the loaded documents
-    split_docs = text_splitter.split_documents(documents)
-    vector_store = Chroma.from_documents(split_docs, embeddings, persist_directory=persist_db_path)
-    vector_store.persist()
-    os.remove(docpath)
-    return vector_store
-
-async def pdfQA(docurl, docpath, query_message, model="gpt-3.5-turbo"):
-    chatllm = ChatOpenAI(temperature=0.5, openai_api_base=config.bot_api_url.v1_url, model_name=model, openai_api_key=os.environ.get('API', None))
-    embeddings = OpenAIEmbeddings(openai_api_base=config.bot_api_url.v1_url, openai_api_key=os.environ.get('API', None))
-    persist_db_path = getmd5(docpath)
-    if not os.path.exists(persist_db_path):
-        vector_store = persist_emdedding_pdf(docurl, persist_db_path)
-    else:
-        vector_store = Chroma(persist_directory=persist_db_path, embedding_function=embeddings)
-    qa = RetrievalQA.from_chain_type(llm=chatllm, chain_type="stuff", retriever=vector_store.as_retriever(), return_source_documents=True)
-    result = qa({"query": query_message})
-    return result['result']
-
-
-def pdf_search(docurl, query_message, model="gpt-3.5-turbo"):
-    chatllm = ChatOpenAI(temperature=0.5, openai_api_base=config.bot_api_url.v1_url, model_name=model, openai_api_key=os.environ.get('API', None))
-    embeddings = OpenAIEmbeddings(openai_api_base=config.bot_api_url.v1_url, openai_api_key=os.environ.get('API', None))
-    filename = get_doc_from_url(docurl)
-    docpath = os.getcwd() + "/" + filename
-    loader = UnstructuredPDFLoader(docpath)
-    try:
-        documents = loader.load()
-    except:
-        print("pdf load error! docpath:", docpath)
-        return ""
-    os.remove(docpath)
-    # initialize the splitter
-    text_splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=25)
-    # split the loaded documents
-    split_docs = text_splitter.split_documents(documents)
-    vector_store = Chroma.from_documents(split_docs, embeddings)
-    # create the QA object
-    qa = RetrievalQA.from_chain_type(llm=chatllm, chain_type="stuff", retriever=vector_store.as_retriever(),return_source_documents=True)
-    # run the query
-    result = qa({"query": query_message})
-    return result['result']
-
-def summary_each_url(threads, chainllm, prompt):
-    summary_prompt = PromptTemplate(
-        input_variables=["web_summary", "question", "language"],
-        template=(
-            "You need to response the following question: {question}."
-            "Your task is answer the above question in {language} based on the Search results provided. Provide a detailed and in-depth response"
-            "If there is no relevant content in the search results, just answer None, do not make any explanations."
-            "Search results: {web_summary}."
-        ),
-    )
-    summary_threads = []
-
-    for t in threads:
-        tmp = t.join()
-        print(tmp)
-        chain = LLMChain(llm=chainllm, prompt=summary_prompt)
-        chain_thread = ThreadWithReturnValue(target=chain.run, args=({"web_summary": tmp, "question": prompt, "language": config.LANGUAGE},))
-        chain_thread.start()
-        summary_threads.append(chain_thread)
-
-    url_result = ""
-    for t in summary_threads:
-        tmp = t.join()
-        print("summary", tmp)
-        if tmp != "None":
-            url_result += "\n\n" + tmp
-    return url_result
-
-def get_search_results(prompt: str, context_max_tokens: int):
-
-    url_text_list = get_url_text_list(prompt)
-    useful_source_text = "\n\n".join(url_text_list)
-    # useful_source_text = summary_each_url(threads, chainllm, prompt)
-
-    useful_source_text, search_tokens_len = cut_message(useful_source_text, context_max_tokens)
-    print("search tokens len", search_tokens_len, "\n\n")
-
-    return useful_source_text
-
-from typing import Any
-from langchain.schema.output import LLMResult
-from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-class ChainStreamHandler(StreamingStdOutCallbackHandler):
-    def __init__(self):
-        self.tokens = []
-        # remember: set this to True when finished
-        self.finish = False
-        self.answer = ""
-
-    def on_llm_new_token(self, token: str, **kwargs):
-        # print(token)
-        self.tokens.append(token)
-        # yield ''.join(self.tokens)
-        # print(''.join(self.tokens))
-
-    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
-        self.finish = 1
-
-    def on_llm_error(self, error: Exception, **kwargs: Any) -> None:
-        print(str(error))
-        self.tokens.append(str(error))
-
-    def generate_tokens(self):
-        while not self.finish or self.tokens:
-            if self.tokens:
-                data = self.tokens.pop(0)
-                self.answer += data
-                yield data
-            else:
-                pass
-        return self.answer