beswarm 0.1.12__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. beswarm/aient/main.py +50 -0
  2. beswarm/aient/setup.py +15 -0
  3. beswarm/aient/src/aient/__init__.py +1 -0
  4. beswarm/aient/src/aient/core/__init__.py +1 -0
  5. beswarm/aient/src/aient/core/log_config.py +6 -0
  6. beswarm/aient/src/aient/core/models.py +232 -0
  7. beswarm/aient/src/aient/core/request.py +1665 -0
  8. beswarm/aient/src/aient/core/response.py +617 -0
  9. beswarm/aient/src/aient/core/test/test_base_api.py +18 -0
  10. beswarm/aient/src/aient/core/test/test_image.py +15 -0
  11. beswarm/aient/src/aient/core/test/test_payload.py +92 -0
  12. beswarm/aient/src/aient/core/utils.py +715 -0
  13. beswarm/aient/src/aient/models/__init__.py +9 -0
  14. beswarm/aient/src/aient/models/audio.py +63 -0
  15. beswarm/aient/src/aient/models/base.py +251 -0
  16. beswarm/aient/src/aient/models/chatgpt.py +938 -0
  17. beswarm/aient/src/aient/models/claude.py +640 -0
  18. beswarm/aient/src/aient/models/duckduckgo.py +241 -0
  19. beswarm/aient/src/aient/models/gemini.py +357 -0
  20. beswarm/aient/src/aient/models/groq.py +268 -0
  21. beswarm/aient/src/aient/models/vertex.py +420 -0
  22. beswarm/aient/src/aient/plugins/__init__.py +33 -0
  23. beswarm/aient/src/aient/plugins/arXiv.py +48 -0
  24. beswarm/aient/src/aient/plugins/config.py +172 -0
  25. beswarm/aient/src/aient/plugins/excute_command.py +35 -0
  26. beswarm/aient/src/aient/plugins/get_time.py +19 -0
  27. beswarm/aient/src/aient/plugins/image.py +72 -0
  28. beswarm/aient/src/aient/plugins/list_directory.py +50 -0
  29. beswarm/aient/src/aient/plugins/read_file.py +79 -0
  30. beswarm/aient/src/aient/plugins/registry.py +116 -0
  31. beswarm/aient/src/aient/plugins/run_python.py +156 -0
  32. beswarm/aient/src/aient/plugins/websearch.py +394 -0
  33. beswarm/aient/src/aient/plugins/write_file.py +51 -0
  34. beswarm/aient/src/aient/prompt/__init__.py +1 -0
  35. beswarm/aient/src/aient/prompt/agent.py +280 -0
  36. beswarm/aient/src/aient/utils/__init__.py +0 -0
  37. beswarm/aient/src/aient/utils/prompt.py +143 -0
  38. beswarm/aient/src/aient/utils/scripts.py +721 -0
  39. beswarm/aient/test/chatgpt.py +161 -0
  40. beswarm/aient/test/claude.py +32 -0
  41. beswarm/aient/test/test.py +2 -0
  42. beswarm/aient/test/test_API.py +6 -0
  43. beswarm/aient/test/test_Deepbricks.py +20 -0
  44. beswarm/aient/test/test_Web_crawler.py +262 -0
  45. beswarm/aient/test/test_aiwaves.py +25 -0
  46. beswarm/aient/test/test_aiwaves_arxiv.py +19 -0
  47. beswarm/aient/test/test_ask_gemini.py +8 -0
  48. beswarm/aient/test/test_class.py +17 -0
  49. beswarm/aient/test/test_claude.py +23 -0
  50. beswarm/aient/test/test_claude_zh_char.py +26 -0
  51. beswarm/aient/test/test_ddg_search.py +50 -0
  52. beswarm/aient/test/test_download_pdf.py +56 -0
  53. beswarm/aient/test/test_gemini.py +97 -0
  54. beswarm/aient/test/test_get_token_dict.py +21 -0
  55. beswarm/aient/test/test_google_search.py +35 -0
  56. beswarm/aient/test/test_jieba.py +32 -0
  57. beswarm/aient/test/test_json.py +65 -0
  58. beswarm/aient/test/test_langchain_search_old.py +235 -0
  59. beswarm/aient/test/test_logging.py +32 -0
  60. beswarm/aient/test/test_ollama.py +55 -0
  61. beswarm/aient/test/test_plugin.py +16 -0
  62. beswarm/aient/test/test_py_run.py +26 -0
  63. beswarm/aient/test/test_requests.py +162 -0
  64. beswarm/aient/test/test_search.py +18 -0
  65. beswarm/aient/test/test_tikitoken.py +19 -0
  66. beswarm/aient/test/test_token.py +94 -0
  67. beswarm/aient/test/test_url.py +33 -0
  68. beswarm/aient/test/test_whisper.py +14 -0
  69. beswarm/aient/test/test_wildcard.py +20 -0
  70. beswarm/aient/test/test_yjh.py +21 -0
  71. {beswarm-0.1.12.dist-info → beswarm-0.1.13.dist-info}/METADATA +1 -1
  72. beswarm-0.1.13.dist-info/RECORD +131 -0
  73. beswarm-0.1.12.dist-info/RECORD +0 -61
  74. {beswarm-0.1.12.dist-info → beswarm-0.1.13.dist-info}/WHEEL +0 -0
  75. {beswarm-0.1.12.dist-info → beswarm-0.1.13.dist-info}/top_level.txt +0 -0
beswarm/aient/src/aient/models/__init__.py
@@ -0,0 +1,9 @@
+ from .chatgpt import *
+ from .claude import *
+ from .gemini import *
+ from .vertex import *
+ from .groq import *
+ from .audio import *
+ from .duckduckgo import *
+
+ # __all__ = ["chatgpt", "claude", "claude3", "gemini", "groq"]
beswarm/aient/src/aient/models/audio.py
@@ -0,0 +1,63 @@
+ import os
+ import requests
+ import json
+ from .base import BaseLLM
+
+ API = os.environ.get('API', None)
+ API_URL = os.environ.get('API_URL', None)
+
+ class whisper(BaseLLM):
+     def __init__(
+         self,
+         api_key: str,
+         api_url: str = (os.environ.get("API_URL") or "https://api.openai.com/v1/audio/transcriptions"),
+         timeout: float = 20,
+     ):
+         super().__init__(api_key, api_url=api_url, timeout=timeout)
+         self.engine: str = "whisper-1"
+
+     def generate(
+         self,
+         audio_file: bytes,
+         model: str = "whisper-1",
+         **kwargs,
+     ):
+         url = self.api_url.audio_transcriptions
+         headers = {"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"}
+
+         files = {
+             "file": ("audio.mp3", audio_file, "audio/mpeg")
+         }
+
+         data = {
+             "model": os.environ.get("AUDIO_MODEL_NAME") or model or self.engine,
+         }
+         try:
+             response = self.session.post(
+                 url,
+                 headers=headers,
+                 data=data,
+                 files=files,
+                 timeout=kwargs.get("timeout", self.timeout),
+                 stream=True,
+             )
+         except ConnectionError:
+             print("Connection error. Please check the server status or your network connection.")
+             return
+         except requests.exceptions.ReadTimeout as e:
+             print(f"Request timed out. Please check your network connection or increase the timeout. {e}")
+             return
+         except Exception as e:
+             print(f"An unexpected error occurred: {e}")
+             return
+
+         if response.status_code != 200:
+             raise Exception(f"{response.status_code} {response.reason} {response.text}")
+         json_data = json.loads(response.text)
+         text = json_data["text"]
+         return text
+
+ def audio_transcriptions(text):
+     dallbot = whisper(api_key=f"{API}")
+     for data in dallbot.generate(text):
+         return data
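
For orientation, a minimal usage sketch of the new whisper wrapper (not part of the diff): it POSTs the audio bytes as multipart form data to the transcription endpoint and returns the "text" field of the JSON response, or None on a request error. The import path, environment variable, and file name below are illustrative assumptions.

# Hypothetical usage sketch. Assumes the vendored package is importable as
# `aient` and that the API environment variable holds a valid key.
import os
from aient.models.audio import whisper

client = whisper(api_key=os.environ["API"])
with open("sample.mp3", "rb") as f:          # sample.mp3 is a placeholder file
    transcript = client.generate(f.read())   # "text" field of the response, or None on error
print(transcript)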
beswarm/aient/src/aient/models/base.py
@@ -0,0 +1,251 @@
+ import os
+ import httpx
+ import requests
+ from pathlib import Path
+ from collections import defaultdict
+
+ from ..utils import prompt
+ from ..core.utils import BaseAPI
+
+ class BaseLLM:
+     def __init__(
+         self,
+         api_key: str = None,
+         engine: str = os.environ.get("GPT_ENGINE") or "gpt-3.5-turbo",
+         api_url: str = (os.environ.get("API_URL", None) or "https://api.openai.com/v1/chat/completions"),
+         system_prompt: str = prompt.chatgpt_system_prompt,
+         proxy: str = None,
+         timeout: float = 600,
+         max_tokens: int = None,
+         temperature: float = 0.5,
+         top_p: float = 1.0,
+         presence_penalty: float = 0.0,
+         frequency_penalty: float = 0.0,
+         reply_count: int = 1,
+         truncate_limit: int = None,
+         use_plugins: bool = True,
+         print_log: bool = False,
+     ) -> None:
+         self.api_key: str = api_key
+         self.engine: str = engine
+         self.api_url: str = BaseAPI(api_url or "https://api.openai.com/v1/chat/completions")
+         self.system_prompt: str = system_prompt
+         self.max_tokens: int = max_tokens
+         self.truncate_limit: int = truncate_limit
+         self.temperature: float = temperature
+         self.top_p: float = top_p
+         self.presence_penalty: float = presence_penalty
+         self.frequency_penalty: float = frequency_penalty
+         self.reply_count: int = reply_count
+         self.truncate_limit: int = truncate_limit or (
+             198000
+             if "claude" in engine
+             else 1000000
+             if "gemini" in engine or "quasar-alpha" in engine
+             else 127500
+         )
+         self.timeout: float = timeout
+         self.proxy = proxy
+         self.session = requests.Session()
+         self.session.proxies.update(
+             {
+                 "http": proxy,
+                 "https": proxy,
+             },
+         )
+         if proxy := (
+             proxy or os.environ.get("all_proxy") or os.environ.get("ALL_PROXY") or None
+         ):
+             if "socks5h" not in proxy:
+                 self.aclient = httpx.AsyncClient(
+                     follow_redirects=True,
+                     proxies=proxy,
+                     timeout=timeout,
+                 )
+         else:
+             self.aclient = httpx.AsyncClient(
+                 follow_redirects=True,
+                 timeout=timeout,
+             )
+
+         self.conversation: dict[str, list[dict]] = {
+             "default": [
+                 {
+                     "role": "system",
+                     "content": system_prompt,
+                 },
+             ],
+         }
+         self.tokens_usage = defaultdict(int)
+         self.current_tokens = defaultdict(int)
+         self.function_calls_counter = {}
+         self.function_call_max_loop = 10
+         self.use_plugins = use_plugins
+         self.print_log: bool = print_log
+
+     def add_to_conversation(
+         self,
+         message: list,
+         role: str,
+         convo_id: str = "default",
+         function_name: str = "",
+     ) -> None:
+         """
+         Add a message to the conversation
+         """
+         pass
+
+     def __truncate_conversation(self, convo_id: str = "default") -> None:
+         """
+         Truncate the conversation
+         """
+         pass
+
+     def truncate_conversation(
+         self,
+         prompt: str,
+         role: str = "user",
+         convo_id: str = "default",
+         model: str = "",
+         pass_history: int = 9999,
+         **kwargs,
+     ) -> None:
+         """
+         Truncate the conversation
+         """
+         pass
+
+     def extract_values(self, obj):
+         pass
+
+     def get_token_count(self, convo_id: str = "default") -> int:
+         """
+         Get token count
+         """
+         pass
+
+     def get_message_token(self, url, json_post):
+         pass
+
+     def get_post_body(
+         self,
+         prompt: str,
+         role: str = "user",
+         convo_id: str = "default",
+         model: str = "",
+         pass_history: int = 9999,
+         **kwargs,
+     ):
+         pass
+
+     def get_max_tokens(self, convo_id: str) -> int:
+         """
+         Get max tokens
+         """
+         pass
+
+     def ask_stream(
+         self,
+         prompt: list,
+         role: str = "user",
+         convo_id: str = "default",
+         model: str = "",
+         pass_history: int = 9999,
+         function_name: str = "",
+         **kwargs,
+     ):
+         """
+         Ask a question
+         """
+         pass
+
+     async def ask_stream_async(
+         self,
+         prompt: list,
+         role: str = "user",
+         convo_id: str = "default",
+         model: str = "",
+         pass_history: int = 9999,
+         function_name: str = "",
+         **kwargs,
+     ):
+         """
+         Ask a question
+         """
+         pass
+
+     async def ask_async(
+         self,
+         prompt: str,
+         role: str = "user",
+         convo_id: str = "default",
+         model: str = "",
+         pass_history: int = 9999,
+         **kwargs,
+     ) -> str:
+         """
+         Non-streaming ask
+         """
+         response = ""
+         async for chunk in self.ask_stream_async(
+             prompt=prompt,
+             role=role,
+             convo_id=convo_id,
+             model=model or self.engine,
+             pass_history=pass_history,
+             **kwargs,
+         ):
+             response += chunk
+         # full_response: str = "".join([r async for r in response])
+         full_response: str = "".join(response)
+         return full_response
+
+     def ask(
+         self,
+         prompt: str,
+         role: str = "user",
+         convo_id: str = "default",
+         model: str = "",
+         pass_history: int = 0,
+         **kwargs,
+     ) -> str:
+         """
+         Non-streaming ask
+         """
+         response = self.ask_stream(
+             prompt=prompt,
+             role=role,
+             convo_id=convo_id,
+             model=model or self.engine,
+             pass_history=pass_history,
+             **kwargs,
+         )
+         full_response: str = "".join(response)
+         return full_response
+
+     def rollback(self, n: int = 1, convo_id: str = "default") -> None:
+         """
+         Rollback the conversation
+         """
+         for _ in range(n):
+             self.conversation[convo_id].pop()
+
+     def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
+         """
+         Reset the conversation
+         """
+         self.conversation[convo_id] = [
+             {"role": "system", "content": system_prompt or self.system_prompt},
+         ]
+
+     def save(self, file: str, *keys: str) -> None:
+         """
+         Save the Chatbot configuration to a JSON file
+         """
+         pass
+
+     def load(self, file: Path, *keys_: str) -> None:
+         """
+         Load the Chatbot configuration from a JSON file
+         """
+         pass
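
The concrete model classes added in this release (chatgpt.py, claude.py, gemini.py, and so on) subclass BaseLLM and fill in these stubs, chiefly ask_stream/ask_stream_async; ask and ask_async then simply join the streamed chunks. A toy sketch of that contract follows; the subclass name and its output are invented for illustration and do not appear in the package.

# Hypothetical subclass (not from the package): shows the contract that
# ask()/ask_async() expect ask_stream()/ask_stream_async() to fulfil.
class EchoLLM(BaseLLM):
    def ask_stream(self, prompt, role="user", convo_id="default",
                   model="", pass_history=9999, function_name="", **kwargs):
        # A real backend would POST to self.api_url and yield streamed tokens;
        # here we just echo the prompt back in two chunks.
        yield "echo: "
        yield str(prompt)

bot = EchoLLM(api_key="sk-placeholder")
print(bot.ask("hello"))   # ask() concatenates the chunks -> "echo: hello"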