xiaogpt 3.5__tar.gz → 3.20__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {xiaogpt-3.5 → xiaogpt-3.20}/PKG-INFO +56 -50
- {xiaogpt-3.5 → xiaogpt-3.20}/README.md +27 -27
- {xiaogpt-3.5 → xiaogpt-3.20}/pyproject.toml +29 -23
- xiaogpt-3.20/xiaogpt/bot/doubao_bot.py +63 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/gemini_bot.py +8 -4
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/config.py +7 -5
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/langchain/chain.py +1 -1
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/langchain/examples/email/mail_box.py +1 -1
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/langchain/examples/email/mail_summary_tools.py +2 -2
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/xiaogpt.py +7 -7
- xiaogpt-3.5/xiaogpt/bot/doubao_bot.py +0 -76
- {xiaogpt-3.5 → xiaogpt-3.20}/LICENSE +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/__init__.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/__main__.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/__init__.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/base_bot.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/chatgptapi_bot.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/glm_bot.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/langchain_bot.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/llama_bot.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/moonshot_bot.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/qwen_bot.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/yi_bot.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/cli.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/langchain/callbacks.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/tts/__init__.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/tts/base.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/tts/file.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/tts/live.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/tts/mi.py +0 -0
- {xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/utils.py +0 -0

{xiaogpt-3.5 → xiaogpt-3.20}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: xiaogpt
-Version: 3.5
+Version: 3.20
 Summary: Play ChatGPT or other LLM with xiaomi AI speaker
 Author-Email: yihong0618 <zouzou0208@gmail.com>
 License: MIT
@@ -8,7 +8,7 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Project-URL: Homepage, https://github.com/yihong0618/xiaogpt
-Requires-Python: >=3.9
+Requires-Python: <3.13,>=3.9
 Requires-Dist: miservice_fork
 Requires-Dist: openai>=1
 Requires-Dist: aiohttp
@@ -26,72 +26,74 @@ Requires-Dist: groq>=0.5.0
 Requires-Dist: pyyaml>=6.0.1
 Requires-Dist: langchain-community>=0.0.38
 Requires-Dist: lingua-language-detector>=2.0.2; python_version < "3.13"
+Requires-Dist: volcengine-python-sdk>=1.1.5
 Provides-Extra: locked
 Requires-Dist: aiohappyeyeballs==2.4.0; extra == "locked"
-Requires-Dist: aiohttp==3.
+Requires-Dist: aiohttp==3.11.17; extra == "locked"
 Requires-Dist: aiosignal==1.3.1; extra == "locked"
 Requires-Dist: annotated-types==0.6.0; extra == "locked"
 Requires-Dist: anyio==4.3.0; extra == "locked"
 Requires-Dist: async-timeout==4.0.3; python_version < "3.11" and extra == "locked"
 Requires-Dist: attrs==23.2.0; extra == "locked"
 Requires-Dist: azure-cognitiveservices-speech==1.37.0; extra == "locked"
-Requires-Dist: beautifulsoup4==4.
+Requires-Dist: beautifulsoup4==4.13.4; extra == "locked"
 Requires-Dist: cachetools==5.3.2; extra == "locked"
 Requires-Dist: certifi==2024.2.2; extra == "locked"
 Requires-Dist: charset-normalizer==3.3.2; extra == "locked"
 Requires-Dist: click==8.1.7; extra == "locked"
 Requires-Dist: colorama==0.4.6; platform_system == "Windows" and extra == "locked"
-Requires-Dist: dashscope==1.
+Requires-Dist: dashscope==1.23.1; extra == "locked"
 Requires-Dist: dataclasses-json==0.6.3; extra == "locked"
 Requires-Dist: distro==1.9.0; extra == "locked"
 Requires-Dist: edge-tts==6.1.10; extra == "locked"
 Requires-Dist: exceptiongroup==1.2.0; python_version < "3.11" and extra == "locked"
 Requires-Dist: frozenlist==1.4.1; extra == "locked"
-Requires-Dist: google-ai-generativelanguage==0.6.
+Requires-Dist: google-ai-generativelanguage==0.6.15; extra == "locked"
 Requires-Dist: google-api-core==2.15.0; extra == "locked"
 Requires-Dist: google-api-core[grpc]==2.15.0; extra == "locked"
 Requires-Dist: google-api-python-client==2.125.0; extra == "locked"
 Requires-Dist: google-auth==2.26.1; extra == "locked"
 Requires-Dist: google-auth-httplib2==0.2.0; extra == "locked"
 Requires-Dist: google-cloud-texttospeech==2.16.3; extra == "locked"
-Requires-Dist: google-generativeai==0.8.
+Requires-Dist: google-generativeai==0.8.5; extra == "locked"
 Requires-Dist: google-search-results==2.4.2; extra == "locked"
 Requires-Dist: googleapis-common-protos==1.62.0; extra == "locked"
 Requires-Dist: greenlet==3.0.3; (platform_machine == "win32" or platform_machine == "WIN32" or platform_machine == "AMD64" or platform_machine == "amd64" or platform_machine == "x86_64" or platform_machine == "ppc64le" or platform_machine == "aarch64") and extra == "locked"
-Requires-Dist: groq==0.
+Requires-Dist: groq==0.22.0; extra == "locked"
 Requires-Dist: grpcio==1.60.0; extra == "locked"
 Requires-Dist: grpcio-status==1.60.0; extra == "locked"
 Requires-Dist: h11==0.14.0; extra == "locked"
 Requires-Dist: httpcore==1.0.5; extra == "locked"
 Requires-Dist: httplib2==0.22.0; extra == "locked"
-Requires-Dist: httpx==0.
+Requires-Dist: httpx==0.28.1; extra == "locked"
+Requires-Dist: httpx-sse==0.4.0; extra == "locked"
 Requires-Dist: httpx-ws==0.6.2; extra == "locked"
-Requires-Dist: httpx[socks]==0.
+Requires-Dist: httpx[socks]==0.28.1; extra == "locked"
 Requires-Dist: idna==3.7; extra == "locked"
 Requires-Dist: jiter==0.5.0; extra == "locked"
 Requires-Dist: jsonpatch==1.33; extra == "locked"
 Requires-Dist: jsonpointer==2.4; extra == "locked"
-Requires-Dist: langchain==0.3.
-Requires-Dist: langchain-community==0.3.
-Requires-Dist: langchain-core==0.3.
-Requires-Dist: langchain-text-splitters==0.3.
+Requires-Dist: langchain==0.3.23; extra == "locked"
+Requires-Dist: langchain-community==0.3.21; extra == "locked"
+Requires-Dist: langchain-core==0.3.54; extra == "locked"
+Requires-Dist: langchain-text-splitters==0.3.8; extra == "locked"
 Requires-Dist: langsmith==0.1.133; extra == "locked"
 Requires-Dist: lingua-language-detector==2.0.2; python_version < "3.13" and extra == "locked"
 Requires-Dist: markdown-it-py==3.0.0; extra == "locked"
 Requires-Dist: marshmallow==3.20.1; extra == "locked"
 Requires-Dist: mdurl==0.1.2; extra == "locked"
-Requires-Dist: miservice-fork==2.
+Requires-Dist: miservice-fork==2.8.2; extra == "locked"
 Requires-Dist: multidict==6.0.5; extra == "locked"
 Requires-Dist: mutagen==1.47.0; extra == "locked"
 Requires-Dist: mypy-extensions==1.0.0; extra == "locked"
-Requires-Dist: numexpr==2.10.
+Requires-Dist: numexpr==2.10.2; extra == "locked"
 Requires-Dist: numpy==1.26.3; extra == "locked"
-Requires-Dist: openai==1.
+Requires-Dist: openai==1.75.0; extra == "locked"
 Requires-Dist: orjson==3.10.0; extra == "locked"
 Requires-Dist: ormsgpack==1.5.0; extra == "locked"
 Requires-Dist: packaging==23.2; extra == "locked"
-Requires-Dist: propcache==0.
-Requires-Dist: proto-plus==1.
+Requires-Dist: propcache==0.3.1; extra == "locked"
+Requires-Dist: proto-plus==1.26.1; extra == "locked"
 Requires-Dist: protobuf==4.25.1; extra == "locked"
 Requires-Dist: pyasn1==0.5.1; extra == "locked"
 Requires-Dist: pyasn1-modules==0.3.0; extra == "locked"
@@ -101,27 +103,31 @@ Requires-Dist: pydantic-settings==2.5.2; extra == "locked"
 Requires-Dist: pygments==2.17.2; extra == "locked"
 Requires-Dist: pyjwt==2.8.0; extra == "locked"
 Requires-Dist: pyparsing==3.1.2; python_version > "3.0" and extra == "locked"
+Requires-Dist: python-dateutil==2.9.0.post0; extra == "locked"
 Requires-Dist: python-dotenv==1.0.1; extra == "locked"
 Requires-Dist: pyyaml==6.0.2; extra == "locked"
 Requires-Dist: requests==2.31.0; extra == "locked"
 Requires-Dist: requests-toolbelt==1.0.0; extra == "locked"
-Requires-Dist: rich==
+Requires-Dist: rich==14.0.0; extra == "locked"
 Requires-Dist: rsa==4.9; extra == "locked"
+Requires-Dist: setuptools==78.1.1; extra == "locked"
+Requires-Dist: six==1.17.0; extra == "locked"
 Requires-Dist: sniffio==1.3.0; extra == "locked"
 Requires-Dist: socksio==1.0.0; extra == "locked"
 Requires-Dist: soupsieve==2.5; extra == "locked"
 Requires-Dist: sqlalchemy==2.0.25; extra == "locked"
 Requires-Dist: tenacity==8.2.3; extra == "locked"
-Requires-Dist: tetos==0.4.
+Requires-Dist: tetos==0.4.2; extra == "locked"
 Requires-Dist: tqdm==4.66.1; extra == "locked"
 Requires-Dist: typing-extensions==4.12.2; extra == "locked"
 Requires-Dist: typing-inspect==0.9.0; extra == "locked"
 Requires-Dist: uritemplate==4.1.1; extra == "locked"
 Requires-Dist: urllib3==2.1.0; extra == "locked"
+Requires-Dist: volcengine-python-sdk==1.1.5; extra == "locked"
 Requires-Dist: websocket-client==1.8.0; extra == "locked"
 Requires-Dist: wsproto==1.2.0; extra == "locked"
-Requires-Dist: yarl==1.
-Requires-Dist: zhipuai==2.1.5.
+Requires-Dist: yarl==1.20.0; extra == "locked"
+Requires-Dist: zhipuai==2.1.5.20250415; extra == "locked"
 Description-Content-Type: text/markdown

 # xiaogpt
@@ -148,17 +154,17 @@ Play ChatGPT and other LLM with Xiaomi AI Speaker
 - [Llama3](https://console.groq.com/docs/quickstart)
 - [通义千问](https://help.aliyun.com/zh/dashscope/developer-reference/api-details)

-## 获取小米音响DID
+## 获取小米音响 DID

-| 系统和Shell | Linux *sh | Windows CMD用户 | Windows PowerShell用户 |
+| 系统和 Shell | Linux *sh | Windows CMD 用户 | Windows PowerShell 用户 |
 | ------------- | ---------------------------------------------- | -------------------------------------- | ---------------------------------------------- |
 | 1、安装包 | `pip install miservice_fork` | `pip install miservice_fork` | `pip install miservice_fork` |
 | 2、设置变量 | `export MI_USER=xxx` <br> `export MI_PASS=xxx` | `set MI_USER=xxx`<br>`set MI_PASS=xxx` | `$env:MI_USER="xxx"` <br> `$env:MI_PASS="xxx"` |
-| 3、取得MI_DID | `micli list` | `micli list` | `micli list` |
-| 4、设置MI_DID | `export MI_DID=xxx` | `set MI_DID=xxx` | `$env:MI_DID="xxx"` |
+| 3、取得 MI_DID | `micli list` | `micli list` | `micli list` |
+| 4、设置 MI_DID | `export MI_DID=xxx` | `set MI_DID=xxx` | `$env:MI_DID="xxx"` |

-- 注意不同shell 对环境变量的处理是不同的,尤其是powershell赋值时,可能需要双引号来包括值。
-- 如果获取did报错时,请更换一下无线网络,有很大概率解决问题。
+- 注意不同 shell 对环境变量的处理是不同的,尤其是 powershell 赋值时,可能需要双引号来包括值。
+- 如果获取 did 报错时,请更换一下无线网络,有很大概率解决问题。

 ## 一点原理

@@ -182,12 +188,12 @@ Play ChatGPT and other LLM with Xiaomi AI Speaker
 - 使用 `--mute_xiaoai` 选项,可以快速停掉小爱的回答
 - 使用 `--account ${account} --password ${password}`
 - 如果有能力可以自行替换唤醒词,也可以去掉唤醒词
-- 使用 `--use_chatgpt_api` 的 api
-- 如果你遇到了墙需要用 Cloudflare Workers 替换 api_base 请使用 `--api_base ${url}` 来替换。 **请注意,此处你输入的api应该是'`https://xxxx/v1`'的字样,域名需要用引号包裹**
+- 使用 `--use_chatgpt_api` 的 api 那样可以更流畅的对话,速度特别快,达到了对话的体验,[openai api](https://platform.openai.com/account/api-keys), 命令 `--use_chatgpt_api`
+- 如果你遇到了墙需要用 Cloudflare Workers 替换 api_base 请使用 `--api_base ${url}` 来替换。 **请注意,此处你输入的 api 应该是'`https://xxxx/v1`'的字样,域名需要用引号包裹**
 - `--use_moonshot_api` and other models please refer below
 - 可以跟小爱说 `开始持续对话` 自动进入持续对话状态,`结束持续对话` 结束持续对话状态。
 - 可以使用 `--tts edge` 来获取更好的 tts 能力
-- 可以使用 `--tts fish --fish_api_key <your-fish-key> --fish_voice_key <fish-voice>` 来获取 [fish-audio](https://fish.audio/) 能力(如何获取 fish voice 见下)
+- 可以使用 `--tts fish --fish_api_key <your-fish-key> --fish_voice_key <fish-voice>` 来获取 [fish-audio](https://fish.audio/) 能力 (如何获取 fish voice 见下)
 - 可以使用 `--tts openai` 来获取 openai tts 能力
 - 可以使用 `--tts azure --azure_tts_speech_key <your-speech-key>` 来获取 Azure TTS 能力
 - 可以使用 `--use_langchain` 替代 `--use_chatgpt_api` 来调用 LangChain(默认 chatgpt)服务,实现上网检索、数学运算..
@@ -260,7 +266,7 @@ python3 xiaogpt.py --hardware Lx06 --use_langchain --mute_xiaoai --stream --open

 ## config.yaml

-
+如果想通过单一配置文件启动也是可以的,可以通过 `--config` 参数指定配置文件,config 文件必须是合法的 Yaml 或 JSON 格式
 参数优先级

 - cli args > default > config
@@ -296,24 +302,24 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)
 | hardware | 设备型号 | | |
 | account | 小爱账户 | | |
 | password | 小爱账户密码 | | |
-| openai_key | openai的apikey | | |
+| openai_key | openai 的 apikey | | |
 | moonshot_api_key | moonshot kimi 的 [apikey](https://platform.moonshot.cn/docs/api/chat#%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B) | | |
 | yi_api_key | 01 wanwu 的 [apikey](https://platform.lingyiwanwu.com/apikeys) | | |
 | llama_api_key | groq 的 llama3 [apikey](https://console.groq.com/docs/quickstart) | | |
-| serpapi_api_key | serpapi的key 参考 [SerpAPI](https://serpapi.com/) | | |
+| serpapi_api_key | serpapi 的 key 参考 [SerpAPI](https://serpapi.com/) | | |
 | glm_key | chatglm 的 apikey | | |
 | gemini_key | gemini 的 apikey [参考](https://makersuite.google.com/app/apikey) | | |
 | gemini_api_domain | gemini 的自定义域名 [参考](https://github.com/antergone/palm-netlify-proxy) | |
 | qwen_key | qwen 的 apikey [参考](https://help.aliyun.com/zh/dashscope/developer-reference/api-details) | | |
-| cookie | 小爱账户cookie
-| mi_did | 设备did | | |
+| cookie | 小爱账户 cookie(如果用上面密码登录可以不填) | | |
+| mi_did | 设备 did | | |
 | use_command | 使用 MI command 与小爱交互 | `false` | |
 | mute_xiaoai | 快速停掉小爱自己的回答 | `true` | |
 | verbose | 是否打印详细日志 | `false` | |
 | bot | 使用的 bot 类型,目前支持 chatgptapi,newbing, qwen, gemini | `chatgptapi` | |
 | tts | 使用的 TTS 类型 | `mi` | `edge`、 `openai`、`azure`、`volc`、`baidu`、`google`、`minimax` |
 | tts_options | TTS 参数字典,参考 [tetos](https://github.com/frostming/tetos) 获取可用参数 | | |
-| prompt | 自定义prompt | `请用100字以内回答` | |
+| prompt | 自定义 prompt | `请用100字以内回答` | |
 | keyword | 自定义请求词列表 | `["请"]` | |
 | change_prompt_keyword | 更改提示词触发列表 | `["更改提示词"]` | |
 | start_conversation | 开始持续对话关键词 | `开始持续对话` | |
@@ -321,8 +327,8 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)
 | stream | 使用流式响应,获得更快的响应 | `true` | |
 | proxy | 支持 HTTP 代理,传入 http proxy URL | "" | |
 | gpt_options | OpenAI API 的参数字典 | `{}` | |
-| deployment_id | Azure OpenAI 服务的 deployment ID | 参考这个[如何找到deployment_id](https://github.com/yihong0618/xiaogpt/issues/347#issuecomment-1784410784) | |
-| api_base | 如果需要替换默认的api
+| deployment_id | Azure OpenAI 服务的 deployment ID | 参考这个[如何找到 deployment_id](https://github.com/yihong0618/xiaogpt/issues/347#issuecomment-1784410784) | |
+| api_base | 如果需要替换默认的 api,或者使用 Azure OpenAI 服务 | 例如:`https://abc-def.openai.azure.com/` |
 | volc_access_key | 火山引擎的 access key 请在[这里](https://console.volcengine.com/iam/keymanage/)获取 | | |
 | volc_secret_key | 火山引擎的 secret key 请在[这里](https://console.volcengine.com/iam/keymanage/)获取 | |

@@ -330,8 +336,8 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)

 1. 请开启小爱同学的蓝牙
 2. 如果要更改提示词和 PROMPT 在代码最上面自行更改
-3. 目前已知 LX04、X10A 和 L05B L05C 可能需要使用 `--use_command`,否则可能会出现终端能输出GPT的回复但小爱同学不回答GPT的情况。这几个型号也只支持小爱原本的 tts.
-4. 在wsl
+3. 目前已知 LX04、X10A 和 L05B L05C 可能需要使用 `--use_command`,否则可能会出现终端能输出 GPT 的回复但小爱同学不回答 GPT 的情况。这几个型号也只支持小爱原本的 tts.
+4. 在 wsl 使用时,需要设置代理为 <http://wls 的 ip:port(vpn 的代理端口)>, 否则会出现连接超时的情况,详情 [报错:Error communicating with OpenAI](https://github.com/yihong0618/xiaogpt/issues/235)

 ## QA

@@ -340,9 +346,9 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)
 3. 想把它变得更好?PR Issue always welcome.
 4. 还有问题?提 Issue 哈哈
 5. Exception: Error <https://api2.mina.mi.com/admin/v2/device_list?master=0&requestId=app_ios_xxx>: Login failed [@KJZH001](https://github.com/KJZH001)<br>
-这是由于小米风控导致,海外地区无法登录大陆的账户,请尝试cookie登录
+这是由于小米风控导致,海外地区无法登录大陆的账户,请尝试 cookie 登录
 无法抓包的可以在本地部署完毕项目后再用户文件夹`C:\Users\用户名`下面找到.mi.token,然后扔到你无法登录的服务器去<br>
-若是linux则请放到当前用户的home文件夹,此时你可以重新执行先前的命令,不出意外即可正常登录(但cookie可能会过一段时间失效,需要重新获取)<br>
+若是 linux 则请放到当前用户的 home 文件夹,此时你可以重新执行先前的命令,不出意外即可正常登录(但 cookie 可能会过一段时间失效,需要重新获取)<br>
 详情请见 [https://github.com/yihong0618/xiaogpt/issues/332](https://github.com/yihong0618/xiaogpt/issues/332)

 ## 视频教程
@@ -367,7 +373,7 @@ docker run -e OPENAI_API_KEY=<your-openapi-key> yihong0618/xiaogpt --account=<yo

 ### 使用配置文件

-xiaogpt的配置文件可通过指定volume /config,以及指定参数--config来处理,如
+xiaogpt 的配置文件可通过指定 volume /config,以及指定参数--config 来处理,如

 ```shell
 docker run -v <your-config-dir>:/config yihong0618/xiaogpt --config=/config/config.yaml
@@ -379,7 +385,7 @@ docker run -v <your-config-dir>:/config yihong0618/xiaogpt --config=/config/conf
 docker run -v <your-config-dir>:/config --network=host yihong0618/xiaogpt --config=/config/config.yaml
 ```

-### 本地编译Docker Image
+### 本地编译 Docker Image

 ```shell
 docker build -t xiaogpt .
@@ -391,7 +397,7 @@ docker run -v <your-config-dir>:/config --network=host yihong0618/xiaogpt --conf
 docker build --build-arg PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple -t xiaogpt .
 ```

-如果需要在Apple M1/M2上编译x86
+如果需要在 Apple M1/M2上编译x86

 ```shell
 docker buildx build --platform=linux/amd64 -t xiaogpt-x86 .
@@ -401,7 +407,7 @@ docker build --build-arg PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple

 我们目前支持是三种第三方 TTS:edge/openai/azure/volc/baidu/google

-[edge-tts](https://github.com/rany2/edge-tts) 提供了类似微软tts的能力
+[edge-tts](https://github.com/rany2/edge-tts) 提供了类似微软 tts 的能力
 [azure-tts](https://techcommunity.microsoft.com/t5/ai-azure-ai-services-blog/9-more-realistic-ai-voices-for-conversations-now-generally/ba-p/4099471) 提供了微软 azure tts 的能力
 [openai-tts](https://platform.openai.com/docs/guides/text-to-speech) 提供了类似 openai tts 的能力
 [fish-tts](https://fish.audio/) 提供了 fish tts 的能力
@@ -414,7 +420,7 @@ docker build --build-arg PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple
 tts: edge
 ```

-For edge
+For edge 查看更多语言支持,从中选择一个

 ```shell
 edge-tts --list-voices
@@ -449,7 +455,7 @@ docker run -v <your-config-dir>:/config -p 9527:9527 -e XIAOGPT_HOSTNAME=<your i

 ## 推荐的类似项目

-- [XiaoBot](https://github.com/longbai/xiaobot) -> Go语言版本的Fork, 带支持不同平台的UI
+- [XiaoBot](https://github.com/longbai/xiaobot) -> Go 语言版本的 Fork, 带支持不同平台的 UI
 - [MiGPT](https://github.com/idootop/mi-gpt) -> Node.js 版,支持流式响应和长短期记忆

 ## 感谢

{xiaogpt-3.5 → xiaogpt-3.20}/README.md

@@ -22,17 +22,17 @@ Play ChatGPT and other LLM with Xiaomi AI Speaker
 - [Llama3](https://console.groq.com/docs/quickstart)
 - [通义千问](https://help.aliyun.com/zh/dashscope/developer-reference/api-details)

-## 获取小米音响DID
+## 获取小米音响 DID

-| 系统和Shell | Linux *sh | Windows CMD用户 | Windows PowerShell用户 |
+| 系统和 Shell | Linux *sh | Windows CMD 用户 | Windows PowerShell 用户 |
 | ------------- | ---------------------------------------------- | -------------------------------------- | ---------------------------------------------- |
 | 1、安装包 | `pip install miservice_fork` | `pip install miservice_fork` | `pip install miservice_fork` |
 | 2、设置变量 | `export MI_USER=xxx` <br> `export MI_PASS=xxx` | `set MI_USER=xxx`<br>`set MI_PASS=xxx` | `$env:MI_USER="xxx"` <br> `$env:MI_PASS="xxx"` |
-| 3、取得MI_DID | `micli list` | `micli list` | `micli list` |
-| 4、设置MI_DID | `export MI_DID=xxx` | `set MI_DID=xxx` | `$env:MI_DID="xxx"` |
+| 3、取得 MI_DID | `micli list` | `micli list` | `micli list` |
+| 4、设置 MI_DID | `export MI_DID=xxx` | `set MI_DID=xxx` | `$env:MI_DID="xxx"` |

-- 注意不同shell 对环境变量的处理是不同的,尤其是powershell赋值时,可能需要双引号来包括值。
-- 如果获取did报错时,请更换一下无线网络,有很大概率解决问题。
+- 注意不同 shell 对环境变量的处理是不同的,尤其是 powershell 赋值时,可能需要双引号来包括值。
+- 如果获取 did 报错时,请更换一下无线网络,有很大概率解决问题。

 ## 一点原理

@@ -56,12 +56,12 @@ Play ChatGPT and other LLM with Xiaomi AI Speaker
 - 使用 `--mute_xiaoai` 选项,可以快速停掉小爱的回答
 - 使用 `--account ${account} --password ${password}`
 - 如果有能力可以自行替换唤醒词,也可以去掉唤醒词
-- 使用 `--use_chatgpt_api` 的 api
-- 如果你遇到了墙需要用 Cloudflare Workers 替换 api_base 请使用 `--api_base ${url}` 来替换。 **请注意,此处你输入的api应该是'`https://xxxx/v1`'的字样,域名需要用引号包裹**
+- 使用 `--use_chatgpt_api` 的 api 那样可以更流畅的对话,速度特别快,达到了对话的体验,[openai api](https://platform.openai.com/account/api-keys), 命令 `--use_chatgpt_api`
+- 如果你遇到了墙需要用 Cloudflare Workers 替换 api_base 请使用 `--api_base ${url}` 来替换。 **请注意,此处你输入的 api 应该是'`https://xxxx/v1`'的字样,域名需要用引号包裹**
 - `--use_moonshot_api` and other models please refer below
 - 可以跟小爱说 `开始持续对话` 自动进入持续对话状态,`结束持续对话` 结束持续对话状态。
 - 可以使用 `--tts edge` 来获取更好的 tts 能力
-- 可以使用 `--tts fish --fish_api_key <your-fish-key> --fish_voice_key <fish-voice>` 来获取 [fish-audio](https://fish.audio/) 能力(如何获取 fish voice 见下)
+- 可以使用 `--tts fish --fish_api_key <your-fish-key> --fish_voice_key <fish-voice>` 来获取 [fish-audio](https://fish.audio/) 能力 (如何获取 fish voice 见下)
 - 可以使用 `--tts openai` 来获取 openai tts 能力
 - 可以使用 `--tts azure --azure_tts_speech_key <your-speech-key>` 来获取 Azure TTS 能力
 - 可以使用 `--use_langchain` 替代 `--use_chatgpt_api` 来调用 LangChain(默认 chatgpt)服务,实现上网检索、数学运算..
@@ -134,7 +134,7 @@ python3 xiaogpt.py --hardware Lx06 --use_langchain --mute_xiaoai --stream --open

 ## config.yaml

-
+如果想通过单一配置文件启动也是可以的,可以通过 `--config` 参数指定配置文件,config 文件必须是合法的 Yaml 或 JSON 格式
 参数优先级

 - cli args > default > config
@@ -170,24 +170,24 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)
 | hardware | 设备型号 | | |
 | account | 小爱账户 | | |
 | password | 小爱账户密码 | | |
-| openai_key | openai的apikey | | |
+| openai_key | openai 的 apikey | | |
 | moonshot_api_key | moonshot kimi 的 [apikey](https://platform.moonshot.cn/docs/api/chat#%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B) | | |
 | yi_api_key | 01 wanwu 的 [apikey](https://platform.lingyiwanwu.com/apikeys) | | |
 | llama_api_key | groq 的 llama3 [apikey](https://console.groq.com/docs/quickstart) | | |
-| serpapi_api_key | serpapi的key 参考 [SerpAPI](https://serpapi.com/) | | |
+| serpapi_api_key | serpapi 的 key 参考 [SerpAPI](https://serpapi.com/) | | |
 | glm_key | chatglm 的 apikey | | |
 | gemini_key | gemini 的 apikey [参考](https://makersuite.google.com/app/apikey) | | |
 | gemini_api_domain | gemini 的自定义域名 [参考](https://github.com/antergone/palm-netlify-proxy) | |
 | qwen_key | qwen 的 apikey [参考](https://help.aliyun.com/zh/dashscope/developer-reference/api-details) | | |
-| cookie | 小爱账户cookie
-| mi_did | 设备did | | |
+| cookie | 小爱账户 cookie(如果用上面密码登录可以不填) | | |
+| mi_did | 设备 did | | |
 | use_command | 使用 MI command 与小爱交互 | `false` | |
 | mute_xiaoai | 快速停掉小爱自己的回答 | `true` | |
 | verbose | 是否打印详细日志 | `false` | |
 | bot | 使用的 bot 类型,目前支持 chatgptapi,newbing, qwen, gemini | `chatgptapi` | |
 | tts | 使用的 TTS 类型 | `mi` | `edge`、 `openai`、`azure`、`volc`、`baidu`、`google`、`minimax` |
 | tts_options | TTS 参数字典,参考 [tetos](https://github.com/frostming/tetos) 获取可用参数 | | |
-| prompt | 自定义prompt | `请用100字以内回答` | |
+| prompt | 自定义 prompt | `请用100字以内回答` | |
 | keyword | 自定义请求词列表 | `["请"]` | |
 | change_prompt_keyword | 更改提示词触发列表 | `["更改提示词"]` | |
 | start_conversation | 开始持续对话关键词 | `开始持续对话` | |
@@ -195,8 +195,8 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)
 | stream | 使用流式响应,获得更快的响应 | `true` | |
 | proxy | 支持 HTTP 代理,传入 http proxy URL | "" | |
 | gpt_options | OpenAI API 的参数字典 | `{}` | |
-| deployment_id | Azure OpenAI 服务的 deployment ID | 参考这个[如何找到deployment_id](https://github.com/yihong0618/xiaogpt/issues/347#issuecomment-1784410784) | |
-| api_base | 如果需要替换默认的api
+| deployment_id | Azure OpenAI 服务的 deployment ID | 参考这个[如何找到 deployment_id](https://github.com/yihong0618/xiaogpt/issues/347#issuecomment-1784410784) | |
+| api_base | 如果需要替换默认的 api,或者使用 Azure OpenAI 服务 | 例如:`https://abc-def.openai.azure.com/` |
 | volc_access_key | 火山引擎的 access key 请在[这里](https://console.volcengine.com/iam/keymanage/)获取 | | |
 | volc_secret_key | 火山引擎的 secret key 请在[这里](https://console.volcengine.com/iam/keymanage/)获取 | |

@@ -204,8 +204,8 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)

 1. 请开启小爱同学的蓝牙
 2. 如果要更改提示词和 PROMPT 在代码最上面自行更改
-3. 目前已知 LX04、X10A 和 L05B L05C 可能需要使用 `--use_command`,否则可能会出现终端能输出GPT的回复但小爱同学不回答GPT的情况。这几个型号也只支持小爱原本的 tts.
-4. 在wsl
+3. 目前已知 LX04、X10A 和 L05B L05C 可能需要使用 `--use_command`,否则可能会出现终端能输出 GPT 的回复但小爱同学不回答 GPT 的情况。这几个型号也只支持小爱原本的 tts.
+4. 在 wsl 使用时,需要设置代理为 <http://wls 的 ip:port(vpn 的代理端口)>, 否则会出现连接超时的情况,详情 [报错:Error communicating with OpenAI](https://github.com/yihong0618/xiaogpt/issues/235)

 ## QA

@@ -214,9 +214,9 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)
 3. 想把它变得更好?PR Issue always welcome.
 4. 还有问题?提 Issue 哈哈
 5. Exception: Error <https://api2.mina.mi.com/admin/v2/device_list?master=0&requestId=app_ios_xxx>: Login failed [@KJZH001](https://github.com/KJZH001)<br>
-这是由于小米风控导致,海外地区无法登录大陆的账户,请尝试cookie登录
+这是由于小米风控导致,海外地区无法登录大陆的账户,请尝试 cookie 登录
 无法抓包的可以在本地部署完毕项目后再用户文件夹`C:\Users\用户名`下面找到.mi.token,然后扔到你无法登录的服务器去<br>
-若是linux则请放到当前用户的home文件夹,此时你可以重新执行先前的命令,不出意外即可正常登录(但cookie可能会过一段时间失效,需要重新获取)<br>
+若是 linux 则请放到当前用户的 home 文件夹,此时你可以重新执行先前的命令,不出意外即可正常登录(但 cookie 可能会过一段时间失效,需要重新获取)<br>
 详情请见 [https://github.com/yihong0618/xiaogpt/issues/332](https://github.com/yihong0618/xiaogpt/issues/332)

 ## 视频教程
@@ -241,7 +241,7 @@ docker run -e OPENAI_API_KEY=<your-openapi-key> yihong0618/xiaogpt --account=<yo

 ### 使用配置文件

-xiaogpt的配置文件可通过指定volume /config,以及指定参数--config来处理,如
+xiaogpt 的配置文件可通过指定 volume /config,以及指定参数--config 来处理,如

 ```shell
 docker run -v <your-config-dir>:/config yihong0618/xiaogpt --config=/config/config.yaml
@@ -253,7 +253,7 @@ docker run -v <your-config-dir>:/config yihong0618/xiaogpt --config=/config/conf
 docker run -v <your-config-dir>:/config --network=host yihong0618/xiaogpt --config=/config/config.yaml
 ```

-### 本地编译Docker Image
+### 本地编译 Docker Image

 ```shell
 docker build -t xiaogpt .
@@ -265,7 +265,7 @@ docker run -v <your-config-dir>:/config --network=host yihong0618/xiaogpt --conf
 docker build --build-arg PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple -t xiaogpt .
 ```

-如果需要在Apple M1/M2上编译x86
+如果需要在 Apple M1/M2上编译x86

 ```shell
 docker buildx build --platform=linux/amd64 -t xiaogpt-x86 .
@@ -275,7 +275,7 @@ docker build --build-arg PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple

 我们目前支持是三种第三方 TTS:edge/openai/azure/volc/baidu/google

-[edge-tts](https://github.com/rany2/edge-tts) 提供了类似微软tts的能力
+[edge-tts](https://github.com/rany2/edge-tts) 提供了类似微软 tts 的能力
 [azure-tts](https://techcommunity.microsoft.com/t5/ai-azure-ai-services-blog/9-more-realistic-ai-voices-for-conversations-now-generally/ba-p/4099471) 提供了微软 azure tts 的能力
 [openai-tts](https://platform.openai.com/docs/guides/text-to-speech) 提供了类似 openai tts 的能力
 [fish-tts](https://fish.audio/) 提供了 fish tts 的能力
@@ -288,7 +288,7 @@ docker build --build-arg PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple
 tts: edge
 ```

-For edge
+For edge 查看更多语言支持,从中选择一个

 ```shell
 edge-tts --list-voices
@@ -323,7 +323,7 @@ docker run -v <your-config-dir>:/config -p 9527:9527 -e XIAOGPT_HOSTNAME=<your i

 ## 推荐的类似项目

-- [XiaoBot](https://github.com/longbai/xiaobot) -> Go语言版本的Fork, 带支持不同平台的UI
+- [XiaoBot](https://github.com/longbai/xiaobot) -> Go 语言版本的 Fork, 带支持不同平台的 UI
 - [MiGPT](https://github.com/idootop/mi-gpt) -> Node.js 版,支持流式响应和长短期记忆

 ## 感谢

{xiaogpt-3.5 → xiaogpt-3.20}/pyproject.toml

@@ -10,7 +10,7 @@ classifiers = [
     "Operating System :: OS Independent",
     "Programming Language :: Python :: 3",
 ]
-requires-python = ">=3.9"
+requires-python = ">=3.9,<3.13"
 dependencies = [
     "miservice_fork",
     "openai>=1",
@@ -29,9 +29,10 @@ dependencies = [
     "pyyaml>=6.0.1",
     "langchain-community>=0.0.38",
     "lingua-language-detector>=2.0.2; python_version < \"3.13\"",
+    "volcengine-python-sdk>=1.1.5",
 ]
 dynamic = []
-version = "3.5"
+version = "3.20"

 [project.license]
 text = "MIT"
@@ -45,70 +46,71 @@ xiaogpt = "xiaogpt.cli:main"
 [project.optional-dependencies]
 locked = [
     "aiohappyeyeballs==2.4.0",
-    "aiohttp==3.
+    "aiohttp==3.11.17",
     "aiosignal==1.3.1",
     "annotated-types==0.6.0",
     "anyio==4.3.0",
     "async-timeout==4.0.3 ; python_version < \"3.11\"",
     "attrs==23.2.0",
     "azure-cognitiveservices-speech==1.37.0",
-    "beautifulsoup4==4.
+    "beautifulsoup4==4.13.4",
     "cachetools==5.3.2",
     "certifi==2024.2.2",
     "charset-normalizer==3.3.2",
     "click==8.1.7",
     "colorama==0.4.6 ; platform_system == \"Windows\"",
-    "dashscope==1.
+    "dashscope==1.23.1",
     "dataclasses-json==0.6.3",
     "distro==1.9.0",
     "edge-tts==6.1.10",
     "exceptiongroup==1.2.0 ; python_version < \"3.11\"",
     "frozenlist==1.4.1",
-    "google-ai-generativelanguage==0.6.
+    "google-ai-generativelanguage==0.6.15",
     "google-api-core==2.15.0",
     "google-api-core[grpc]==2.15.0",
     "google-api-python-client==2.125.0",
     "google-auth==2.26.1",
     "google-auth-httplib2==0.2.0",
     "google-cloud-texttospeech==2.16.3",
-    "google-generativeai==0.8.
+    "google-generativeai==0.8.5",
     "google-search-results==2.4.2",
     "googleapis-common-protos==1.62.0",
     "greenlet==3.0.3 ; platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\"",
-    "groq==0.
+    "groq==0.22.0",
     "grpcio==1.60.0",
     "grpcio-status==1.60.0",
     "h11==0.14.0",
     "httpcore==1.0.5",
     "httplib2==0.22.0",
-    "httpx==0.
+    "httpx==0.28.1",
+    "httpx-sse==0.4.0",
     "httpx-ws==0.6.2",
-    "httpx[socks]==0.
+    "httpx[socks]==0.28.1",
     "idna==3.7",
     "jiter==0.5.0",
     "jsonpatch==1.33",
     "jsonpointer==2.4",
-    "langchain==0.3.
-    "langchain-community==0.3.
-    "langchain-core==0.3.
-    "langchain-text-splitters==0.3.
+    "langchain==0.3.23",
+    "langchain-community==0.3.21",
+    "langchain-core==0.3.54",
+    "langchain-text-splitters==0.3.8",
     "langsmith==0.1.133",
     "lingua-language-detector==2.0.2 ; python_version < \"3.13\"",
     "markdown-it-py==3.0.0",
     "marshmallow==3.20.1",
     "mdurl==0.1.2",
-    "miservice-fork==2.
+    "miservice-fork==2.8.2",
     "multidict==6.0.5",
     "mutagen==1.47.0",
     "mypy-extensions==1.0.0",
-    "numexpr==2.10.
+    "numexpr==2.10.2",
     "numpy==1.26.3",
-    "openai==1.
+    "openai==1.75.0",
     "orjson==3.10.0",
     "ormsgpack==1.5.0",
     "packaging==23.2",
-    "propcache==0.
-    "proto-plus==1.
+    "propcache==0.3.1",
+    "proto-plus==1.26.1",
     "protobuf==4.25.1",
     "pyasn1==0.5.1",
     "pyasn1-modules==0.3.0",
@@ -118,27 +120,31 @@ locked = [
     "pygments==2.17.2",
     "pyjwt==2.8.0",
     "pyparsing==3.1.2 ; python_version > \"3.0\"",
+    "python-dateutil==2.9.0.post0",
     "python-dotenv==1.0.1",
     "pyyaml==6.0.2",
     "requests==2.31.0",
     "requests-toolbelt==1.0.0",
-    "rich==
+    "rich==14.0.0",
     "rsa==4.9",
+    "setuptools==78.1.1",
+    "six==1.17.0",
     "sniffio==1.3.0",
     "socksio==1.0.0",
     "soupsieve==2.5",
     "sqlalchemy==2.0.25",
     "tenacity==8.2.3",
-    "tetos==0.4.
+    "tetos==0.4.2",
     "tqdm==4.66.1",
     "typing-extensions==4.12.2",
     "typing-inspect==0.9.0",
     "uritemplate==4.1.1",
     "urllib3==2.1.0",
+    "volcengine-python-sdk==1.1.5",
     "websocket-client==1.8.0",
     "wsproto==1.2.0",
-    "yarl==1.
-    "zhipuai==2.1.5.
+    "yarl==1.20.0",
+    "zhipuai==2.1.5.20250415",
 ]

 [tool.pdm]

xiaogpt-3.20/xiaogpt/bot/doubao_bot.py

@@ -0,0 +1,63 @@
+"""ChatGLM bot"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from rich import print
+
+from xiaogpt.bot.base_bot import BaseBot, ChatHistoryMixin
+from xiaogpt.config import Config
+
+
+class DoubaoBot(ChatHistoryMixin, BaseBot):
+    name = "豆包"
+    default_options = {"model": "skylark-chat"}  # 根据官方示例修改默认模型
+
+    def __init__(self, api_key: str) -> None:
+        from volcenginesdkarkruntime import Ark  # 引入官方 SDK
+
+        self.api_key = api_key
+        self.history = []
+        self.client = Ark(api_key=api_key)  # 初始化客户端
+
+    @classmethod
+    def from_config(cls, config: Config):
+        return cls(api_key=config.volc_api_key)  # 假设配置文件中有 volc_api_key 字段
+
+    def _get_data(self, query: str, **options: Any):
+        options = {**self.default_options, **options}
+        model = options.pop("model")
+        ms = self.get_messages()
+        ms.append({"role": "user", "content": query})
+        return {"model": model, "messages": ms}
+
+    async def ask(self, query, **options):
+        data = self._get_data(query, **options)
+        try:
+            completion = self.client.chat.completions.create(**data)
+            message = completion.choices[0].message.content
+            self.add_message(query, message)
+            print(message)
+            return message
+        except Exception as e:
+            print(str(e))
+            return
+
+    async def ask_stream(self, query: str, **options: Any):
+        data = self._get_data(query, **options)
+        data["stream"] = True
+
+        try:
+            full_content = ""
+            for chunk in self.client.chat.completions.create(**data):
+                content = chunk.choices[0].delta.content
+                if content:
+                    full_content += content
+                    print(content, end="", flush=True)
+                    yield content
+            print()
+            self.add_message(query, full_content)
+        except Exception as e:
+            print(str(e))
+            return

{xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/bot/gemini_bot.py

@@ -32,13 +32,15 @@ safety_settings = [
 class GeminiBot(ChatHistoryMixin, BaseBot):
     name = "Gemini"

-    def __init__(
+    def __init__(
+        self, gemini_key: str, gemini_api_domain: str, gemini_model: str
+    ) -> None:
         import google.generativeai as genai

         from google.auth import api_key

         credentials = api_key.Credentials(gemini_key)
-        if
+        if gemini_api_domain:
             print("Use custom gemini_api_domain: " + gemini_api_domain)
             credentials._universe_domain = gemini_api_domain
             genai.configure(
@@ -54,7 +56,7 @@ class GeminiBot(ChatHistoryMixin, BaseBot):

         self.history = []
         model = genai.GenerativeModel(
-            model_name="gemini-
+            model_name=gemini_model or "gemini-2.0-flash-lite",
             generation_config=generation_config,
             safety_settings=safety_settings,
         )
@@ -63,7 +65,9 @@ class GeminiBot(ChatHistoryMixin, BaseBot):
     @classmethod
     def from_config(cls, config):
         return cls(
-            gemini_key=config.gemini_key,
+            gemini_key=config.gemini_key,
+            gemini_api_domain=config.gemini_api_domain,
+            gemini_model=config.gemini_model,
         )

     async def ask(self, query, **options):

{xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/config.py

@@ -18,7 +18,7 @@ HARDWARE_COMMAND_DICT = {
     # hardware: (tts_command, wakeup_command)
     "LX06": ("5-1", "5-5"),
     "L05B": ("5-3", "5-4"),
-    "S12": ("5-1", "5-5"), # 第一代小爱,型号MDZ-25-DA
+    "S12": ("5-1", "5-5"), # 第一代小爱,型号 MDZ-25-DA
     "S12A": ("5-1", "5-5"),
     "LX01": ("5-1", "5-5"),
     "L06A": ("5-1", "5-5"),
@@ -28,10 +28,10 @@ HARDWARE_COMMAND_DICT = {
     "X08E": ("7-3", "7-4"),
     "LX05A": ("5-1", "5-5"), # 小爱红外版
     "LX5A": ("5-1", "5-5"), # 小爱红外版
-    "L07A": ("5-1", "5-5"), # Redmi小爱音箱Play(l7a)
+    "L07A": ("5-1", "5-5"), # Redmi 小爱音箱 Play(l7a)
     "L15A": ("7-3", "7-4"),
-    "X6A": ("7-3", "7-4"), # 小米智能家庭屏6
-    "X10A": ("7-3", "7-4"), # 小米智能家庭屏10
+    "X6A": ("7-3", "7-4"), # 小米智能家庭屏 6
+    "X10A": ("7-3", "7-4"), # 小米智能家庭屏 10
     # add more here
 }

@@ -39,7 +39,7 @@ DEFAULT_COMMAND = ("5-1", "5-5")

 KEY_WORD = ("帮我", "请")
 CHANGE_PROMPT_KEY_WORD = ("更改提示词",)
-PROMPT = "以下请用300字以内回答,请只回答文字不要带链接"
+PROMPT = "以下请用 300 字以内回答,请只回答文字不要带链接"
 # simulate_xiaoai_question
 MI_ASK_SIMULATE_DATA = {
     "code": 0,
@@ -59,6 +59,7 @@ class Config:
     llama_api_key: str = os.getenv("GROQ_API_KEY", "") # use groq
     glm_key: str = os.getenv("CHATGLM_KEY", "")
     gemini_key: str = os.getenv("GEMINI_KEY", "") # keep the old rule
+    gemini_model: str = os.getenv("GEMINI_MODEL", "") # keep the old rule
     qwen_key: str = os.getenv("DASHSCOPE_API_KEY", "") # keep the old rule
     serpapi_api_key: str = os.getenv("SERPAPI_API_KEY", "")
     gemini_api_domain: str = os.getenv(
@@ -66,6 +67,7 @@ class Config:
     ) # 自行部署的 Google Gemini 代理
     volc_access_key: str = os.getenv("VOLC_ACCESS_KEY", "")
     volc_secret_key: str = os.getenv("VOLC_SECRET_KEY", "")
+    volc_api_key: str = os.getenv("volc_api_key", "")
     proxy: str | None = None
     mi_did: str = os.getenv("MI_DID", "")
     keyword: Iterable[str] = KEY_WORD

{xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/langchain/chain.py

@@ -39,5 +39,5 @@ async def agent_search(
         tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=False, memory=memeory
     )
     callbacks = [callback] if callback else None
-    # query eg:'杭州亚运会中国队获得了多少枚金牌?' // '计算3的2次方'
+    # query eg:'杭州亚运会中国队获得了多少枚金牌?' // '计算 3 的 2 次方'
     return await agent.arun(query, callbacks=callbacks)

{xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/langchain/examples/email/mail_box.py

@@ -51,7 +51,7 @@ class Mailbox:
             email_id = email_ids[i]
             email_content = self.get_email_content(mailbox, email_id)
             if email_content:
-                all_email_content += f"{i+1}
+                all_email_content += f"{i+1},{email_content}\n"

         # print(all_email_content)

{xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/langchain/examples/email/mail_summary_tools.py

@@ -14,8 +14,8 @@ class MailSummaryTool(BaseTool):
         all_email_content = mailbox.get_all_work_summary()
         prompt = """
         要求你作为一名总编辑。根据输入的多封邮件,对每封做简明扼要的摘要。要求如下:
-        1、对每封邮件摘要总结,摘要总结字数在25字以内
-        2、排版按照 发送人:xx 内容:xx
+        1、对每封邮件摘要总结,摘要总结字数在 25 字以内
+        2、排版按照 发送人:xx 内容:xx(换一行)
         3、注意换行,要求全文美观简洁
         4、展示邮件内提到项目名,不用额外扩展讲项目内容和进度
         """

{xiaogpt-3.5 → xiaogpt-3.20}/xiaogpt/xiaogpt.py

@@ -358,9 +358,9 @@ class MiGPT:
         task = asyncio.create_task(self.poll_latest_ask())
         assert task is not None  # to keep the reference to task, do not remove this
         print(
-            f"Running xiaogpt now, 用[green]{'/'.join(self.config.keyword)}[/]开头来提问"
+            f"Running xiaogpt now, 用 [green]{'/'.join(self.config.keyword)}[/] 开头来提问"
         )
-        print(f"或用[green]{self.config.start_conversation}[/]开始持续对话")
+        print(f"或用 [green]{self.config.start_conversation}[/] 开始持续对话")
         while True:
             self.polling_event.set()
             new_record = await self.last_record.get()
@@ -393,12 +393,12 @@ class MiGPT:
             query = re.sub(rf"^({'|'.join(self.config.keyword)})", "", query)
             # llama3 is not good at Chinese, so we need to add prompt in it.
             if self.config.bot == "llama":
-                query = f"你是一个基于llama3 的智能助手,请你跟我对话时,一定使用中文,不要夹杂一些英文单词,甚至英语短语也不能随意使用,但类似于 llama3
+                query = f"你是一个基于 llama3 的智能助手,请你跟我对话时,一定使用中文,不要夹杂一些英文单词,甚至英语短语也不能随意使用,但类似于 llama3 这样的专属名词除外,问题是:{query}"

             print("-" * 20)
             print("问题:" + query + "?")
             if not self.chatbot.has_history():
-                query = f"{query}
+                query = f"{query},{self.config.prompt}"
             # some model can not detect the language code, so we need to add it

             if self.config.mute_xiaoai:
@@ -409,12 +409,12 @@ class MiGPT:
                 await self.do_tts(f"正在问{self.chatbot.name}请耐心等待")
             try:
                 print(
-                    "
+                    "以下是小爱的回答:",
                     new_record.get("answers", [])[0].get("tts", {}).get("text"),
                 )
             except IndexError:
                 print("小爱没回")
-            print(f"以下是 {self.chatbot.name}
+            print(f"以下是 {self.chatbot.name} 的回答:", end="")
             try:
                 await self.speak(self.ask_gpt(query))
             except Exception as e:
@@ -422,7 +422,7 @@ class MiGPT:
             else:
                 print("回答完毕")
                 if self.in_conversation:
-                    print(f"
+                    print(f"继续对话,或用 `{self.config.end_conversation}` 结束对话")
                 await self.wakeup_xiaoai()

     async def speak(self, text_stream: AsyncIterator[str]) -> None:

xiaogpt-3.5/xiaogpt/bot/doubao_bot.py

@@ -1,76 +0,0 @@
-"""ChatGLM bot"""
-
-from __future__ import annotations
-
-import json
-from typing import Any, AsyncIterator
-
-import httpx
-from rich import print
-
-from xiaogpt.bot.base_bot import BaseBot, ChatHistoryMixin
-from xiaogpt.config import Config
-from xiaogpt.utils import split_sentences
-
-
-class DoubaoBot(ChatHistoryMixin, BaseBot):
-    API_URL = "https://maas-api.ml-platform-cn-beijing.volces.com"
-    name = "豆包"
-    default_options = {"model": "skylark-chat"}
-
-    def __init__(self, access_key: str, secret_key: str) -> None:
-        from tetos.volc import VolcSignAuth
-
-        self.auth = VolcSignAuth(access_key, secret_key, "ml_maas", "cn-beijing")
-        self.history = []
-
-    @classmethod
-    def from_config(cls, config: Config):
-        return cls(access_key=config.volc_access_key, secret_key=config.volc_secret_key)
-
-    def _get_data(self, query: str, **options: Any):
-        options = {**self.default_options, **options}
-        model = options.pop("model")
-        ms = self.get_messages()
-        ms.append({"role": "user", "content": query})
-        return {"model": {"name": model}, "parameters": options, "messages": ms}
-
-    async def ask(self, query, **options):
-        data = self._get_data(query, **options)
-        async with httpx.AsyncClient(base_url=self.API_URL, auth=self.auth) as client:
-            resp = await client.post("/api/v1/chat", json=data)
-            resp.raise_for_status()
-            try:
-                message = resp.json()["choice"]["message"]["content"]
-            except Exception as e:
-                print(str(e))
-                return
-            self.add_message(query, message)
-            print(message)
-            return message
-
-    async def ask_stream(self, query: str, **options: Any):
-        data = self._get_data(query, **options)
-        data["stream"] = True
-
-        async def sse_gen(line_iter: AsyncIterator[str]) -> AsyncIterator[str]:
-            message = ""
-            async for chunk in line_iter:
-                if not chunk.startswith("data:"):
-                    continue
-                message = chunk[5:].strip()
-                if message == "[DONE]":
-                    break
-                data = json.loads(message)
-                text = data["choice"]["message"]["content"]
-                print(text, end="", flush=True)
-                message += text
-                yield text
-            print()
-            self.add_message(query, message)
-
-        async with httpx.AsyncClient(base_url=self.API_URL, auth=self.auth) as client:
-            async with client.stream("POST", "/api/v1/chat", json=data) as resp:
-                resp.raise_for_status()
-                async for sentence in split_sentences(sse_gen(resp.aiter_lines())):
-                    yield sentence

20 files without changes