xiaogpt 2.64__tar.gz → 2.71__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {xiaogpt-2.64 → xiaogpt-2.71}/PKG-INFO +87 -80
- {xiaogpt-2.64 → xiaogpt-2.71}/README.md +75 -64
- {xiaogpt-2.64 → xiaogpt-2.71}/pyproject.toml +12 -16
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/bot/__init__.py +9 -3
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/bot/chatgptapi_bot.py +1 -1
- xiaogpt-2.71/xiaogpt/bot/llama_bot.py +25 -0
- xiaogpt-2.71/xiaogpt/bot/moonshot_bot.py +28 -0
- xiaogpt-2.71/xiaogpt/bot/yi_bot.py +28 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/cli.py +38 -7
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/config.py +16 -10
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/xiaogpt.py +11 -2
- xiaogpt-2.64/xiaogpt/bot/newbing_bot.py +0 -81
- {xiaogpt-2.64 → xiaogpt-2.71}/LICENSE +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/__init__.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/__main__.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/bot/base_bot.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/bot/doubao_bot.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/bot/gemini_bot.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/bot/glm_bot.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/bot/langchain_bot.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/bot/qwen_bot.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/langchain/callbacks.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/langchain/chain.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/langchain/examples/email/mail_box.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/langchain/examples/email/mail_summary_tools.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/tts/__init__.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/tts/base.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/tts/mi.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/tts/tetos.py +0 -0
- {xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/utils.py +0 -0
{xiaogpt-2.64 → xiaogpt-2.71}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: xiaogpt
-Version: 2.64
+Version: 2.71
 Summary: Play ChatGPT or other LLM with xiaomi AI speaker
 Author-Email: yihong0618 <zouzou0208@gmail.com>
 License: MIT
@@ -15,7 +15,6 @@ Requires-Dist: aiohttp
 Requires-Dist: rich
 Requires-Dist: zhipuai>=2.0.1
 Requires-Dist: httpx[socks]
-Requires-Dist: EdgeGPT==0.1.26
 Requires-Dist: langchain>=0.0.343
 Requires-Dist: beautifulsoup4>=4.12.0
 Requires-Dist: google-search-results>=2.4.2
@@ -23,6 +22,8 @@ Requires-Dist: google-generativeai
 Requires-Dist: numexpr>=2.8.6
 Requires-Dist: dashscope>=1.10.0
 Requires-Dist: tetos>=0.2.1
+Requires-Dist: groq>=0.5.0
+Requires-Dist: pyyaml>=6.0.1
 Requires-Dist: aiohttp==3.9.5; extra == "locked"
 Requires-Dist: aiosignal==1.3.1; extra == "locked"
 Requires-Dist: annotated-types==0.6.0; extra == "locked"
@@ -31,30 +32,29 @@ Requires-Dist: async-timeout==4.0.3; python_version < "3.11" and extra == "locked"
 Requires-Dist: attrs==23.2.0; extra == "locked"
 Requires-Dist: azure-cognitiveservices-speech==1.37.0; extra == "locked"
 Requires-Dist: beautifulsoup4==4.12.3; extra == "locked"
-Requires-Dist: bingimagecreator==0.5.0; extra == "locked"
 Requires-Dist: cachetools==5.3.2; extra == "locked"
 Requires-Dist: certifi==2024.2.2; extra == "locked"
 Requires-Dist: charset-normalizer==3.3.2; extra == "locked"
 Requires-Dist: click==8.1.7; extra == "locked"
 Requires-Dist: colorama==0.4.6; platform_system == "Windows" and extra == "locked"
-Requires-Dist: dashscope==1.
+Requires-Dist: dashscope==1.19.0; extra == "locked"
 Requires-Dist: dataclasses-json==0.6.3; extra == "locked"
 Requires-Dist: distro==1.9.0; extra == "locked"
 Requires-Dist: edge-tts==6.1.10; extra == "locked"
-Requires-Dist: edgegpt==0.1.26; extra == "locked"
 Requires-Dist: exceptiongroup==1.2.0; python_version < "3.11" and extra == "locked"
 Requires-Dist: frozenlist==1.4.1; extra == "locked"
-Requires-Dist: google-ai-generativelanguage==0.6.
+Requires-Dist: google-ai-generativelanguage==0.6.3; extra == "locked"
 Requires-Dist: google-api-core==2.15.0; extra == "locked"
 Requires-Dist: google-api-core[grpc]==2.15.0; extra == "locked"
 Requires-Dist: google-api-python-client==2.125.0; extra == "locked"
 Requires-Dist: google-auth==2.26.1; extra == "locked"
 Requires-Dist: google-auth-httplib2==0.2.0; extra == "locked"
 Requires-Dist: google-cloud-texttospeech==2.16.3; extra == "locked"
-Requires-Dist: google-generativeai==0.5.
+Requires-Dist: google-generativeai==0.5.3; extra == "locked"
 Requires-Dist: google-search-results==2.4.2; extra == "locked"
 Requires-Dist: googleapis-common-protos==1.62.0; extra == "locked"
 Requires-Dist: greenlet==3.0.3; (platform_machine == "win32" or platform_machine == "WIN32" or platform_machine == "AMD64" or platform_machine == "amd64" or platform_machine == "x86_64" or platform_machine == "ppc64le" or platform_machine == "aarch64") and extra == "locked"
+Requires-Dist: groq==0.5.0; extra == "locked"
 Requires-Dist: grpcio==1.60.0; extra == "locked"
 Requires-Dist: grpcio-status==1.60.0; extra == "locked"
 Requires-Dist: h11==0.14.0; extra == "locked"
@@ -65,9 +65,9 @@ Requires-Dist: httpx[socks]==0.27.0; extra == "locked"
 Requires-Dist: idna==3.7; extra == "locked"
 Requires-Dist: jsonpatch==1.33; extra == "locked"
 Requires-Dist: jsonpointer==2.4; extra == "locked"
-Requires-Dist: langchain==0.1.
-Requires-Dist: langchain-community==0.0.
-Requires-Dist: langchain-core==0.1.
+Requires-Dist: langchain==0.1.20; extra == "locked"
+Requires-Dist: langchain-community==0.0.38; extra == "locked"
+Requires-Dist: langchain-core==0.1.52; extra == "locked"
 Requires-Dist: langchain-text-splitters==0.0.1; extra == "locked"
 Requires-Dist: langsmith==0.1.45; extra == "locked"
 Requires-Dist: markdown-it-py==3.0.0; extra == "locked"
@@ -79,10 +79,9 @@ Requires-Dist: mutagen==1.47.0; extra == "locked"
 Requires-Dist: mypy-extensions==1.0.0; extra == "locked"
 Requires-Dist: numexpr==2.10.0; extra == "locked"
 Requires-Dist: numpy==1.26.3; extra == "locked"
-Requires-Dist: openai==1.
+Requires-Dist: openai==1.29.0; extra == "locked"
 Requires-Dist: orjson==3.10.0; extra == "locked"
 Requires-Dist: packaging==23.2; extra == "locked"
-Requires-Dist: prompt-toolkit==3.0.43; extra == "locked"
 Requires-Dist: proto-plus==1.23.0; extra == "locked"
 Requires-Dist: protobuf==4.25.1; extra == "locked"
 Requires-Dist: pyasn1==0.5.1; extra == "locked"
@@ -93,7 +92,6 @@ Requires-Dist: pygments==2.17.2; extra == "locked"
 Requires-Dist: pyjwt==2.8.0; extra == "locked"
 Requires-Dist: pyparsing==3.1.2; python_version > "3.0" and extra == "locked"
 Requires-Dist: pyyaml==6.0.1; extra == "locked"
-Requires-Dist: regex==2023.12.25; extra == "locked"
 Requires-Dist: requests==2.31.0; extra == "locked"
 Requires-Dist: rich==13.7.1; extra == "locked"
 Requires-Dist: rsa==4.9; extra == "locked"
@@ -108,10 +106,8 @@ Requires-Dist: typing-extensions==4.9.0; extra == "locked"
 Requires-Dist: typing-inspect==0.9.0; extra == "locked"
 Requires-Dist: uritemplate==4.1.1; extra == "locked"
 Requires-Dist: urllib3==2.1.0; extra == "locked"
-Requires-Dist: wcwidth==0.2.13; extra == "locked"
-Requires-Dist: websockets==12.0; extra == "locked"
 Requires-Dist: yarl==1.9.4; extra == "locked"
-Requires-Dist: zhipuai==2.0.1; extra == "locked"
+Requires-Dist: zhipuai==2.0.1.20240423.1; extra == "locked"
 Provides-Extra: locked
 Description-Content-Type: text/markdown
(The remaining PKG-INFO hunks only touch the embedded long description, which is a verbatim copy of README.md; see the README.md diff below for those changes.)
{xiaogpt-2.64 → xiaogpt-2.71}/README.md

@@ -3,7 +3,7 @@
 [](https://pypi.org/project/xiaogpt)
 [](https://hub.docker.com/r/yihong0618/xiaogpt)
 
-https://user-images.githubusercontent.com/15976103/226803357-72f87a41-a15b-409e-94f5-e2d262eecd53.mp4
+<https://user-images.githubusercontent.com/15976103/226803357-72f87a41-a15b-409e-94f5-e2d262eecd53.mp4>
 
 Play ChatGPT and other LLM with Xiaomi AI Speaker
 
@@ -17,9 +17,13 @@ Play ChatGPT and other LLM with Xiaomi AI Speaker
 - [ChatGLM](http://open.bigmodel.cn/)
 - [Gemini](https://makersuite.google.com/app/apikey)
 - [Doubao](https://console.volcengine.com/iam/keymanage/)
+- [Moonshot](https://platform.moonshot.cn/docs/api/chat#%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B)
+- [01](https://platform.lingyiwanwu.com/apikeys)
+- [Llama3](https://console.groq.com/docs/quickstart)
 - [通义千问](https://help.aliyun.com/zh/dashscope/developer-reference/api-details)
 
 ## 获取小米音响DID
+
 | 系统和Shell | Linux *sh | Windows CMD用户 | Windows PowerShell用户 |
 | ------------- | ---------------------------------------------- | -------------------------------------- | ---------------------------------------------- |
 | 1、安装包 | `pip install miservice_fork` | `pip install miservice_fork` | `pip install miservice_fork` |
@@ -47,13 +51,14 @@ Play ChatGPT and other LLM with Xiaomi AI Speaker
 - 参考我 fork 的 [MiService](https://github.com/yihong0618/MiService) 项目 README 并在本地 terminal 跑 `micli list` 拿到你音响的 DID 成功 **别忘了设置 export MI_DID=xxx** 这个 MI_DID 用
 - run `xiaogpt --hardware ${your_hardware} --use_chatgpt_api` hardware 你看小爱屁股上有型号,输入进来,如果在屁股上找不到或者型号不对,可以用 `micli mina` 找到型号
 - 跑起来之后就可以问小爱同学问题了,"帮我"开头的问题,会发送一份给 ChatGPT 然后小爱同学用 tts 回答
--
+- 如果上面不可用,可以尝试用手机抓包,<https://userprofile.mina.mi.com/device_profile/v2/conversation> 找到 cookie 利用 `--cookie '${cookie}'` cookie 别忘了用单引号包裹
 - 默认用目前 ubus, 如果你的设备不支持 ubus 可以使用 `--use_command` 来使用 command 来 tts
 - 使用 `--mute_xiaoai` 选项,可以快速停掉小爱的回答
 - 使用 `--account ${account} --password ${password}`
 - 如果有能力可以自行替换唤醒词,也可以去掉唤醒词
 - 使用 `--use_chatgpt_api` 的 api 那样可以更流畅的对话,速度特别快,达到了对话的体验, [openai api](https://platform.openai.com/account/api-keys), 命令 `--use_chatgpt_api`
 - 如果你遇到了墙需要用 Cloudflare Workers 替换 api_base 请使用 `--api_base ${url}` 来替换。 **请注意,此处你输入的api应该是'`https://xxxx/v1`'的字样,域名需要用引号包裹**
+- `--use_moonshot_api` and other models please refer below
 - 可以跟小爱说 `开始持续对话` 自动进入持续对话状态,`结束持续对话` 结束持续对话状态。
 - 可以使用 `--tts edge` 来获取更好的 tts 能力
 - 可以使用 `--tts openai` 来获取 openai tts 能力
@@ -79,11 +84,17 @@ xiaogpt --hardware LX06 --mute_xiaoai --use_gemini --gemini_key ${gemini_key}
 python3 xiaogpt.py --hardware LX06 --mute_xiaoai --use_gemini --gemini_key ${gemini_key} --gemini_api_domain ${gemini_api_domain}
 # 如果你想使用阿里的通义千问
 xiaogpt --hardware LX06 --mute_xiaoai --use_qwen --qwen_key ${qwen_key}
+# 如果你想使用 kimi
+xiaogpt --hardware LX06 --mute_xiaoai --use_moonshot_api --moonshot_api_key ${moonshot_api_key}
+# 如果你想使用 llama3
+xiaogpt --hardware LX06 --mute_xiaoai --use_llama --llama_api_key ${llama_api_key}
+# 如果你想使用 01
+xiaogpt --hardware LX06 --mute_xiaoai --use_yi_api --ti_api_key ${yi_api_key}
 # 如果你想使用豆包
 export OPENAI_API_KEY=${your_api_key}
 export SERPAPI_API_KEY=${your_serpapi_key}
 xiaogpt --hardware Lx06 --use_langchain --mute_xiaoai --stream --openai_key ${your_api_key} --serpapi_api_key ${your_serpapi_key}
@@ -110,44 +121,46 @@ python3 xiaogpt.py --hardware LX06 --mute_xiaoai --use_gemini --gemini_key ${gemini_key}
 python3 xiaogpt.py --hardware LX06 --mute_xiaoai --use_gemini --gemini_key ${gemini_key} --gemini_api_domain ${gemini_api_domain}
 # 如果你想使用阿里的通义千问
 python3 xiaogpt.py --hardware LX06 --mute_xiaoai --use_qwen --qwen_key ${qwen_key}
+# 如果你想使用 kimi
+xiaogpt --hardware LX06 --mute_xiaoai --use_moonshot_api --moonshot_api_key ${moonshot_api_key}
+# 如果你想使用 01
+xiaogpt --hardware LX06 --mute_xiaoai --use_yi_api --ti_api_key ${yi_api_key}
 # 如果你想使用豆包
 python3 xiaogpt.py --hardware LX06 --mute_xiaoai --use_doubao --stream --volc_access_key xxxx --volc_secret_key xxx
+# 如果你想使用 llama3
+python3 xiaogpt.py --hardware LX06 --mute_xiaoai --use_llama --llama_api_key ${llama_api_key}
 # 如果你想使用 LangChain+SerpApi 实现上网检索或其他本地服务(目前仅支持 stream 模式)
 export OPENAI_API_KEY=${your_api_key}
 export SERPAPI_API_KEY=${your_serpapi_key}
 python3 xiaogpt.py --hardware Lx06 --use_langchain --mute_xiaoai --stream --openai_key ${your_api_key} --serpapi_api_key ${your_serpapi_key}
 ```
 
-## config.
+## config.yaml
 
-如果想通过单一配置文件启动也是可以的, 可以通过 `--config` 参数指定配置文件, config 文件必须是合法的 JSON 格式
+如果想通过单一配置文件启动也是可以的, 可以通过 `--config` 参数指定配置文件, config 文件必须是合法的 Yaml 或 JSON 格式
 参数优先级
 
 - cli args > default > config
 
 ```shell
-python3 xiaogpt.py --config xiao_config.
+python3 xiaogpt.py --config xiao_config.yaml
 # or
-xiaogpt --config xiao_config.
+xiaogpt --config xiao_config.yaml
 ```
 
 或者
 
 ```shell
-cp xiao_config.
+cp xiao_config.yaml.example xiao_config.yaml
 python3 xiaogpt.py
 ```
 
-若要指定 OpenAI 的模型参数,如 model, temporature, top_p, 请在config.
+若要指定 OpenAI 的模型参数,如 model, temporature, top_p, 请在 config.yaml 中指定:
 
-```
-"temperature": 0.9,
-"top_p": 0.9,
-}
-}
+```yaml
+gpt_options:
+  temperature: 0.9
+  top_p: 0.9
 ```
 
 具体参数作用请参考 [Open AI API 文档](https://platform.openai.com/docs/api-reference/chat/create)。
@@ -155,47 +168,47 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)
 
 ## 配置项说明
 
-| 参数 | 说明
-| --------------------- |
-| hardware | 设备型号
-| account | 小爱账户
-| password | 小爱账户密码
-| openai_key | openai的apikey
-[这里]
+| 参数 | 说明 | 默认值 | 可选值 |
+| --------------------- | ---- | ------ | ------ |
+| hardware | 设备型号 | | |
+| account | 小爱账户 | | |
+| password | 小爱账户密码 | | |
+| openai_key | openai的apikey | | |
+| moonshot_api_key | moonshot kimi 的 [apikey](https://platform.moonshot.cn/docs/api/chat#%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B) | | |
+| yi_api_key | 01 wanwu 的 [apikey](https://platform.lingyiwanwu.com/apikeys) | | |
+| llama_api_key | groq 的 llama3 [apikey](https://console.groq.com/docs/quickstart) | | |
+| serpapi_api_key | serpapi的key 参考 [SerpAPI](https://serpapi.com/) | | |
+| glm_key | chatglm 的 apikey | | |
+| gemini_key | gemini 的 apikey [参考](https://makersuite.google.com/app/apikey) | | |
+| gemini_api_domain | gemini 的自定义域名 [参考](https://github.com/antergone/palm-netlify-proxy) | | |
+| qwen_key | qwen 的 apikey [参考](https://help.aliyun.com/zh/dashscope/developer-reference/api-details) | | |
+| cookie | 小爱账户cookie (如果用上面密码登录可以不填) | | |
+| mi_did | 设备did | | |
+| use_command | 使用 MI command 与小爱交互 | `false` | |
+| mute_xiaoai | 快速停掉小爱自己的回答 | `true` | |
+| verbose | 是否打印详细日志 | `false` | |
+| bot | 使用的 bot 类型,目前支持 chatgptapi,newbing, qwen, gemini | `chatgptapi` | |
+| tts | 使用的 TTS 类型 | `mi` | `edge`、 `openai`、`azure`、`volc`、`baidu`、`google`、`minimax` |
+| tts_options | TTS 参数字典,参考 [tetos](https://github.com/frostming/tetos) 获取可用参数 | | |
+| prompt | 自定义prompt | `请用100字以内回答` | |
+| keyword | 自定义请求词列表 | `["请"]` | |
+| change_prompt_keyword | 更改提示词触发列表 | `["更改提示词"]` | |
+| start_conversation | 开始持续对话关键词 | `开始持续对话` | |
+| end_conversation | 结束持续对话关键词 | `结束持续对话` | |
+| stream | 使用流式响应,获得更快的响应 | `true` | |
+| proxy | 支持 HTTP 代理,传入 http proxy URL | "" | |
+| gpt_options | OpenAI API 的参数字典 | `{}` | |
+| deployment_id | Azure OpenAI 服务的 deployment ID | 参考这个[如何找到deployment_id](https://github.com/yihong0618/xiaogpt/issues/347#issuecomment-1784410784) | |
+| api_base | 如果需要替换默认的api,或者使用Azure OpenAI 服务 | 例如:`https://abc-def.openai.azure.com/` | |
+| volc_access_key | 火山引擎的 access key 请在[这里](https://console.volcengine.com/iam/keymanage/)获取 | | |
+| volc_secret_key | 火山引擎的 secret key 请在[这里](https://console.volcengine.com/iam/keymanage/)获取 | | |
 
 ## 注意
 
 1. 请开启小爱同学的蓝牙
 2. 如果要更改提示词和 PROMPT 在代码最上面自行更改
 3. 目前已知 LX04、X10A 和 L05B L05C 可能需要使用 `--use_command`,否则可能会出现终端能输出GPT的回复但小爱同学不回答GPT的情况。这几个型号也只支持小爱原本的 tts.
-4. 在wsl使用时, 需要设置代理为 http://wls的ip:port(vpn的代理端口)
+4. 在wsl使用时, 需要设置代理为 <http://wls的ip:port(vpn的代理端口)>, 否则会出现连接超时的情况, 详情 [报错: Error communicating with OpenAI](https://github.com/yihong0618/xiaogpt/issues/235)
 
 ## QA
 
@@ -203,7 +216,7 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)
 2. 你做这玩意也没用啊?确实。。。但是挺好玩的,有用对你来说没用,对我们来说不一定呀
 3. 想把它变得更好?PR Issue always welcome.
 4. 还有问题?提 Issue 哈哈
-5. Exception: Error https://api2.mina.mi.com/admin/v2/device_list?master=0&requestId=app_ios_xxx
+5. Exception: Error <https://api2.mina.mi.com/admin/v2/device_list?master=0&requestId=app_ios_xxx>: Login failed [@KJZH001](https://github.com/KJZH001)<br>
 这是由于小米风控导致,海外地区无法登录大陆的账户,请尝试cookie登录
 无法抓包的可以在本地部署完毕项目后再用户文件夹`C:\Users\用户名`下面找到.mi.token,然后扔到你无法登录的服务器去<br>
 若是linux则请放到当前用户的home文件夹,此时你可以重新执行先前的命令,不出意外即可正常登录(但cookie可能会过一段时间失效,需要重新获取)<br>
@@ -211,7 +224,7 @@ ChatGLM [文档](http://open.bigmodel.cn/doc/api#chatglm_130b)
 
 ## 视频教程
 
-https://www.youtube.com/watch?v=K4YA8YwzOOA
+<https://www.youtube.com/watch?v=K4YA8YwzOOA>
 
 ## Docker
 
@@ -234,13 +247,13 @@ docker run -e OPENAI_API_KEY=<your-openapi-key> yihong0618/xiaogpt --account=<yo
 xiaogpt的配置文件可通过指定volume /config,以及指定参数--config来处理,如
 
 ```shell
-docker run -v <your-config-dir>:/config yihong0618/xiaogpt --config=/config/config.
+docker run -v <your-config-dir>:/config yihong0618/xiaogpt --config=/config/config.yaml
 ```
 
 ### 网络使用 host 模型
 
 ```shell
-docker run -v <your-config-dir>:/config --network=host yihong0618/xiaogpt --config=/config/config.
+docker run -v <your-config-dir>:/config --network=host yihong0618/xiaogpt --config=/config/config.yaml
 ```
 
 ### 本地编译Docker Image
@@ -273,10 +286,8 @@ docker build --build-arg PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple
 
 你可以通过参数 `tts`, 来启用它
 
-```
-"tts": "edge",
-}
+```yaml
+tts: edge
 ```
 
 For edge 查看更多语言支持, 从中选择一个
@@ -290,7 +301,7 @@ edge-tts --list-voices
 由于 Edge TTS 启动了一个本地的 HTTP 服务,所以需要将容器的端口映射到宿主机上,并且指定本地机器的 hostname:
 
 ```shell
-docker run -v <your-config-dir>:/config -p 9527:9527 -e XIAOGPT_HOSTNAME=<your ip> yihong0618/xiaogpt --config=/config/config.
+docker run -v <your-config-dir>:/config -p 9527:9527 -e XIAOGPT_HOSTNAME=<your ip> yihong0618/xiaogpt --config=/config/config.yaml
 ```
 
 注意端口必须映射为与容器内一致,XIAOGPT_HOSTNAME 需要设置为宿主机的 IP 地址,否则小爱无法正常播放语音。
{xiaogpt-2.64 → xiaogpt-2.71}/pyproject.toml

@@ -18,7 +18,6 @@ dependencies = [
     "rich",
     "zhipuai>=2.0.1",
     "httpx[socks]",
-    "EdgeGPT==0.1.26",
     "langchain>=0.0.343",
     "beautifulsoup4>=4.12.0",
     "google-search-results>=2.4.2",
@@ -26,9 +25,11 @@ dependencies = [
     "numexpr>=2.8.6",
     "dashscope>=1.10.0",
     "tetos>=0.2.1",
+    "groq>=0.5.0",
+    "pyyaml>=6.0.1",
 ]
 dynamic = []
-version = "2.64"
+version = "2.71"
 
 [project.license]
 text = "MIT"
@@ -49,30 +50,29 @@ locked = [
     "attrs==23.2.0",
     "azure-cognitiveservices-speech==1.37.0",
     "beautifulsoup4==4.12.3",
-    "bingimagecreator==0.5.0",
     "cachetools==5.3.2",
     "certifi==2024.2.2",
     "charset-normalizer==3.3.2",
     "click==8.1.7",
     "colorama==0.4.6 ; platform_system == \"Windows\"",
-    "dashscope==1.
+    "dashscope==1.19.0",
     "dataclasses-json==0.6.3",
     "distro==1.9.0",
     "edge-tts==6.1.10",
-    "edgegpt==0.1.26",
     "exceptiongroup==1.2.0 ; python_version < \"3.11\"",
     "frozenlist==1.4.1",
-    "google-ai-generativelanguage==0.6.
+    "google-ai-generativelanguage==0.6.3",
     "google-api-core==2.15.0",
     "google-api-core[grpc]==2.15.0",
     "google-api-python-client==2.125.0",
     "google-auth==2.26.1",
     "google-auth-httplib2==0.2.0",
     "google-cloud-texttospeech==2.16.3",
-    "google-generativeai==0.5.
+    "google-generativeai==0.5.3",
     "google-search-results==2.4.2",
     "googleapis-common-protos==1.62.0",
     "greenlet==3.0.3 ; platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\"",
+    "groq==0.5.0",
     "grpcio==1.60.0",
     "grpcio-status==1.60.0",
     "h11==0.14.0",
@@ -83,9 +83,9 @@ locked = [
     "idna==3.7",
     "jsonpatch==1.33",
     "jsonpointer==2.4",
-    "langchain==0.1.
-    "langchain-community==0.0.
-    "langchain-core==0.1.
+    "langchain==0.1.20",
+    "langchain-community==0.0.38",
+    "langchain-core==0.1.52",
     "langchain-text-splitters==0.0.1",
     "langsmith==0.1.45",
     "markdown-it-py==3.0.0",
@@ -97,10 +97,9 @@ locked = [
     "mypy-extensions==1.0.0",
     "numexpr==2.10.0",
     "numpy==1.26.3",
-    "openai==1.
+    "openai==1.29.0",
     "orjson==3.10.0",
     "packaging==23.2",
-    "prompt-toolkit==3.0.43",
     "proto-plus==1.23.0",
     "protobuf==4.25.1",
     "pyasn1==0.5.1",
@@ -111,7 +110,6 @@ locked = [
     "pyjwt==2.8.0",
     "pyparsing==3.1.2 ; python_version > \"3.0\"",
     "pyyaml==6.0.1",
-    "regex==2023.12.25",
    "requests==2.31.0",
    "rich==13.7.1",
    "rsa==4.9",
@@ -126,10 +124,8 @@ locked = [
     "typing-inspect==0.9.0",
     "uritemplate==4.1.1",
     "urllib3==2.1.0",
-    "wcwidth==0.2.13",
-    "websockets==12.0",
     "yarl==1.9.4",
-    "zhipuai==2.0.1",
+    "zhipuai==2.0.1.20240423.1",
 ]
 
 [tool.pdm]
{xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/bot/__init__.py

@@ -6,18 +6,22 @@ from xiaogpt.bot.doubao_bot import DoubaoBot
 from xiaogpt.bot.gemini_bot import GeminiBot
 from xiaogpt.bot.glm_bot import GLMBot
 from xiaogpt.bot.langchain_bot import LangChainBot
-from xiaogpt.bot.newbing_bot import NewBingBot
+from xiaogpt.bot.llama_bot import LlamaBot
+from xiaogpt.bot.moonshot_bot import MoonshotBot
 from xiaogpt.bot.qwen_bot import QwenBot
+from xiaogpt.bot.yi_bot import YiBot
 from xiaogpt.config import Config
 
 BOTS: dict[str, type[BaseBot]] = {
-    "newbing": NewBingBot,
     "chatgptapi": ChatGPTBot,
     "glm": GLMBot,
     "gemini": GeminiBot,
     "qwen": QwenBot,
     "langchain": LangChainBot,
     "doubao": DoubaoBot,
+    "moonshot": MoonshotBot,
+    "yi": YiBot,
+    "llama": LlamaBot,
 }
 
@@ -30,11 +34,13 @@ def get_bot(config: Config) -> BaseBot:
 
 __all__ = [
     "ChatGPTBot",
-    "NewBingBot",
     "GLMBot",
     "GeminiBot",
+    "MoonshotBot",
     "QwenBot",
     "get_bot",
     "LangChainBot",
     "DoubaoBot",
+    "YiBot",
+    "LlamaBot",
 ]
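
With the NewBing entries removed, the `BOTS` registry above is the single place a backend has to be wired in: one import plus one dict entry. The body of `get_bot` is not part of this hunk, so the following is only a minimal sketch of how a lookup against `BOTS` plausibly works; the error handling here is an assumption, while `BOTS` and `from_config` are taken from the code shown above.

```python
# Sketch only: resolving the configured bot name through the BOTS registry.
from xiaogpt.bot import BOTS
from xiaogpt.config import Config


def resolve_bot(config: Config):
    try:
        bot_cls = BOTS[config.bot]  # e.g. "llama" -> LlamaBot, "moonshot" -> MoonshotBot
    except KeyError:
        raise ValueError(f"Unknown bot type {config.bot!r}, expected one of {sorted(BOTS)}")
    return bot_cls.from_config(config)  # every bot class exposes a from_config classmethod
```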

{xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/bot/chatgptapi_bot.py

@@ -16,7 +16,7 @@ if TYPE_CHECKING:
 @dataclasses.dataclass
 class ChatGPTBot(ChatHistoryMixin, BaseBot):
     name: ClassVar[str] = "ChatGPT"
-    default_options: ClassVar[dict[str, str]] = {"model": "gpt-
+    default_options: ClassVar[dict[str, str]] = {"model": "gpt-4o-2024-05-13"}
     openai_key: str
     api_base: str | None = None
     proxy: str | None = None
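
The only change here is the fallback model string. Per the README table above, `gpt_options` is a plain dict of OpenAI API parameters, so a user-level override of the default would look roughly like the merge below; the exact merge point inside `ChatGPTBot` is not shown in this diff and is assumed.

```python
# Rough sketch, not xiaogpt's actual merge code: class-level defaults combined
# with gpt_options from the user's config, assuming user values take precedence.
default_options = {"model": "gpt-4o-2024-05-13"}                # ClassVar shown above
gpt_options = {"model": "gpt-3.5-turbo", "temperature": 0.9}    # hypothetical config values

effective = {**default_options, **gpt_options}
print(effective)  # {'model': 'gpt-3.5-turbo', 'temperature': 0.9}
```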

xiaogpt-2.71/xiaogpt/bot/llama_bot.py (new file)

@@ -0,0 +1,25 @@
+import httpx
+from groq import Groq as openai
+from groq import AsyncGroq as AsyncOpenAI
+
+from xiaogpt.bot.chatgptapi_bot import ChatGPTBot
+
+
+class LlamaBot(ChatGPTBot):
+    name = "llama"
+    default_options = {"model": "llama3-70b-8192"}
+
+    def __init__(self, llama_api_key: str) -> None:
+        self.llama_api_key = llama_api_key
+        self.history: list[tuple[str, str]] = []
+
+    def _make_openai_client(self, sess: httpx.AsyncClient) -> AsyncOpenAI:
+        return AsyncOpenAI(
+            api_key=self.llama_api_key, http_client=sess, base_url=self.api_base
+        )
+
+    @classmethod
+    def from_config(cls, config):
+        return cls(
+            llama_api_key=config.llama_api_key,
+        )

xiaogpt-2.71/xiaogpt/bot/moonshot_bot.py (new file)

@@ -0,0 +1,28 @@
+import httpx
+import openai
+
+from xiaogpt.bot.chatgptapi_bot import ChatGPTBot
+
+
+class MoonshotBot(ChatGPTBot):
+    name = "Moonshot"
+    default_options = {"model": "moonshot-v1-8k"}
+
+    def __init__(
+        self, moonshot_api_key: str, api_base="https://api.moonshot.cn/v1"
+    ) -> None:
+        self.moonshot_api_key = moonshot_api_key
+        self.api_base = api_base
+        self.history: list[tuple[str, str]] = []
+
+    def _make_openai_client(self, sess: httpx.AsyncClient) -> openai.AsyncOpenAI:
+        return openai.AsyncOpenAI(
+            api_key=self.moonshot_api_key, http_client=sess, base_url=self.api_base
+        )
+
+    @classmethod
+    def from_config(cls, config):
+        return cls(
+            moonshot_api_key=config.moonshot_api_key,
+            api_base="https://api.moonshot.cn/v1",
+        )

xiaogpt-2.71/xiaogpt/bot/yi_bot.py (new file)

@@ -0,0 +1,28 @@
+import httpx
+import openai
+
+from xiaogpt.bot.chatgptapi_bot import ChatGPTBot
+
+
+class YiBot(ChatGPTBot):
+    name = "yi"
+    default_options = {"model": "yi-34b-chat-0205"}
+
+    def __init__(
+        self, yi_api_key: str, api_base="https://api.lingyiwanwu.com/v1"
+    ) -> None:
+        self.yi_api_key = yi_api_key
+        self.api_base = api_base
+        self.history: list[tuple[str, str]] = []
+
+    def _make_openai_client(self, sess: httpx.AsyncClient) -> openai.AsyncOpenAI:
+        return openai.AsyncOpenAI(
+            api_key=self.yi_api_key, http_client=sess, base_url=self.api_base
+        )
+
+    @classmethod
+    def from_config(cls, config):
+        return cls(
+            yi_api_key=config.yi_api_key,
+            api_base="https://api.lingyiwanwu.com/v1",
+        )
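
The three new bots are thin wrappers: each subclasses `ChatGPTBot` and only swaps the OpenAI-compatible endpoint via `_make_openai_client`. (`LlamaBot` is the odd one out: it builds its client from groq's `AsyncGroq` instead of `openai.AsyncOpenAI` and never assigns `self.api_base` itself, apparently relying on the `None` default inherited from the `ChatGPTBot` dataclass.) Below is a hedged sketch of what one more OpenAI-compatible provider would look like under the same pattern; the provider name, model and base URL are placeholders, not part of xiaogpt.

```python
# Hypothetical example following the MoonshotBot/YiBot pattern above.
# "ExampleBot", the model name and the base_url are placeholders.
import httpx
import openai

from xiaogpt.bot.chatgptapi_bot import ChatGPTBot


class ExampleBot(ChatGPTBot):
    name = "example"
    default_options = {"model": "example-chat-model"}

    def __init__(self, api_key: str, api_base: str = "https://api.example.com/v1") -> None:
        self.api_key = api_key
        self.api_base = api_base
        self.history: list[tuple[str, str]] = []

    def _make_openai_client(self, sess: httpx.AsyncClient) -> openai.AsyncOpenAI:
        # ChatGPTBot drives the whole conversation; only the endpoint differs.
        return openai.AsyncOpenAI(api_key=self.api_key, http_client=sess, base_url=self.api_base)

    @classmethod
    def from_config(cls, config):
        return cls(api_key=config.openai_key)  # a real backend would add its own key field to Config, as this diff does
```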

{xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/cli.py

@@ -27,6 +27,21 @@ def main():
         dest="openai_key",
         help="openai api key",
     )
+    parser.add_argument(
+        "--moonshot_api_key",
+        dest="moonshot_api_key",
+        help="Moonshot api key",
+    )
+    parser.add_argument(
+        "--llama_api_key",
+        dest="llama_api_key",
+        help="llama(use groq) api key",
+    )
+    parser.add_argument(
+        "--yi_api_key",
+        dest="yi_api_key",
+        help="01wanwu api key",
+    )
     parser.add_argument(
         "--glm_key",
         dest="glm_key",
@@ -110,18 +125,25 @@ def main():
         help="if use openai chatgpt api",
     )
     bot_group.add_argument(
-        "--
+        "--use_moonshot_api",
         dest="bot",
         action="store_const",
-        const="
-        help="if use
+        const="moonshot",
+        help="if use moonshot api",
+    )
+    bot_group.add_argument(
+        "--use_yi_api",
+        dest="bot",
+        action="store_const",
+        const="yi",
+        help="if use yi api",
     )
     bot_group.add_argument(
-        "--
+        "--use_langchain",
         dest="bot",
         action="store_const",
-        const="
-        help="if use
+        const="langchain",
+        help="if use langchain",
     )
     bot_group.add_argument(
         "--use_glm",
@@ -151,6 +173,13 @@ def main():
         const="doubao",
         help="if use doubao",
     )
+    bot_group.add_argument(
+        "--use_llama",  # use groq
+        dest="bot",
+        action="store_const",
+        const="llama",
+        help="if use groq llama3",
+    )
     parser.add_argument(
         "--bing_cookie_path",
         dest="bing_cookie_path",
@@ -162,12 +191,14 @@ def main():
         help="bot type",
         choices=[
             "chatgptapi",
-            "newbing",
             "glm",
             "gemini",
             "langchain",
             "qwen",
             "doubao",
+            "moonshot",
+            "yi",
+            "llama",
         ],
     )
     parser.add_argument(
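
All of the new `--use_*` switches write the same `dest="bot"` via `store_const`, so choosing a backend just sets one string that is later resolved through the `BOTS` registry. A minimal, self-contained illustration of that mechanism follows; this is not xiaogpt's actual parser, and whether `bot_group` is a mutually exclusive group is an assumption.

```python
# Standalone demo of the store_const pattern used by the CLI above; not xiaogpt code.
import argparse

parser = argparse.ArgumentParser()
bot_group = parser.add_mutually_exclusive_group()  # assumption: the real bot_group may differ
bot_group.add_argument("--use_moonshot_api", dest="bot", action="store_const", const="moonshot")
bot_group.add_argument("--use_llama", dest="bot", action="store_const", const="llama")
parser.add_argument("--llama_api_key", dest="llama_api_key")

args = parser.parse_args(["--use_llama", "--llama_api_key", "gsk-placeholder"])
print(args.bot)            # "llama" -> later looked up via BOTS["llama"]
print(args.llama_api_key)  # "gsk-placeholder"
```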

{xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/config.py

@@ -6,6 +6,8 @@ import os
 from dataclasses import dataclass, field
 from typing import Any, Iterable, Literal
 
+import yaml
+
 from xiaogpt.utils import validate_proxy
 
 LATEST_ASK_API = "https://userprofile.mina.mi.com/device_profile/v2/conversation?source=dialogu&hardware={hardware}&timestamp={timestamp}&limit=2"
@@ -37,7 +39,7 @@ DEFAULT_COMMAND = ("5-1", "5-5")
 
 KEY_WORD = ("帮我", "请")
 CHANGE_PROMPT_KEY_WORD = ("更改提示词",)
-PROMPT = "以下请用
+PROMPT = "以下请用300字以内回答,请只回答文字不要带链接"
 # simulate_xiaoai_question
 MI_ASK_SIMULATE_DATA = {
     "code": 0,
@@ -52,6 +54,9 @@ class Config:
     account: str = os.getenv("MI_USER", "")
     password: str = os.getenv("MI_PASS", "")
     openai_key: str = os.getenv("OPENAI_API_KEY", "")
+    moonshot_api_key: str = os.getenv("MOONSHOT_API_KEY", "")
+    yi_api_key: str = os.getenv("YI_API_KEY", "")
+    llama_api_key: str = os.getenv("GROQ_API_KEY", "")  # use groq
     glm_key: str = os.getenv("CHATGLM_KEY", "")
     gemini_key: str = os.getenv("GEMINI_KEY", "")  # keep the old rule
     qwen_key: str = os.getenv("DASHSCOPE_API_KEY", "")  # keep the old rule
@@ -81,18 +86,10 @@ class Config:
     ] = "mi"
     tts_options: dict[str, Any] = field(default_factory=dict)
     gpt_options: dict[str, Any] = field(default_factory=dict)
-    bing_cookie_path: str = ""
-    bing_cookies: dict | None = None
 
     def __post_init__(self) -> None:
         if self.proxy:
             validate_proxy(self.proxy)
-        if self.bot == "newbing":
-            if not (self.bing_cookie_path or self.bing_cookies):
-                raise Exception(
-                    "New bing bot needs bing_cookie_path or bing_cookies, read this: "
-                    "https://github.com/acheong08/EdgeGPT#getting-authentication-required"
-                )
         if (
             self.api_base
             and self.api_base.endswith(("openai.azure.com", "openai.azure.com/"))
@@ -137,7 +134,10 @@ class Config:
     def read_from_file(cls, config_path: str) -> dict:
         result = {}
         with open(config_path, "rb") as f:
+            if config_path.endswith(".json"):
+                config = json.load(f)
+            else:
+                config = yaml.safe_load(f)
             for key, value in config.items():
                 if value is None:
                     continue
@@ -157,6 +157,12 @@ class Config:
                     key, value = "bot", "qwen"
                 elif key == "use_doubao":
                     key, value = "bot", "doubao"
+                elif key == "use_moonshot":
+                    key, value = "bot", "moonshot"
+                elif key == "use_yi":
+                    key, value = "bot", "yi"
+                elif key == "use_llama":
+                    key, value = "bot", "llama"
                 elif key == "use_langchain":
                     key, value = "bot", "langchain"
                 elif key == "enable_edge_tts":
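
With `pyyaml` now in the dependencies, `read_from_file` branches on the file extension: `.json` keeps the old `json.load` path, anything else goes through `yaml.safe_load`. A small sketch of exercising it, under the assumption that `read_from_file` is exposed as a classmethod returning the parsed/normalized dict; the keys below come from the README's 配置项说明 table and the file name is arbitrary.

```python
# Sketch only; assumes Config.read_from_file is a classmethod returning a plain dict.
from pathlib import Path

from xiaogpt.config import Config

Path("xiao_config.yaml").write_text(
    "hardware: LX06\n"
    "bot: moonshot\n"
    "moonshot_api_key: sk-placeholder\n"
    "tts: edge\n",
    encoding="utf-8",
)

options = Config.read_from_file("xiao_config.yaml")
print(options)  # e.g. {'hardware': 'LX06', 'bot': 'moonshot', ...}
```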

{xiaogpt-2.64 → xiaogpt-2.71}/xiaogpt/xiaogpt.py

@@ -378,14 +378,23 @@ class MiGPT:
                 self.log.debug("No new xiao ai record")
                 continue
 
-            # drop
+            # drop key words
             query = re.sub(rf"^({'|'.join(self.config.keyword)})", "", query)
+            # llama3 is not good at Chinese, so we need to add prompt in it.
+            if self.config.bot == "llama":
+                query = f"你是一个基于llama3 的智能助手,请你跟我对话时,一定使用中文,不要夹杂一些英文单词,甚至英语短语也不能随意使用,但类似于 llama3 这样的专属名词除外, 问题是:{query}"
 
             print("-" * 20)
             print("问题:" + query + "?")
             if not self.chatbot.has_history():
                 query = f"{query},{self.config.prompt}"
-
+            # some model can not detect the language code, so we need to add it
+
+            if self.config.tts != "mi":  # mi only say Chinese
+                query += (
+                    ",并用本段话的language code作为开头,用|分隔,如:en-US|你好……"
+                )
+
             if self.config.mute_xiaoai:
                 await self.stop_if_xiaoai_is_playing()
             else:
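
For non-`mi` TTS the query now asks the model to prefix its reply with a language code and a `|` separator (e.g. `en-US|...`), presumably so the TTS backend can be told which language to speak. Where that prefix is consumed is not part of this diff; the split below is purely illustrative of the intended format.

```python
# Purely illustrative; the real consumer of the "lang|text" prefix lives elsewhere
# in xiaogpt and is not shown in this diff.
def split_lang_prefix(answer: str, default_lang: str = "zh-CN") -> tuple[str, str]:
    head, sep, rest = answer.partition("|")
    if sep and 0 < len(head) <= 10 and " " not in head:  # looks like "en-US", "ja-JP", ...
        return head, rest
    return default_lang, answer

print(split_lang_prefix("en-US|Hello from xiaogpt."))  # ('en-US', 'Hello from xiaogpt.')
print(split_lang_prefix("你好,我是小爱同学"))            # ('zh-CN', '你好,我是小爱同学')
```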

xiaogpt-2.64/xiaogpt/bot/newbing_bot.py (removed)

@@ -1,81 +0,0 @@
-from __future__ import annotations
-
-import re
-
-from xiaogpt.bot.base_bot import BaseBot, ChatHistoryMixin
-from xiaogpt.utils import split_sentences
-
-_reference_link_re = re.compile(r"\[\d+\]: .+?\n+")
-
-
-class NewBingBot(ChatHistoryMixin, BaseBot):
-    name = "Bing"
-
-    def __init__(
-        self,
-        bing_cookie_path: str = "",
-        bing_cookies: dict | None = None,
-        proxy: str | None = None,
-    ):
-        from EdgeGPT import Chatbot
-
-        self.history = []
-        self._bot = Chatbot(
-            cookiePath=bing_cookie_path, cookies=bing_cookies, proxy=proxy
-        )
-
-    @classmethod
-    def from_config(cls, config):
-        return cls(
-            bing_cookie_path=config.bing_cookie_path,
-            bing_cookies=config.bing_cookies,
-            proxy=config.proxy,
-        )
-
-    @staticmethod
-    def clean_text(s):
-        s = s.replace("**", "")
-        s = _reference_link_re.sub("", s)
-        s = re.sub(r"\[[\^\d]+\]", "", s)
-        return s.strip()
-
-    async def ask(self, query, **options):
-        from EdgeGPT import ConversationStyle
-
-        kwargs = {"conversation_style": ConversationStyle.balanced, **options}
-        completion = await self._bot.ask(prompt=query, **kwargs)
-        try:
-            text = self.clean_text(completion["item"]["messages"][1]["text"])
-        except Exception as e:
-            print(str(e))
-            return
-        print(text)
-        return text
-
-    async def ask_stream(self, query, **options):
-        from EdgeGPT import ConversationStyle
-
-        kwargs = {"conversation_style": ConversationStyle.balanced, **options}
-        try:
-            completion = self._bot.ask_stream(prompt=query, **kwargs)
-        except Exception:
-            return
-
-        async def text_gen():
-            current = ""
-            async for final, resp in completion:
-                if final:
-                    break
-                text = self.clean_text(resp)
-                if text == current:
-                    continue
-                diff = text[len(current) :]
-                print(diff, end="")
-                yield diff
-                current = text
-
-        try:
-            async for sentence in split_sentences(text_gen()):
-                yield sentence
-        finally:
-            print()