entari-plugin-hyw 3.3.4__tar.gz → 4.0.0rc4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- entari_plugin_hyw-4.0.0rc4/PKG-INFO +116 -0
- entari_plugin_hyw-4.0.0rc4/README.md +87 -0
- {entari_plugin_hyw-3.3.4 → entari_plugin_hyw-4.0.0rc4}/pyproject.toml +3 -2
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/__init__.py +431 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/index.html +396 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos/cerebras.svg +9 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos/huggingface.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos/xiaomi.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/vite.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/anthropic.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/cerebras.svg +9 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/deepseek.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/gemini.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/google.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/grok.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/huggingface.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/microsoft.svg +15 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/minimax.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/mistral.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/nvida.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/openai.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/openrouter.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/perplexity.svg +24 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/qwen.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/xai.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/xiaomi.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/icon/zai.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/.gitignore +24 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/README.md +5 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/index.html +16 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/package-lock.json +2342 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/package.json +31 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/anthropic.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/cerebras.svg +9 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/deepseek.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/gemini.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/google.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/grok.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/huggingface.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/microsoft.svg +15 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/minimax.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/mistral.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/nvida.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/openai.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/openrouter.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/perplexity.svg +24 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/qwen.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/xai.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/xiaomi.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/logos/zai.png +0 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/public/vite.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/src/App.vue +412 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/src/assets/vue.svg +1 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/src/components/HelloWorld.vue +41 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/src/components/MarkdownContent.vue +386 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/src/components/SectionCard.vue +41 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/src/components/StageCard.vue +237 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/src/main.ts +5 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/src/style.css +29 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/src/test_regex.js +103 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/src/types.ts +52 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/tsconfig.app.json +16 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/tsconfig.json +7 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/tsconfig.node.json +26 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/card-ui/vite.config.ts +16 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/core → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw}/history.py +25 -1
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/image_cache.py +274 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/utils → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw}/misc.py +45 -3
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/pipeline.py +1219 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/prompts.py +47 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/render_vue.py +314 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/search.py +735 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw.egg-info/PKG-INFO +116 -0
- entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw.egg-info/SOURCES.txt +92 -0
- {entari_plugin_hyw-3.3.4 → entari_plugin_hyw-4.0.0rc4}/src/entari_plugin_hyw.egg-info/requires.txt +1 -0
- entari_plugin_hyw-3.3.4/PKG-INFO +0 -142
- entari_plugin_hyw-3.3.4/README.md +0 -114
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/__init__.py +0 -818
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/libs/highlight.css +0 -10
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/libs/highlight.js +0 -1213
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/libs/katex-auto-render.js +0 -1
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/libs/katex.css +0 -1
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/libs/katex.js +0 -1
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/libs/tailwind.css +0 -1
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/package-lock.json +0 -953
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/package.json +0 -16
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/tailwind.config.js +0 -12
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/tailwind.input.css +0 -235
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/template.html +0 -157
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/template.html.bak +0 -157
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/template.j2 +0 -307
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/core/__init__.py +0 -0
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/core/config.py +0 -36
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/core/hyw.py +0 -48
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/core/pipeline.py +0 -1062
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/core/render.py +0 -596
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/core/render.py.bak +0 -926
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/utils/__init__.py +0 -2
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/utils/browser.py +0 -40
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/utils/playwright_tool.py +0 -36
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/utils/prompts.py +0 -129
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/utils/search.py +0 -249
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw.egg-info/PKG-INFO +0 -142
- entari_plugin_hyw-3.3.4/src/entari_plugin_hyw.egg-info/SOURCES.txt +0 -50
- {entari_plugin_hyw-3.3.4 → entari_plugin_hyw-4.0.0rc4}/MANIFEST.in +0 -0
- {entari_plugin_hyw-3.3.4 → entari_plugin_hyw-4.0.0rc4}/setup.cfg +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/anthropic.svg +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/deepseek.png +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/gemini.svg +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/google.svg +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/grok.png +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/microsoft.svg +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/minimax.png +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/mistral.png +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/nvida.png +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/openai.svg +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/openrouter.png +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/perplexity.svg +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/qwen.png +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/xai.png +0 -0
- {entari_plugin_hyw-3.3.4/src/entari_plugin_hyw/assets/icon → entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/assets/card-dist/logos}/zai.png +0 -0
- {entari_plugin_hyw-3.3.4 → entari_plugin_hyw-4.0.0rc4}/src/entari_plugin_hyw.egg-info/dependency_links.txt +0 -0
- {entari_plugin_hyw-3.3.4 → entari_plugin_hyw-4.0.0rc4}/src/entari_plugin_hyw.egg-info/top_level.txt +0 -0

```diff
--- /dev/null
+++ entari_plugin_hyw-4.0.0rc4/PKG-INFO
@@ -0,0 +1,116 @@
+Metadata-Version: 2.4
+Name: entari_plugin_hyw
+Version: 4.0.0rc4
+Summary: Use large language models to interpret chat messages
+Author-email: kumoSleeping <zjr2992@outlook.com>
+License: MIT
+Project-URL: Homepage, https://github.com/kumoSleeping/entari-plugin-hyw
+Project-URL: Repository, https://github.com/kumoSleeping/entari-plugin-hyw
+Project-URL: Issue Tracker, https://github.com/kumoSleeping/entari-plugin-hyw/issues
+Keywords: entari,llm,ai,bot,chat
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Python: <3.13,>=3.10
+Description-Content-Type: text/markdown
+Requires-Dist: arclet-entari[full]>=0.16.5
+Requires-Dist: openai
+Requires-Dist: httpx
+Requires-Dist: markdown>=3.10
+Requires-Dist: crawl4ai>=0.7.8
+Requires-Dist: jinja2>=3.0
+Requires-Dist: ddgs>=9.10.0
+Provides-Extra: dev
+Requires-Dist: entari-plugin-server>=0.5.0; extra == "dev"
+Requires-Dist: satori-python-adapter-onebot11>=0.2.5; extra == "dev"
+
+# Entari Plugin HYW
+
+[](https://badge.fury.io/py/entari-plugin-hyw)
+[](https://opensource.org/licenses/MIT)
+[](https://pypi.org/project/entari-plugin-hyw/)
+
+**English** | [简体中文](docs/README_CN.md)
+
+**Entari Plugin HYW** is an advanced agentic chat plugin for the [Entari](https://github.com/entari-org/entari) framework. It leverages Large Language Models (LLMs) to provide intelligent, context-aware, and multi-modal responses within instant messaging environments (OneBot 11, Satori).
+
+The plugin implements a three-stage pipeline (**Vision**, **Instruct**, **Agent**) to autonomously decide when to search the web, crawl pages, or analyze images to answer user queries effectively.
+
+<p align="center">
+  <img src="docs/demo_mockup.svg" width="800" />
+</p>
+
+## Features
+
+- 📖 **Agentic Workflow**
+  Autonomous decision-making process to search, browse, and reason.
+
+- 🎑 **Multi-Modal Support**
+  Native support for image analysis using Vision Language Models (VLMs).
+
+- 🔍 **Web Search & Crawling**
+  Integrated **DuckDuckGo** and **Crawl4AI** for real-time information retrieval.
+
+- 🎨 **Rich Rendering**
+  Responses are rendered as images containing Markdown, syntax-highlighted code, LaTeX math, and citation badges.
+
+- 🔌 **Protocol Support**
+  Deep integration with OneBot 11 and Satori protocols, handling reply context and JSON cards perfectly.
+
+## Installation
+
+```bash
+pip install entari-plugin-hyw
+```
+
+## Configuration
+
+Configure the plugin in your `entari.yml`.
+
+### Minimal Configuration
+
+```yaml
+plugins:
+  entari_plugin_hyw:
+    model_name: google/gemini-2.0-flash-exp
+    api_key: "your-or-api-key-here"
+    # Rendering Configuration
+    render_timeout_ms: 6000        # Browser wait timeout
+    render_image_timeout_ms: 3000  # Image load wait timeout
+```
+
+## Usage
+
+### Commands
+
+- **Text Query**
+  ```text
+  /q What's the latest news on Rust 1.83?
+  ```
+
+- **Image Analysis**
+  *(Send an image with command, or reply to an image)*
+  ```text
+  /q [Image] Explain this error.
+  ```
+- **Quote Query**
+  ```text
+  [quote: User Message] /q
+  ```
+
+- **Follow-up**
+  *Reply to the bot's message to continue the conversation.*
+
+## Documentation for AI/LLMs
+
+- [Instruction Guide (English)](docs/README_LLM_EN.md)
+- [指导手册 (简体中文)](docs/README_LLM_CN.md)
+
+---
+
+## License
+
+This project is licensed under the MIT License.
```

```diff
--- /dev/null
+++ entari_plugin_hyw-4.0.0rc4/README.md
@@ -0,0 +1,87 @@
+# Entari Plugin HYW
+
+[](https://badge.fury.io/py/entari-plugin-hyw)
+[](https://opensource.org/licenses/MIT)
+[](https://pypi.org/project/entari-plugin-hyw/)
+
+**English** | [简体中文](docs/README_CN.md)
+
+**Entari Plugin HYW** is an advanced agentic chat plugin for the [Entari](https://github.com/entari-org/entari) framework. It leverages Large Language Models (LLMs) to provide intelligent, context-aware, and multi-modal responses within instant messaging environments (OneBot 11, Satori).
+
+The plugin implements a three-stage pipeline (**Vision**, **Instruct**, **Agent**) to autonomously decide when to search the web, crawl pages, or analyze images to answer user queries effectively.
+
+<p align="center">
+  <img src="docs/demo_mockup.svg" width="800" />
+</p>
+
+## Features
+
+- 📖 **Agentic Workflow**
+  Autonomous decision-making process to search, browse, and reason.
+
+- 🎑 **Multi-Modal Support**
+  Native support for image analysis using Vision Language Models (VLMs).
+
+- 🔍 **Web Search & Crawling**
+  Integrated **DuckDuckGo** and **Crawl4AI** for real-time information retrieval.
+
+- 🎨 **Rich Rendering**
+  Responses are rendered as images containing Markdown, syntax-highlighted code, LaTeX math, and citation badges.
+
+- 🔌 **Protocol Support**
+  Deep integration with OneBot 11 and Satori protocols, handling reply context and JSON cards perfectly.
+
+## Installation
+
+```bash
+pip install entari-plugin-hyw
+```
+
+## Configuration
+
+Configure the plugin in your `entari.yml`.
+
+### Minimal Configuration
+
+```yaml
+plugins:
+  entari_plugin_hyw:
+    model_name: google/gemini-2.0-flash-exp
+    api_key: "your-or-api-key-here"
+    # Rendering Configuration
+    render_timeout_ms: 6000        # Browser wait timeout
+    render_image_timeout_ms: 3000  # Image load wait timeout
+```
+
+## Usage
+
+### Commands
+
+- **Text Query**
+  ```text
+  /q What's the latest news on Rust 1.83?
+  ```
+
+- **Image Analysis**
+  *(Send an image with command, or reply to an image)*
+  ```text
+  /q [Image] Explain this error.
+  ```
+- **Quote Query**
+  ```text
+  [quote: User Message] /q
+  ```
+
+- **Follow-up**
+  *Reply to the bot's message to continue the conversation.*
+
+## Documentation for AI/LLMs
+
+- [Instruction Guide (English)](docs/README_LLM_EN.md)
+- [指导手册 (简体中文)](docs/README_LLM_CN.md)
+
+---
+
+## License
+
+This project is licensed under the MIT License.
```
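
The minimal `entari.yml` example above only scratches the 4.0 configuration surface; the full set of keys is defined by the `HywConfig` dataclass added in the new `__init__.py` further down this diff. A slightly fuller configuration, written here as the plain mapping that the `entari_plugin_hyw` section of `entari.yml` would deserialize to (placeholder values, not recommendations; key names, defaults, and comments are taken from `HywConfig`):

```python
# Placeholder values for illustration; key names and defaults mirror HywConfig
# in entari_plugin_hyw/__init__.py (4.0.0rc4). Not an official recommended setup.
hyw_plugin_config = {
    "model_name": "google/gemini-2.0-flash-exp",
    "api_key": "your-or-api-key-here",
    "base_url": "https://openrouter.ai/api/v1",   # default in HywConfig
    "search_provider": "ddgs",                    # crawl4ai | httpx | ddgs
    "fetch_provider": "crawl4ai",                 # crawl4ai | jinaai
    "render_timeout_ms": 6000,                    # browser wait timeout
    "render_image_timeout_ms": 3000,              # image load wait timeout
    "theme_color": "255, 0, 0",                   # parse_color() normalises this to "#ff0000"
}
```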

```diff
--- entari_plugin_hyw-3.3.4/pyproject.toml
+++ entari_plugin_hyw-4.0.0rc4/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "entari_plugin_hyw"
-version = "3.3.4"
+version = "4.0.0-rc4"
 description = "Use large language models to interpret chat messages"
 authors = [{name = "kumoSleeping", email = "zjr2992@outlook.com"}]
 dependencies = [
@@ -14,8 +14,9 @@ dependencies = [
     "markdown>=3.10",
     "crawl4ai>=0.7.8",
     "jinja2>=3.0",
+    "ddgs>=9.10.0",
 ]
-requires-python = ">=3.10"
+requires-python = ">=3.10,<3.13"
 readme = "README.md"
 license = {text = "MIT"}
 keywords = ["entari", "llm", "ai", "bot", "chat"]
```
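
`pyproject.toml` now pins `ddgs>=9.10.0` and caps Python at `<3.13`; the new dependency backs the `search_provider: "ddgs"` option that appears in `HywConfig` below. A minimal sketch of that search path, assuming `ddgs` keeps the `DDGS().text()` interface inherited from `duckduckgo_search` (the helper below is hypothetical and is not the plugin's `search.py` implementation):

```python
# Hypothetical helper, not taken from entari_plugin_hyw/search.py.
# Assumes ddgs (the duckduckgo_search successor) still exposes DDGS.text();
# check the installed ddgs release if the signature has moved.
from ddgs import DDGS


def ddgs_text_search(query: str, max_results: int = 5) -> list[dict]:
    """Return result dicts (commonly 'title', 'href', 'body') for a text query."""
    return list(DDGS().text(query, max_results=max_results))


if __name__ == "__main__":
    for hit in ddgs_text_search("entari plugin hyw"):
        print(hit.get("title"), "->", hit.get("href"))
```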

```diff
--- /dev/null
+++ entari_plugin_hyw-4.0.0rc4/src/entari_plugin_hyw/__init__.py
@@ -0,0 +1,431 @@
+from dataclasses import dataclass, field
+from importlib.metadata import version as get_version
+from typing import List, Dict, Any, Optional, Union
+import time
+import asyncio
+
+# 从 pyproject.toml 读取版本号,避免重复维护
+try:
+    __version__ = get_version("entari_plugin_hyw")
+except Exception:
+    __version__ = "0.0.0"
+
+from arclet.alconna import Alconna, Args, AllParam, CommandMeta, Option, Arparma, MultiVar, store_true
+from arclet.entari import metadata, listen, Session, plugin_config, BasicConfModel, plugin, command
+from arclet.letoderea import on
+from arclet.entari import MessageChain, Text, Image, MessageCreatedEvent, Quote, At
+from satori.element import Custom
+from loguru import logger
+import arclet.letoderea as leto
+from arclet.entari.event.command import CommandReceive
+
+from .pipeline import ProcessingPipeline
+from .history import HistoryManager
+from .render_vue import ContentRenderer
+from .misc import process_onebot_json, process_images, resolve_model_name, render_refuse_answer, REFUSE_ANSWER_MARKDOWN
+from arclet.entari.event.lifespan import Cleanup
+
+import os
+import secrets
+import base64
+
+import re
+
+
+def parse_color(color: str) -> str:
+    """
+    Parse color from hex or RGB tuple to hex format.
+    Supports: #ff0000, ff0000, (255, 0, 0), 255,0,0
+    """
+    if not color:
+        return "#ef4444"
+
+    color = str(color).strip()
+
+    # Hex format: #fff or #ffffff or ffffff
+    if color.startswith('#') and len(color) in [4, 7]:
+        return color
+    if re.match(r'^[0-9a-fA-F]{6}$', color):
+        return f'#{color}'
+
+    # RGB tuple: (r, g, b) or r,g,b
+    rgb_match = re.match(r'^\(?(\d+)[,\s]+(\d+)[,\s]+(\d+)\)?$', color)
+    if rgb_match:
+        r, g, b = (max(0, min(255, int(x))) for x in rgb_match.groups())
+        return f'#{r:02x}{g:02x}{b:02x}'
+
+    logger.warning(f"Invalid color '{color}', using default #ef4444")
+    return "#ef4444"
+
+class _RecentEventDeduper:
+    def __init__(self, ttl_seconds: float = 30.0, max_size: int = 2048):
+        self.ttl_seconds = ttl_seconds
+        self.max_size = max_size
+        self._seen: Dict[str, float] = {}
+
+    def seen_recently(self, key: str) -> bool:
+        now = time.time()
+        if len(self._seen) > self.max_size:
+            self._prune(now)
+        ts = self._seen.get(key)
+        if ts is None or now - ts > self.ttl_seconds:
+            self._seen[key] = now
+            return False
+        return True
+
+    def _prune(self, now: float):
+        expired = [k for k, ts in self._seen.items() if now - ts > self.ttl_seconds]
+        for k in expired:
+            self._seen.pop(k, None)
+        if len(self._seen) > self.max_size:
+            for k, _ in sorted(self._seen.items(), key=lambda kv: kv[1])[: len(self._seen) - self.max_size]:
+                self._seen.pop(k, None)
+
+_event_deduper = _RecentEventDeduper()
+
+@dataclass
+class HywConfig(BasicConfModel):
+    admins: List[str] = field(default_factory=list)
+    models: List[Dict[str, Any]] = field(default_factory=list)
+    question_command: str = "/q"
+    model_name: Optional[str] = None
+    api_key: Optional[str] = None
+    base_url: str = "https://openrouter.ai/api/v1"
+    vision_model_name: Optional[str] = None
+    vision_api_key: Optional[str] = None
+    language: str = "Simplified Chinese"
+    vision_base_url: Optional[str] = None
+    instruct_model_name: Optional[str] = None
+    instruct_api_key: Optional[str] = None
+    instruct_base_url: Optional[str] = None
+    search_base_url: str = "https://lite.duckduckgo.com/lite/?q={query}"
+    image_search_base_url: str = "https://duckduckgo.com/?q={query}&iax=images&ia=images"
+    headless: bool = False
+    save_conversation: bool = False
+    icon: str = "openai"
+    render_timeout_ms: int = 6000
+    render_image_timeout_ms: int = 3000
+    extra_body: Optional[Dict[str, Any]] = None
+    vision_extra_body: Optional[Dict[str, Any]] = None
+    instruct_extra_body: Optional[Dict[str, Any]] = None
+    enable_browser_fallback: bool = False
+    reaction: bool = False
+    quote: bool = True
+    temperature: float = 0.4
+    # Billing configuration (price per million tokens)
+    input_price: Optional[float] = None  # $ per 1M input tokens
+    output_price: Optional[float] = None  # $ per 1M output tokens
+    # Vision model pricing overrides (defaults to main model pricing if not set)
+    vision_input_price: Optional[float] = None
+    vision_output_price: Optional[float] = None
+    # Instruct model pricing overrides (defaults to main model pricing if not set)
+    instruct_input_price: Optional[float] = None
+    instruct_output_price: Optional[float] = None
+    # Provider Names
+    search_name: str = "DuckDuckGo"
+    search_provider: str = "crawl4ai"  # crawl4ai | httpx | ddgs
+    fetch_provider: str = "crawl4ai"  # crawl4ai | jinaai
+    jina_api_key: Optional[str] = None  # Optional API key for Jina AI
+    model_provider: Optional[str] = None
+    vision_model_provider: Optional[str] = None
+    instruct_model_provider: Optional[str] = None
+
+    # Search/Fetch Settings
+    search_timeout: float = 10.0
+    search_retries: int = 2
+    fetch_timeout: float = 15.0
+    fetch_max_results: int = 5
+    fetch_blocked_domains: Optional[List[str]] = None
+
+    # Fetch Model Config
+    fetch_model_name: Optional[str] = None
+    fetch_api_key: Optional[str] = None
+    fetch_base_url: Optional[str] = None
+    fetch_extra_body: Optional[Dict[str, Any]] = None
+    fetch_input_price: Optional[float] = None
+    fetch_output_price: Optional[float] = None
+    # Summary Model Config
+    summary_model_name: Optional[str] = None
+    summary_api_key: Optional[str] = None
+    summary_base_url: Optional[str] = None
+    summary_extra_body: Optional[Dict[str, Any]] = None
+    summary_input_price: Optional[float] = None
+    summary_output_price: Optional[float] = None
+    # UI Theme
+    theme_color: str = "#ef4444"  # Tailwind red-500, supports hex/RGB/color names
+
+    def __post_init__(self):
+        """Parse and normalize theme color after initialization."""
+        self.theme_color = parse_color(self.theme_color)
+
+
+
+conf = plugin_config(HywConfig)
+history_manager = HistoryManager()
+renderer = ContentRenderer()
+
+
+class GlobalCache:
+    models_image_path: Optional[str] = None
+
+global_cache = GlobalCache()
+
+async def react(session: Session, emoji: str):
+    if not conf.reaction: return
+    try:
+        await session.reaction_create(emoji=emoji)
+    except Exception as e:
+        logger.warning(f"Reaction failed: {e}")
+
+async def process_request(
+    session: Session[MessageCreatedEvent],
+    all_param: Optional[MessageChain] = None,
+    selected_model: Optional[str] = None,
+    selected_vision_model: Optional[str] = None,
+    conversation_key_override: Optional[str] = None,
+    local_mode: bool = False,
+) -> None:
+    logger.info(f"Processing request: {all_param}")
+    mc = MessageChain(all_param)
+    logger.info(f"reply: {session.reply}")
+    if session.reply:
+        try:
+            # Check if reply is from self (the bot)
+            # 1. Check by Message ID (reliable for bot's own messages if recorded)
+            reply_msg_id = str(session.reply.origin.id) if hasattr(session.reply.origin, 'id') else None
+            is_bot = False
+
+            if reply_msg_id and history_manager.is_bot_message(reply_msg_id):
+                is_bot = True
+                logger.info(f"Reply target {reply_msg_id} identified as bot message via history")
+
+            if is_bot:
+                logger.info("Reply is from me - ignoring content")
+            else:
+                logger.info(f"Reply is from user (or unknown) - including content")
+                mc.extend(MessageChain(" ") + session.reply.origin.message)
+        except Exception as e:
+            logger.warning(f"Failed to process reply origin: {e}")
+            mc.extend(MessageChain(" ") + session.reply.origin.message)
+
+    # Filter and reconstruct MessageChain
+    filtered_elements = mc.get(Text) + mc.get(Image) + mc.get(Custom)
+    mc = MessageChain(filtered_elements)
+    logger.info(f"mc: {mc}")
+
+    text_content = str(mc.get(Text)).strip()
+    # Remove HTML image tags from text content to prevent "unreasonable code behavior"
+    text_content = re.sub(r'<img[^>]+>', '', text_content, flags=re.IGNORECASE)
+
+    if not text_content and not mc.get(Image) and not mc.get(Custom):
+        return
+
+    # History & Context
+    hist_key = conversation_key_override
+    if not hist_key and session.reply and hasattr(session.reply.origin, 'id'):
+        hist_key = history_manager.get_conversation_id(str(session.reply.origin.id))
+
+    hist_payload = history_manager.get_history(hist_key) if hist_key else []
+    meta = history_manager.get_metadata(hist_key) if hist_key else {}
+    context_id = f"guild_{session.guild.id}" if session.guild else f"user_{session.user.id}"
+
+    if conf.reaction: await react(session, "✨")
+
+    try:
+        msg_text = str(mc.get(Text)).strip() if mc.get(Text) else ""
+        msg_text = re.sub(r'<img[^>]+>', '', msg_text, flags=re.IGNORECASE)
+
+        # If message is empty but has images, use a placeholder
+        if not msg_text and (mc.get(Image) or mc.get(Custom)):
+            msg_text = "[图片]"
+
+        for custom in [e for e in mc if isinstance(e, Custom)]:
+            if custom.tag == 'onebot:json':
+                if decoded := process_onebot_json(custom.attributes()): msg_text += f"\n{decoded}"
+                break
+
+        # Model Selection (Step 1)
+        # Resolve model names from config if they are short names/keywords
+        model = selected_model or meta.get("model")
+        if model and model != "off":
+            resolved, err = resolve_model_name(model, conf.models)
+            if resolved:
+                model = resolved
+            elif err:
+                logger.warning(f"Model resolution warning for {model}: {err}")
+
+        vision_model = selected_vision_model or meta.get("vision_model")
+        if vision_model and vision_model != "off":
+            resolved_v, err_v = resolve_model_name(vision_model, conf.models)
+            if resolved_v:
+                vision_model = resolved_v
+            elif err_v:
+                logger.warning(f"Vision model resolution warning for {vision_model}: {err_v}")
+
+        images, err = await process_images(mc, vision_model)
+
+        # Call Pipeline directly
+        safe_input = msg_text
+        pipeline = ProcessingPipeline(conf)
+        try:
+            resp = await pipeline.execute(
+                safe_input,
+                hist_payload,
+                model_name=model,
+                images=images,
+                selected_vision_model=vision_model,
+            )
+        finally:
+            await pipeline.close()
+
+        # Step 1 Results
+        step1_vision_model = resp.get("vision_model_used")
+        step1_model = resp.get("model_used")
+        step1_history = resp.get("conversation_history", [])
+        step1_stats = resp.get("stats", {})
+
+        final_resp = resp
+
+        # Step 2 (Optional)
+
+
+
+        # Extract Response Data
+        content = final_resp.get("llm_response", "")
+        structured = final_resp.get("structured_response", {})
+
+        # Render
+        import tempfile
+        with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tf:
+            output_path = tf.name
+        model_used = final_resp.get("model_used")
+
+        # Determine session short code
+        if hist_key:
+            display_session_id = history_manager.get_code_by_key(hist_key)
+            if not display_session_id:
+                display_session_id = history_manager.generate_short_code()
+        else:
+            display_session_id = history_manager.generate_short_code()
+
+        # Use stats_list if available, otherwise standard stats
+        stats_to_render = final_resp.get("stats_list", final_resp.get("stats", {}))
+
+        # Check if refuse_answer was triggered
+        if final_resp.get("refuse_answer"):
+            logger.info(f"Refuse answer triggered. Rendering refuse image. Reason: {final_resp.get('refuse_reason', '')}")
+            render_ok = await render_refuse_answer(
+                renderer=renderer,
+                output_path=output_path,
+                reason=final_resp.get('refuse_reason', 'Instruct 专家分配此任务流程失败,请尝试提出其他问题~'),
+                theme_color=conf.theme_color,
+            )
+        else:
+            render_ok = await renderer.render(
+                markdown_content=content,
+                output_path=output_path,
+                stats=stats_to_render,
+                references=structured.get("references", []),
+                page_references=structured.get("page_references", []),
+                image_references=structured.get("image_references", []),
+                stages_used=final_resp.get("stages_used", []),
+                image_timeout=conf.render_image_timeout_ms,
+                theme_color=conf.theme_color,
+            )
+
+        # Send & Save
+        if not render_ok:
+            logger.error("Render failed; skipping reply. Check Crawl4AI rendering status.")
+            if os.path.exists(output_path):
+                try:
+                    os.remove(output_path)
+                except Exception as exc:
+                    logger.warning(f"Failed to delete render output {output_path}: {exc}")
+            sent = None
+        else:
+            # Convert to base64
+            with open(output_path, "rb") as f:
+                img_data = base64.b64encode(f.read()).decode()
+
+            # Build single reply chain (image only now)
+            elements = []
+            elements.append(Image(src=f'data:image/png;base64,{img_data}'))
+
+            msg_chain = MessageChain(*elements)
+
+            if conf.quote:
+                msg_chain = MessageChain(Quote(session.event.message.id)) + msg_chain
+
+            # Use reply_to instead of manual Quote insertion to avoid ActionFailed errors
+            sent = await session.send(msg_chain)
+
+        sent_id = next((str(e.id) for e in sent if hasattr(e, 'id')), None) if sent else None
+        msg_id = str(session.event.message.id) if hasattr(session.event, 'message') else str(session.event.id)
+        related = [msg_id] + ([str(session.reply.origin.id)] if session.reply and hasattr(session.reply.origin, 'id') else [])
+
+        history_manager.remember(
+            sent_id,
+            final_resp.get("conversation_history", []),
+            related,
+            {
+                "model": model_used,
+                "trace_markdown": final_resp.get("trace_markdown"),
+            },
+            context_id,
+            code=display_session_id,
+        )
+
+
+
+
+    except Exception as e:
+        logger.exception(f"Error: {e}")
+        err_msg = f"Error: {e}"
+        if conf.quote:
+            await session.send([Quote(session.event.message.id), err_msg])
+        else:
+            await session.send(err_msg)
+
+        # Save conversation on error if response was generated
+        if 'resp' in locals() and resp and conf.save_conversation:
+            try:
+                # Use a temporary ID for error cases
+                error_id = f"error_{int(time.time())}_{secrets.token_hex(4)}"
+                history_manager.remember(error_id, resp.get("conversation_history", []), [], {"model": model_used if 'model_used' in locals() else "unknown", "error": str(e)}, context_id, code=display_session_id if 'display_session_id' in locals() else None)
+                # history_manager.save_to_disk(error_id)
+                logger.info(f"Saved error conversation memory to {error_id}")
+            except Exception as save_err:
+                logger.error(f"Failed to save error conversation: {save_err}")
+
+
+alc = Alconna(
+    conf.question_command,
+    Args["all_param;?", AllParam],
+)
+
+@command.on(alc)
+async def handle_question_command(session: Session[MessageCreatedEvent], result: Arparma):
+    """Handle main Question command"""
+    try:
+        logger.info(f"Question Command Triggered. Message: {result}")
+        mid = str(session.event.message.id) if getattr(session.event, "message", None) else str(session.event.id)
+        dedupe_key = f"{getattr(session.account, 'id', 'account')}:{mid}"
+        if _event_deduper.seen_recently(dedupe_key):
+            logger.warning(f"Duplicate command event ignored: {dedupe_key}")
+            return
+    except Exception:
+        pass
+
+    logger.info(f"Question Command Triggered. Message: {session.event.message}")
+
+    args = result.all_matched_args
+    logger.info(f"Matched Args: {args}")
+
+    await process_request(session, args.get("all_param"), selected_model=None, selected_vision_model=None, conversation_key_override=None, local_mode=False)
+
+metadata("hyw", author=[{"name": "kumoSleeping", "email": "zjr2992@outlook.com"}], version=__version__, config=HywConfig)
+
+
+@listen(CommandReceive)
+async def remove_at(content: MessageChain):
+    return content.lstrip(At)
```