@shun-js/aibaiban-server 1.1.7 → 1.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@shun-js/aibaiban-server",
|
|
3
|
-
"version": "1.1.7",
|
|
3
|
+
"version": "1.1.8",
|
|
4
4
|
"description": "aibaiban.com server",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"ai aibaiban"
|
|
@@ -37,7 +37,7 @@
|
|
|
37
37
|
"qiao-z-nuser": "^6.0.0",
|
|
38
38
|
"qiao-z-service": "^6.0.0",
|
|
39
39
|
"qiao-z-sms": "^6.0.0",
|
|
40
|
-
"viho-llm": "^1.0.
|
|
40
|
+
"viho-llm": "^1.0.8",
|
|
41
41
|
"zod": "^4.3.6",
|
|
42
42
|
"zod-to-json-schema": "^3.25.1"
|
|
43
43
|
},
|
|
@@ -45,5 +45,5 @@
|
|
|
45
45
|
"access": "public",
|
|
46
46
|
"registry": "https://registry.npmjs.org/"
|
|
47
47
|
},
|
|
48
|
-
"gitHead": "
|
|
48
|
+
"gitHead": "4b29e10a1051e2364ccd96f6f74daba34f49a7e4"
|
|
49
49
|
}
|
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
// service
|
|
2
2
|
const service = require('../service/LLMService.js');
|
|
3
|
+
const service2 = require('../service/LLMServiceV2.js');
|
|
3
4
|
|
|
4
5
|
/**
|
|
5
6
|
* controller
|
|
@@ -9,4 +10,9 @@ module.exports = (app) => {
|
|
|
9
10
|
app.post('/chat-streaming', (req, res) => {
|
|
10
11
|
service.drawAgent(req, res);
|
|
11
12
|
});
|
|
13
|
+
|
|
14
|
+
// draw agent
|
|
15
|
+
app.post('/draw-agent', (req, res) => {
|
|
16
|
+
service2.drawAgent(req, res);
|
|
17
|
+
});
|
|
12
18
|
};
|
|
// llm
const { OpenAIAPI, runAgents } = require('viho-llm');
const prompts = require('../util/prompt-agent-v2.js');

// util
const { chatFeishuMsg, errorFeishuMsg } = require('../util/feishu.js');

// LLM configuration: `llm.default` names which provider entry of the global
// app config to use; one shared client is built for the whole module.
// NOTE(review): assumes global.QZ_CONFIG is populated before this module is
// first required — confirm against app bootstrap order.
const llmConfig = global.QZ_CONFIG.llm;
const finalLLMConfig = llmConfig[llmConfig.default];
const llm = OpenAIAPI(finalLLMConfig);
const modelName = finalLLMConfig.modelName;
const thinking = finalLLMConfig.thinking;
15
|
+
/**
 * drawAgentV2 - conversational drawing agent (SSE endpoint).
 *
 * One decision LLM call first, then dispatch on its `action`:
 *   - 'reply'      → stream the decision's own message back
 *   - 'irrelevant' → stream the fixed canned reply
 *   - 'generate'   → run two chained agents (elaborate → generate) and
 *                    stream the resulting mermaid code
 *
 * @param {object} req - request; reads req.body.messages (chat history), uses req.logger
 * @param {object} res - response; uses jsonFail / streamingStart / streaming / streamingEnd helpers
 */
exports.drawAgent = async (req, res) => {
  const methodName = 'drawAgentV2';
  const messages = req.body.messages;

  // Guard: a non-empty chat history is required (fails as plain JSON, before SSE starts).
  if (!messages?.length) {
    const msg = 'need messages';
    req.logger.error(methodName, msg);
    res.jsonFail(msg);
    return;
  }

  // Start SSE
  res.streamingStart();

  // Last user message — used for logging / Feishu monitoring only.
  const lastUserMsg = messages.filter((m) => m.role === 'user').pop()?.content || '';
  req.logger.info(methodName, 'lastUserMsg', lastUserMsg);
  chatFeishuMsg(req, `v2-${lastUserMsg}`);

  try {
    const startTime = Date.now();

    // 1. Agent decision: one JSON-mode LLM call with AGENT_PROMPT as system
    //    prompt plus the full chat history.
    res.streaming(`data: ${JSON.stringify({ type: 'status', step: 'thinking' })}\n\n`);

    let decision = null;
    await runAgents([
      {
        agentStartCallback: () => {
          req.logger.info(methodName, 'step: agent decision');
        },
        agentRequestOptions: {
          llm,
          modelName,
          thinking,
          isJson: true, // AGENT_PROMPT instructs the model to answer with JSON only
          messages: [{ role: 'system', content: prompts.AGENT_PROMPT }, ...messages],
        },
        agentEndCallback: (result) => {
          decision = result;
          const duration = Date.now() - startTime;
          req.logger.info(methodName, 'decision', JSON.stringify(decision), `${duration}ms`);
          chatFeishuMsg(req, `v2-decision-${decision.action}`);
        },
      },
    ]);

    // 2. Dispatch on the decision's action
    if (decision.action === 'reply') {
      res.streaming(`data: ${JSON.stringify({ type: 'message', content: decision.message })}\n\n`);
    } else if (decision.action === 'irrelevant') {
      res.streaming(`data: ${JSON.stringify({ type: 'message', content: prompts.FIXED_REPLY })}\n\n`);
    } else if (decision.action === 'generate') {
      let elaboration = '';

      // Two chained agents. The `get messages()` getters presumably let
      // runAgents build each request lazily, so the second agent sees the
      // `elaboration` written by the first agent's end callback —
      // TODO confirm against viho-llm's runAgents semantics.
      await runAgents([
        // elaborate
        {
          agentStartCallback: () => {
            res.streaming(`data: ${JSON.stringify({ type: 'status', step: 'elaborate' })}\n\n`);
            req.logger.info(methodName, 'step: elaborate');
          },
          agentRequestOptions: {
            llm,
            modelName,
            thinking,
            get messages() {
              return [
                {
                  role: 'user',
                  content: prompts.ELABORATE_PROMPT.replace('{input}', decision.description).replace(
                    '{diagramType}',
                    decision.diagramType,
                  ),
                },
              ];
            },
          },
          agentEndCallback: (result) => {
            elaboration = result;
            // Truncated to 100 chars for log readability.
            req.logger.info(methodName, 'elaboration', String(elaboration).slice(0, 100) + '...');
          },
        },
        // generate
        {
          agentStartCallback: () => {
            res.streaming(`data: ${JSON.stringify({ type: 'status', step: 'generate' })}\n\n`);
            req.logger.info(methodName, 'step: generate');
          },
          agentRequestOptions: {
            llm,
            modelName,
            thinking,
            get messages() {
              return [
                {
                  role: 'user',
                  content: prompts.GENERATE_PROMPT.replace('{diagramType}', decision.diagramType).replace(
                    '{elaboration}',
                    elaboration,
                  ),
                },
              ];
            },
          },
          agentEndCallback: (result) => {
            const mermaidCode = result;
            const duration = Date.now() - startTime;
            req.logger.info(
              methodName,
              'mermaidCode',
              String(mermaidCode).slice(0, 100) + '...',
              `total: ${duration}ms`,
            );
            chatFeishuMsg(req, `v2-mermaid-${mermaidCode}`);
            res.streaming(`data: ${JSON.stringify({ type: 'mermaid', code: mermaidCode, duration })}\n\n`);
          },
        },
      ]);
    } else {
      // Unknown action — fall back to the decision's message, if any.
      res.streaming(
        `data: ${JSON.stringify({ type: 'message', content: decision.message || '可以再说一次吗?' })}\n\n`,
      );
    }

    res.streamingEnd();
  } catch (error) {
    // Errors after streamingStart() cannot become an HTTP error status;
    // report them as an SSE 'error' event and close the stream.
    req.logger.error(methodName, 'error', error);
    errorFeishuMsg(req, error.message);
    res.streaming(`data: ${JSON.stringify({ type: 'error', message: error.message })}\n\n`);
    res.streamingEnd();
  }
};
/**
 * Agent V2 Prompts - conversational agent.
 * Reuses ELABORATE_PROMPT / GENERATE_PROMPT / FIXED_REPLY from the v1 prompt
 * module and adds a single decision prompt on top.
 */
const { ELABORATE_PROMPT, GENERATE_PROMPT, FIXED_REPLY } = require('./prompt-agent.js');

/**
 * Agent decision prompt:
 * one LLM call that does intent detection + decision, replying with JSON only
 * (action: "reply" | "generate" | "irrelevant").
 */
const AGENT_PROMPT = `你是 AI 白板助手,帮用户在白板上画图表。

## 能力
支持 4 种图表:flowchart(流程图)、sequence(时序图)、classDiagram(类图)、erDiagram(ER图)。

## 决策规则
收到用户消息后判断,只回复 JSON:

1. 回复文字(追问、确认、引导):
{"action":"reply","message":"你的回复内容"}

2. 生成图表(信息已足够明确):
{"action":"generate","diagramType":"类型","description":"详细描述,包含所有节点、关系、步骤"}

3. 与画图完全无关:
{"action":"irrelevant"}

## 判断标准
- 用户说了图表类型 + 具体场景/内容 → generate
- 用户只说了类型没说内容 → reply 追问具体场景
- 用户只说了内容没说类型 → 你来判断最合适的类型,直接 generate
- 用户说"帮我画个图" → reply 追问想画什么
- 用户要修改已有图表 → 根据对话历史理解上下文,generate 完整新图表
- 用户闲聊但话题相关 → reply 自然回应并引导回画图
- 用户闲聊话题无关 → irrelevant

## 回复风格
- 追问时友好自然,像朋友聊天,不要生硬
- 可以给建议和示例帮助用户想清楚需求
- generate 时 description 要尽可能详细完整

只回复 JSON。`;

// Re-export the v1 prompts alongside AGENT_PROMPT so consumers need only this module.
module.exports = { AGENT_PROMPT, ELABORATE_PROMPT, GENERATE_PROMPT, FIXED_REPLY };
|