koishi-plugin-chatluna-anuneko-api-adapter 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/anuneko-client.d.ts +16 -0
- package/lib/anuneko-requester.d.ts +20 -0
- package/lib/index.d.ts +16 -0
- package/lib/index.js +594 -0
- package/lib/logger.d.ts +6 -0
- package/package.json +69 -0
- package/readme.md +5 -0
- package/src/anuneko-client.ts +112 -0
- package/src/anuneko-requester.ts +318 -0
- package/src/index.ts +265 -0
- package/src/locales/en-US.schema.yml +4 -0
- package/src/locales/zh-CN.schema.yml +4 -0
- package/src/logger.ts +18 -0
package/package.json
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "koishi-plugin-chatluna-anuneko-api-adapter",
|
|
3
|
+
"description": "anuneko API adapter for ChatLuna, using pearktrue API.",
|
|
4
|
+
"version": "1.0.0",
|
|
5
|
+
"main": "lib/index.js",
|
|
6
|
+
"typings": "lib/index.d.ts",
|
|
7
|
+
"files": [
|
|
8
|
+
"lib",
|
|
9
|
+
"src"
|
|
10
|
+
],
|
|
11
|
+
"homepage": "https://github.com/koishi-shangxue-plugins/koishi-shangxue-apps/tree/main/",
|
|
12
|
+
"bugs": {
|
|
13
|
+
"url": "https://github.com/koishi-shangxue-plugins/koishi-shangxue-apps/issues"
|
|
14
|
+
},
|
|
15
|
+
"license": "AGPL-3.0",
|
|
16
|
+
"engines": {
|
|
17
|
+
"node": ">=18.0.0"
|
|
18
|
+
},
|
|
19
|
+
"resolutions": {
|
|
20
|
+
"@langchain/core": "0.3.62",
|
|
21
|
+
"js-tiktoken": "npm:@dingyi222666/js-tiktoken@^1.0.21"
|
|
22
|
+
},
|
|
23
|
+
"overrides": {
|
|
24
|
+
"@langchain/core": "0.3.62",
|
|
25
|
+
"js-tiktoken": "npm:@dingyi222666/js-tiktoken@^1.0.21"
|
|
26
|
+
},
|
|
27
|
+
"pnpm": {
|
|
28
|
+
"overrides": {
|
|
29
|
+
"@langchain/core": "0.3.62",
|
|
30
|
+
"js-tiktoken": "npm:@dingyi222666/js-tiktoken@^1.0.21"
|
|
31
|
+
}
|
|
32
|
+
},
|
|
33
|
+
"keywords": [
|
|
34
|
+
"chatbot",
|
|
35
|
+
"koishi",
|
|
36
|
+
"plugin",
|
|
37
|
+
"service",
|
|
38
|
+
"chatgpt",
|
|
39
|
+
"gpt",
|
|
40
|
+
"chatluna",
|
|
41
|
+
"adapter"
|
|
42
|
+
],
|
|
43
|
+
"dependencies": {
|
|
44
|
+
"@chatluna/v1-shared-adapter": "^1.0.14",
|
|
45
|
+
"@langchain/core": "0.3.62",
|
|
46
|
+
"zod": "3.25.76",
|
|
47
|
+
"zod-to-json-schema": "^3.24.6"
|
|
48
|
+
},
|
|
49
|
+
"devDependencies": {
|
|
50
|
+
"atsc": "^2.1.0",
|
|
51
|
+
"koishi": "^4.18.9",
|
|
52
|
+
"koishi-plugin-chatluna": "^1.3.5"
|
|
53
|
+
},
|
|
54
|
+
"peerDependencies": {
|
|
55
|
+
"koishi": "^4.18.9",
|
|
56
|
+
"koishi-plugin-chatluna": "^1.3.5"
|
|
57
|
+
},
|
|
58
|
+
"koishi": {
|
|
59
|
+
"description": {
|
|
60
|
+
"zh": "ChatLuna 的 anuneko API 适配器。零成本、快速体验Chatluna。",
|
|
61
|
+
"en": "anuneko API adapter for ChatLuna, using pearktrue API."
|
|
62
|
+
},
|
|
63
|
+
"service": {
|
|
64
|
+
"required": [
|
|
65
|
+
"chatluna"
|
|
66
|
+
]
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
}
|
package/src/anuneko-client.ts
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
import { Context } from 'koishi'
|
|
2
|
+
import { PlatformModelAndEmbeddingsClient } from 'koishi-plugin-chatluna/llm-core/platform/client'
|
|
3
|
+
import {
|
|
4
|
+
ChatLunaBaseEmbeddings,
|
|
5
|
+
ChatLunaChatModel
|
|
6
|
+
} from 'koishi-plugin-chatluna/llm-core/platform/model'
|
|
7
|
+
import {
|
|
8
|
+
ModelInfo,
|
|
9
|
+
ModelType
|
|
10
|
+
} from 'koishi-plugin-chatluna/llm-core/platform/types'
|
|
11
|
+
import {
|
|
12
|
+
ChatLunaError,
|
|
13
|
+
ChatLunaErrorCode
|
|
14
|
+
} from 'koishi-plugin-chatluna/utils/error'
|
|
15
|
+
import { Config } from './index'
|
|
16
|
+
import { AnunekoRequester } from './anuneko-requester'
|
|
17
|
+
import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat'
|
|
18
|
+
import { getModelMaxContextSize } from '@chatluna/v1-shared-adapter'
|
|
19
|
+
import { RunnableConfig } from '@langchain/core/runnables'
|
|
20
|
+
import { logInfo } from './logger'
|
|
21
|
+
|
|
22
|
+
export class AnunekoClient extends PlatformModelAndEmbeddingsClient {
|
|
23
|
+
platform = 'anuneko'
|
|
24
|
+
|
|
25
|
+
private _requester: AnunekoRequester
|
|
26
|
+
|
|
27
|
+
constructor(
|
|
28
|
+
ctx: Context,
|
|
29
|
+
private _config: Config,
|
|
30
|
+
public plugin: ChatLunaPlugin
|
|
31
|
+
) {
|
|
32
|
+
super(ctx, plugin.platformConfigPool)
|
|
33
|
+
this.platform = _config.platform
|
|
34
|
+
this._requester = new AnunekoRequester(
|
|
35
|
+
ctx,
|
|
36
|
+
plugin.platformConfigPool,
|
|
37
|
+
_config,
|
|
38
|
+
plugin
|
|
39
|
+
)
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
async refreshModels(config?: RunnableConfig): Promise<ModelInfo[]> {
|
|
43
|
+
return [
|
|
44
|
+
{
|
|
45
|
+
name: 'orange-cat',
|
|
46
|
+
type: ModelType.llm,
|
|
47
|
+
capabilities: [],
|
|
48
|
+
maxTokens: 128000
|
|
49
|
+
},
|
|
50
|
+
{
|
|
51
|
+
name: 'exotic-shorthair',
|
|
52
|
+
type: ModelType.llm,
|
|
53
|
+
capabilities: [],
|
|
54
|
+
maxTokens: 128000
|
|
55
|
+
}
|
|
56
|
+
]
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
protected _createModel(
|
|
60
|
+
model: string
|
|
61
|
+
): ChatLunaChatModel | ChatLunaBaseEmbeddings {
|
|
62
|
+
logInfo('[anuneko] _createModel called for model:', model)
|
|
63
|
+
logInfo('[anuneko] _modelInfos keys:', Object.keys(this._modelInfos))
|
|
64
|
+
|
|
65
|
+
const info = this._modelInfos[model]
|
|
66
|
+
|
|
67
|
+
logInfo('[anuneko] Model info:', JSON.stringify(info))
|
|
68
|
+
|
|
69
|
+
if (info == null) {
|
|
70
|
+
this.ctx.logger.error('[anuneko] Model info is null!')
|
|
71
|
+
throw new ChatLunaError(
|
|
72
|
+
ChatLunaErrorCode.MODEL_NOT_FOUND,
|
|
73
|
+
new Error(
|
|
74
|
+
`The model ${model} is not found in the models: ${JSON.stringify(Object.keys(this._modelInfos))}`
|
|
75
|
+
)
|
|
76
|
+
)
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
logInfo('[anuneko] Model type:', info.type, 'Expected:', ModelType.llm)
|
|
80
|
+
logInfo('[anuneko] Type check:', info.type === ModelType.llm)
|
|
81
|
+
|
|
82
|
+
if (info.type === ModelType.llm) {
|
|
83
|
+
logInfo('[anuneko] Creating ChatLunaChatModel...')
|
|
84
|
+
const modelMaxContextSize = getModelMaxContextSize(info)
|
|
85
|
+
|
|
86
|
+
const chatModel = new ChatLunaChatModel({
|
|
87
|
+
modelInfo: info,
|
|
88
|
+
requester: this._requester,
|
|
89
|
+
model,
|
|
90
|
+
maxTokenLimit: info.maxTokens || modelMaxContextSize || 128_000,
|
|
91
|
+
modelMaxContextSize,
|
|
92
|
+
timeout: this._config.timeout,
|
|
93
|
+
maxRetries: this._config.maxRetries,
|
|
94
|
+
llmType: 'openai',
|
|
95
|
+
isThinkModel: false
|
|
96
|
+
})
|
|
97
|
+
|
|
98
|
+
logInfo('[anuneko] ChatLunaChatModel created successfully')
|
|
99
|
+
logInfo('[anuneko] Instance check:', chatModel instanceof ChatLunaChatModel)
|
|
100
|
+
|
|
101
|
+
return chatModel
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
this.ctx.logger.error('[anuneko] Model type is not LLM!')
|
|
105
|
+
throw new ChatLunaError(
|
|
106
|
+
ChatLunaErrorCode.MODEL_NOT_FOUND,
|
|
107
|
+
new Error(
|
|
108
|
+
`The model ${model} is not a chat model, type is ${info.type}`
|
|
109
|
+
)
|
|
110
|
+
)
|
|
111
|
+
}
|
|
112
|
+
}
|
|
package/src/anuneko-requester.ts
ADDED
@@ -0,0 +1,318 @@
|
|
|
1
|
+
import { ChatGeneration, ChatGenerationChunk } from '@langchain/core/outputs'
|
|
2
|
+
import {
|
|
3
|
+
ModelRequester,
|
|
4
|
+
ModelRequestParams
|
|
5
|
+
} from 'koishi-plugin-chatluna/llm-core/platform/api'
|
|
6
|
+
import {
|
|
7
|
+
ClientConfig,
|
|
8
|
+
ClientConfigPool
|
|
9
|
+
} from 'koishi-plugin-chatluna/llm-core/platform/config'
|
|
10
|
+
import { Config, logger } from './index'
|
|
11
|
+
import { logInfo } from './logger'
|
|
12
|
+
import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat'
|
|
13
|
+
import { Context } from 'koishi'
|
|
14
|
+
import {
|
|
15
|
+
AIMessageChunk,
|
|
16
|
+
HumanMessage,
|
|
17
|
+
SystemMessage
|
|
18
|
+
} from '@langchain/core/messages'
|
|
19
|
+
|
|
20
|
+
/** Resolves after `ms` milliseconds; used for the retry back-off below. */
const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))

/**
 * HumanMessage as produced by the Koishi/ChatLuna pipeline, which may carry
 * the originating channel id — presumably attached upstream; used here only
 * as a session key (TODO confirm against caller).
 */
interface KoishiHumanMessage extends HumanMessage {
    channelId?: string
}

/** Narrows `ModelRequestParams.input` to the message types this adapter reads. */
interface InternalModelRequestParams extends ModelRequestParams {
    input: (HumanMessage | SystemMessage)[]
}
|
|
29
|
+
|
|
30
|
+
export class AnunekoRequester extends ModelRequester {
|
|
31
|
+
// 存储每个用户的会话ID
|
|
32
|
+
private sessionMap = new Map<string, string>()
|
|
33
|
+
// 存储每个用户的当前模型
|
|
34
|
+
private modelMap = new Map<string, string>()
|
|
35
|
+
|
|
36
|
+
constructor(
|
|
37
|
+
ctx: Context,
|
|
38
|
+
_configPool: ClientConfigPool<ClientConfig>,
|
|
39
|
+
public _pluginConfig: Config,
|
|
40
|
+
_plugin: ChatLunaPlugin
|
|
41
|
+
) {
|
|
42
|
+
super(ctx, _configPool, _pluginConfig, _plugin)
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
// 清理指定用户的会话
|
|
46
|
+
public clearSession(userId: string): boolean {
|
|
47
|
+
const hasSession = this.sessionMap.has(userId)
|
|
48
|
+
this.sessionMap.delete(userId)
|
|
49
|
+
this.modelMap.delete(userId)
|
|
50
|
+
return hasSession
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
// 清理所有会话
|
|
54
|
+
public clearAllSessions(): number {
|
|
55
|
+
const count = this.sessionMap.size
|
|
56
|
+
this.sessionMap.clear()
|
|
57
|
+
this.modelMap.clear()
|
|
58
|
+
return count
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
// 构建请求头
|
|
62
|
+
public buildHeaders() {
|
|
63
|
+
const headers: Record<string, string> = {
|
|
64
|
+
'accept': '*/*',
|
|
65
|
+
'content-type': 'application/json',
|
|
66
|
+
'origin': 'https://anuneko.com',
|
|
67
|
+
'referer': 'https://anuneko.com/',
|
|
68
|
+
'user-agent': 'Mozilla/5.0',
|
|
69
|
+
'x-app_id': 'com.anuttacon.neko',
|
|
70
|
+
'x-client_type': '4',
|
|
71
|
+
'x-device_id': '7b75a432-6b24-48ad-b9d3-3dc57648e3e3',
|
|
72
|
+
'x-token': this._pluginConfig.xToken
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
if (this._pluginConfig.cookie) {
|
|
76
|
+
headers['Cookie'] = this._pluginConfig.cookie
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
return headers
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
// 创建新会话
|
|
83
|
+
private async createNewSession(userId: string, modelName: string): Promise<string | null> {
|
|
84
|
+
const headers = this.buildHeaders()
|
|
85
|
+
const data = { model: modelName }
|
|
86
|
+
|
|
87
|
+
try {
|
|
88
|
+
logInfo('Creating new session with model:', modelName)
|
|
89
|
+
const response = await fetch('https://anuneko.com/api/v1/chat', {
|
|
90
|
+
method: 'POST',
|
|
91
|
+
headers,
|
|
92
|
+
body: JSON.stringify(data)
|
|
93
|
+
})
|
|
94
|
+
|
|
95
|
+
const responseData = await response.json()
|
|
96
|
+
const chatId = responseData.chat_id || responseData.id
|
|
97
|
+
if (chatId) {
|
|
98
|
+
this.sessionMap.set(userId, chatId)
|
|
99
|
+
this.modelMap.set(userId, modelName)
|
|
100
|
+
logInfo('New session created with ID:', chatId)
|
|
101
|
+
|
|
102
|
+
// 切换模型以确保一致性
|
|
103
|
+
await this.switchModel(userId, chatId, modelName)
|
|
104
|
+
return chatId
|
|
105
|
+
}
|
|
106
|
+
} catch (error) {
|
|
107
|
+
this.logger.error('Failed to create new session:', error)
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
return null
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
// 切换模型
|
|
114
|
+
private async switchModel(userId: string, chatId: string, modelName: string): Promise<boolean> {
|
|
115
|
+
const headers = this.buildHeaders()
|
|
116
|
+
const data = { chat_id: chatId, model: modelName }
|
|
117
|
+
|
|
118
|
+
try {
|
|
119
|
+
logInfo('Switching model to:', modelName)
|
|
120
|
+
const response = await fetch('https://anuneko.com/api/v1/user/select_model', {
|
|
121
|
+
method: 'POST',
|
|
122
|
+
headers,
|
|
123
|
+
body: JSON.stringify(data)
|
|
124
|
+
})
|
|
125
|
+
|
|
126
|
+
if (response.ok) {
|
|
127
|
+
this.modelMap.set(userId, modelName)
|
|
128
|
+
logInfo('Model switched successfully')
|
|
129
|
+
return true
|
|
130
|
+
}
|
|
131
|
+
} catch (error) {
|
|
132
|
+
this.logger.error('Failed to switch model:', error)
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
return false
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
// 自动选择分支
|
|
139
|
+
private async sendChoice(msgId: string): Promise<void> {
|
|
140
|
+
const headers = this.buildHeaders()
|
|
141
|
+
const data = { msg_id: msgId, choice_idx: 0 }
|
|
142
|
+
|
|
143
|
+
try {
|
|
144
|
+
await fetch('https://anuneko.com/api/v1/msg/select-choice', {
|
|
145
|
+
method: 'POST',
|
|
146
|
+
headers,
|
|
147
|
+
body: JSON.stringify(data)
|
|
148
|
+
})
|
|
149
|
+
logInfo('Choice sent for msg_id:', msgId)
|
|
150
|
+
} catch (error) {
|
|
151
|
+
this.logger.error('Failed to send choice:', error)
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
// 流式回复
|
|
156
|
+
async *completionStreamInternal(
|
|
157
|
+
params: ModelRequestParams
|
|
158
|
+
): AsyncGenerator<ChatGenerationChunk> {
|
|
159
|
+
const internalParams = params as InternalModelRequestParams
|
|
160
|
+
// 过滤掉所有非 HumanMessage 的消息,并只取最后一条
|
|
161
|
+
const humanMessages = internalParams.input.filter(
|
|
162
|
+
(message) => message instanceof HumanMessage
|
|
163
|
+
) as KoishiHumanMessage[]
|
|
164
|
+
const lastMessage = humanMessages.at(-1)
|
|
165
|
+
|
|
166
|
+
logInfo('Receive params from chatluna', JSON.stringify(params, null, 2))
|
|
167
|
+
|
|
168
|
+
if (!lastMessage) {
|
|
169
|
+
this.logger.warn('No human message found in the input.')
|
|
170
|
+
return
|
|
171
|
+
}
|
|
172
|
+
|
|
173
|
+
const prompt = lastMessage.content as string
|
|
174
|
+
// 使用 channelId 作为会话标识,如果没有则使用 userId
|
|
175
|
+
const sessionKey = lastMessage.channelId || lastMessage.id || 'default'
|
|
176
|
+
|
|
177
|
+
logInfo('使用会话标识:', sessionKey)
|
|
178
|
+
|
|
179
|
+
// 从模型名称推断使用的模型
|
|
180
|
+
let modelName = 'Orange Cat' // 默认橘猫
|
|
181
|
+
if (params.model.includes('exotic') || params.model.includes('shorthair')) {
|
|
182
|
+
modelName = 'Exotic Shorthair'
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
// 获取或创建会话
|
|
186
|
+
let sessionId = this.sessionMap.get(sessionKey)
|
|
187
|
+
const currentModel = this.modelMap.get(sessionKey)
|
|
188
|
+
|
|
189
|
+
// 如果没有会话或模型不匹配,创建新会话
|
|
190
|
+
if (!sessionId || currentModel !== modelName) {
|
|
191
|
+
sessionId = await this.createNewSession(sessionKey, modelName)
|
|
192
|
+
if (!sessionId) {
|
|
193
|
+
const errorText = '创建会话失败,请稍后再试。'
|
|
194
|
+
yield new ChatGenerationChunk({
|
|
195
|
+
text: errorText,
|
|
196
|
+
message: new AIMessageChunk({ content: errorText })
|
|
197
|
+
})
|
|
198
|
+
return
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
const headers = this.buildHeaders()
|
|
203
|
+
const url = `https://anuneko.com/api/v1/msg/${sessionId}/stream`
|
|
204
|
+
const data = { contents: [prompt] }
|
|
205
|
+
|
|
206
|
+
let retries = 3
|
|
207
|
+
while (retries > 0) {
|
|
208
|
+
try {
|
|
209
|
+
logInfo('Sending request to API:', url, JSON.stringify(data, null, 2))
|
|
210
|
+
|
|
211
|
+
let result = ''
|
|
212
|
+
let currentMsgId: string | null = null
|
|
213
|
+
|
|
214
|
+
// 使用流式请求
|
|
215
|
+
const response = await fetch(url, {
|
|
216
|
+
method: 'POST',
|
|
217
|
+
headers,
|
|
218
|
+
body: JSON.stringify(data)
|
|
219
|
+
})
|
|
220
|
+
|
|
221
|
+
if (!response.ok) {
|
|
222
|
+
throw new Error(`Request failed with status ${response.status}`)
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
// 处理流式响应
|
|
226
|
+
const reader = response.body.getReader()
|
|
227
|
+
const decoder = new TextDecoder()
|
|
228
|
+
|
|
229
|
+
while (true) {
|
|
230
|
+
const { done, value } = await reader.read()
|
|
231
|
+
if (done) break
|
|
232
|
+
|
|
233
|
+
const chunkStr = decoder.decode(value, { stream: true })
|
|
234
|
+
const lines = chunkStr.split('\n')
|
|
235
|
+
|
|
236
|
+
for (const line of lines) {
|
|
237
|
+
if (!line || !line.startsWith('data: ')) {
|
|
238
|
+
// 检查是否是错误响应
|
|
239
|
+
if (line.trim()) {
|
|
240
|
+
try {
|
|
241
|
+
const errorJson = JSON.parse(line)
|
|
242
|
+
if (errorJson.code === 'chat_choice_shown') {
|
|
243
|
+
const errorText = '⚠️ 检测到对话分支未选择,请重试或新建会话。'
|
|
244
|
+
yield new ChatGenerationChunk({
|
|
245
|
+
text: errorText,
|
|
246
|
+
message: new AIMessageChunk({ content: errorText })
|
|
247
|
+
})
|
|
248
|
+
return
|
|
249
|
+
}
|
|
250
|
+
} catch {
|
|
251
|
+
// 忽略解析错误
|
|
252
|
+
}
|
|
253
|
+
}
|
|
254
|
+
continue
|
|
255
|
+
}
|
|
256
|
+
|
|
257
|
+
const rawJson = line.substring(6).trim()
|
|
258
|
+
if (!rawJson) continue
|
|
259
|
+
|
|
260
|
+
try {
|
|
261
|
+
const j = JSON.parse(rawJson)
|
|
262
|
+
|
|
263
|
+
// 更新 msg_id
|
|
264
|
+
if (j.msg_id) {
|
|
265
|
+
currentMsgId = j.msg_id
|
|
266
|
+
}
|
|
267
|
+
|
|
268
|
+
// 处理多分支内容
|
|
269
|
+
if (j.c && Array.isArray(j.c)) {
|
|
270
|
+
for (const choice of j.c) {
|
|
271
|
+
const idx = choice.c ?? 0
|
|
272
|
+
if (idx === 0 && choice.v) {
|
|
273
|
+
result += choice.v
|
|
274
|
+
}
|
|
275
|
+
}
|
|
276
|
+
}
|
|
277
|
+
// 处理常规内容
|
|
278
|
+
else if (j.v && typeof j.v === 'string') {
|
|
279
|
+
result += j.v
|
|
280
|
+
}
|
|
281
|
+
} catch (error) {
|
|
282
|
+
logInfo('Failed to parse JSON:', rawJson, error)
|
|
283
|
+
}
|
|
284
|
+
}
|
|
285
|
+
}
|
|
286
|
+
|
|
287
|
+
// 流结束后,如果有 msg_id,自动确认选择第一项
|
|
288
|
+
if (currentMsgId) {
|
|
289
|
+
await this.sendChoice(currentMsgId)
|
|
290
|
+
}
|
|
291
|
+
|
|
292
|
+
logInfo('Received complete response:', result)
|
|
293
|
+
|
|
294
|
+
yield new ChatGenerationChunk({
|
|
295
|
+
text: result,
|
|
296
|
+
message: new AIMessageChunk({ content: result })
|
|
297
|
+
})
|
|
298
|
+
return // 成功,退出重试循环
|
|
299
|
+
} catch (error) {
|
|
300
|
+
this.logger.error(`Request failed, ${retries - 1} retries left.`, error)
|
|
301
|
+
retries--
|
|
302
|
+
if (retries === 0) {
|
|
303
|
+
const errorText = `请求失败,请稍后再试: ${error.message}`
|
|
304
|
+
yield new ChatGenerationChunk({
|
|
305
|
+
text: errorText,
|
|
306
|
+
message: new AIMessageChunk({ content: errorText })
|
|
307
|
+
})
|
|
308
|
+
} else {
|
|
309
|
+
await sleep(1000) // 等待1秒后重试
|
|
310
|
+
}
|
|
311
|
+
}
|
|
312
|
+
}
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
get logger() {
|
|
316
|
+
return logger
|
|
317
|
+
}
|
|
318
|
+
}
|