@templmf/temp-solf-lmf 0.0.48 → 0.0.49
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/api.js +51 -0
- package/chatApi.js +63 -0
- package/package.json +1 -1
- package/guanwang.zip +0 -0
package/api.js
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
/**
 * API configuration.
 * Create a .env.local file in the project root and fill in these variables:
 *
 * # Text LLM
 * VITE_API_BASE_URL=https://your-api-host.com
 * VITE_API_KEY=sk-xxxxxxxxxxxxxxxxxxxx
 * VITE_MODEL=gpt-4o
 *
 * # Multimodal vision model (separate endpoint / key; when unset, the
 * # text-LLM settings above are reused)
 * VITE_VL_API_BASE_URL=https://your-vl-api-host.com
 * VITE_VL_API_KEY=sk-xxxxxxxxxxxxxxxxxxxx
 * VITE_VL_MODEL=qwen3-vl-8b-instruct-k100
 */
const env = import.meta.env

export const API_CONFIG = {
  baseURL: env.VITE_API_BASE_URL || 'https://api.openai.com',
  apiKey: env.VITE_API_KEY || 'sk-your-key-here',
  model: env.VITE_MODEL || 'gpt-4o',

  vlBaseURL: env.VITE_VL_API_BASE_URL || null, // null → reuse baseURL
  vlApiKey: env.VITE_VL_API_KEY || null,       // null → reuse apiKey
  vlModel: env.VITE_VL_MODEL || null,
}
|
|
24
|
+
|
|
25
|
+
/**
 * Decide whether the conversation needs the vision model.
 *
 * A message requires vision when its content is a multimodal array that
 * contains at least one 'image_url' or 'image' block; plain string content
 * never does.
 */
export function isVisualRequest(messages) {
  for (const message of messages) {
    const content = message.content
    if (!Array.isArray(content)) continue
    if (content.some(block => block.type === 'image' || block.type === 'image_url')) {
      return true
    }
  }
  return false
}
|
|
34
|
+
|
|
35
|
+
/**
 * Pick the request configuration (model / baseURL / apiKey) that matches the
 * message content: the vision model when images are present AND a VL model is
 * configured, otherwise the plain text model. VL endpoint/key fall back to
 * the text-LLM values when unset.
 */
export function selectConfig(messages) {
  if (isVisualRequest(messages) && API_CONFIG.vlModel) {
    return {
      model: API_CONFIG.vlModel,
      baseURL: API_CONFIG.vlBaseURL || API_CONFIG.baseURL,
      apiKey: API_CONFIG.vlApiKey || API_CONFIG.apiKey,
    }
  }
  return {
    model: API_CONFIG.model,
    baseURL: API_CONFIG.baseURL,
    apiKey: API_CONFIG.apiKey,
  }
}
|
package/chatApi.js
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* chatApi.js
|
|
3
|
+
* 封装 OpenAI 兼容格式的流式对话接口(/v1/chat/completions)
|
|
4
|
+
* 支持多模态 content 数组格式(图片 / 文本混合)
|
|
5
|
+
*/
|
|
6
|
+
import { selectConfig } from '../config/api.js'
|
|
7
|
+
|
|
8
|
+
/**
 * Start a streaming chat request (OpenAI-compatible /v1/chat/completions
 * with stream: true) and emit delta tokens as they arrive over SSE.
 *
 * @param {object} options
 * @param {Array} options.messages - chat history; each content may be a plain
 *   string or a multimodal array (text / image blocks)
 * @param {Function} options.onToken - (token: string) => void, called per delta
 * @param {Function} options.onDone - called once the stream ends normally
 * @param {Function} options.onError - (error: Error) => void; NOT called when
 *   the request is aborted via `signal`
 * @param {AbortSignal} options.signal - lets the caller cancel the request
 */
export async function streamChat({ messages, onToken, onDone, onError, signal }) {
  // Forward one SSE line's delta token to the caller; ignore blank lines,
  // the [DONE] sentinel, non-"data:" lines, and non-JSON payloads.
  const handleLine = (line) => {
    const trimmed = line.trim()
    if (!trimmed || trimmed === 'data: [DONE]' || !trimmed.startsWith('data: ')) return
    try {
      const json = JSON.parse(trimmed.slice(6))
      const token = json.choices?.[0]?.delta?.content
      if (token) onToken(token)
    } catch { /* skip non-JSON lines */ }
  }

  try {
    const { model, baseURL, apiKey } = selectConfig(messages)

    const res = await fetch(`${baseURL}/v1/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
      },
      body: JSON.stringify({ model, stream: true, messages }),
      signal,
    })

    if (!res.ok) {
      const errBody = await res.text()
      throw new Error(`API 请求失败 ${res.status}: ${errBody}`)
    }
    // Guard: some environments can return a response without a readable body.
    if (!res.body) {
      throw new Error('API 请求失败: 响应没有可读的流')
    }

    const reader = res.body.getReader()
    const decoder = new TextDecoder('utf-8')
    let buffer = ''

    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      buffer += decoder.decode(value, { stream: true })
      const lines = buffer.split('\n')
      buffer = lines.pop() // keep the trailing partial line for the next chunk
      for (const line of lines) handleLine(line)
    }

    // Flush bytes still buffered inside the decoder (a multi-byte UTF-8
    // character may straddle the last chunk) and process a final line that
    // was not newline-terminated — previously this data was dropped.
    buffer += decoder.decode()
    if (buffer) handleLine(buffer)

    onDone?.()
  } catch (err) {
    if (err.name === 'AbortError') return // cancelled by caller — not an error
    onError?.(err)
  }
}
|
package/package.json
CHANGED
package/guanwang.zip
DELETED
|
Binary file
|