vue2server7 7.0.99 → 7.0.101
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- package/1.js +218 -0
- package/2 +30 -0
- package/package.json +1 -1
- package/test/111 +39 -0
- package/11 +0 -160
package/1.js
ADDED
@@ -0,0 +1,218 @@
+import express from "express";
+import cors from "cors";
+import axios from "axios";
+import dotenv from "dotenv";
+
+dotenv.config();
+
+const app = express();
+
+app.use(cors());
+app.use(express.json({ limit: "20mb" }));
+
+const PORT = Number(process.env.PORT || 3000);
+const TARGET_URL = process.env.TARGET_URL;
+const TARGET_API_KEY = process.env.TARGET_API_KEY;
+const TARGET_APP_TAG = process.env.TARGET_APP_TAG || "proxyai";
+const TARGET_MODEL = process.env.TARGET_MODEL || "qwen15-32b";
+const TARGET_TEMPERATURE = Number(process.env.TARGET_TEMPERATURE || 0.1);
+const TARGET_MAX_TOKENS = Number(process.env.TARGET_MAX_TOKENS || 20000);
+
+function normalizeMessages(messages) {
+  if (!Array.isArray(messages)) return [];
+
+  return messages.map((item) => {
+    // Continue / OpenAI standard format: pass through as-is
+    if (typeof item?.content === "string") {
+      return {
+        role: item.role || "user",
+        content: item.content
+      };
+    }
+
+    // Also handle the case where content is an array of parts
+    if (Array.isArray(item?.content)) {
+      const text = item.content
+        .map((part) => {
+          if (typeof part === "string") return part;
+          if (part?.type === "text") return part.text || "";
+          return "";
+        })
+        .join("");
+
+      return {
+        role: item.role || "user",
+        content: text
+      };
+    }
+
+    return {
+      role: item?.role || "user",
+      content: ""
+    };
+  });
+}
+
+function buildOpenAIResponse({ model, content, usage }) {
+  return {
+    id: `chatcmpl-${Date.now()}`,
+    object: "chat.completion",
+    created: Math.floor(Date.now() / 1000),
+    model,
+    choices: [
+      {
+        index: 0,
+        message: {
+          role: "assistant",
+          content: content ?? ""
+        },
+        finish_reason: "stop"
+      }
+    ],
+    usage: usage || {
+      prompt_tokens: 0,
+      completion_tokens: 0,
+      total_tokens: 0
+    }
+  };
+}
+
+app.get("/health", (req, res) => {
+  res.json({ ok: true });
+});
+
+app.get("/v1/models", (req, res) => {
+  res.json({
+    object: "list",
+    data: [
+      {
+        id: TARGET_MODEL,
+        object: "model",
+        created: Math.floor(Date.now() / 1000),
+        owned_by: "continue-proxy"
+      }
+    ]
+  });
+});
+
+app.post("/v1/chat/completions", async (req, res) => {
+  const requestId = `req_${Date.now()}`;
+
+  try {
+    const incoming = req.body || {};
+    const incomingMessages = normalizeMessages(incoming.messages);
+
+    // Build the upstream body strictly following the ProxyAI configuration
+    const upstreamBody = {
+      stream: typeof incoming.stream === "boolean" ? incoming.stream : true,
+      model: incoming.model || TARGET_MODEL,
+      messages: incomingMessages,
+      temperature:
+        typeof incoming.temperature === "number"
+          ? incoming.temperature
+          : TARGET_TEMPERATURE,
+      max_tokens:
+        typeof incoming.max_tokens === "number"
+          ? incoming.max_tokens
+          : typeof incoming.max_completion_tokens === "number"
+            ? incoming.max_completion_tokens
+            : TARGET_MAX_TOKENS
+    };
+
+    const upstreamHeaders = {
+      Authorization: `Bearer ${TARGET_API_KEY}`,
+      "X-LLM-Application-Tag": TARGET_APP_TAG,
+      "Content-Type": "application/json"
+    };
+
+    console.log(`\n[${requestId}] ===== incoming body =====`);
+    console.log(JSON.stringify(incoming, null, 2));
+
+    console.log(`\n[${requestId}] ===== upstream headers =====`);
+    console.log({
+      Authorization: "Bearer ***",
+      "X-LLM-Application-Tag": TARGET_APP_TAG,
+      "Content-Type": "application/json"
+    });
+
+    console.log(`\n[${requestId}] ===== upstream body =====`);
+    console.log(JSON.stringify(upstreamBody, null, 2));
+
+    // Forward as non-streaming first; this is the most reliable option,
+    // since with stream=true many intranet services reply in a format Continue may not recognize
+    const forceNonStream = true;
+    if (forceNonStream) {
+      upstreamBody.stream = false;
+    }
+
+    const upstreamResp = await axios.post(TARGET_URL, upstreamBody, {
+      headers: upstreamHeaders,
+      timeout: 120000,
+      validateStatus: () => true
+    });
+
+    console.log(`\n[${requestId}] ===== upstream status =====`);
+    console.log(upstreamResp.status);
+
+    console.log(`\n[${requestId}] ===== upstream data =====`);
+    console.log(
+      typeof upstreamResp.data === "string"
+        ? upstreamResp.data
+        : JSON.stringify(upstreamResp.data, null, 2)
+    );
+
+    if (upstreamResp.status >= 400) {
+      return res.status(upstreamResp.status).json({
+        error: {
+          message:
+            typeof upstreamResp.data === "string"
+              ? upstreamResp.data
+              : JSON.stringify(upstreamResp.data),
+          type: "upstream_error",
+          code: upstreamResp.status
+        }
+      });
+    }
+
+    const data = upstreamResp.data;
+
+    // Case 1: the upstream response is already in OpenAI format
+    if (data?.choices?.[0]?.message?.content !== undefined) {
+      return res.json(data);
+    }
+
+    // Case 2: extract text from common response fields
+    const text =
+      data?.choices?.[0]?.text ??
+      data?.data?.content ??
+      data?.data?.text ??
+      data?.text ??
+      data?.reply ??
+      data?.result ??
+      "";
+
+    return res.json(
+      buildOpenAIResponse({
+        model: upstreamBody.model,
+        content: text,
+        usage: data?.usage
+      })
+    );
+  } catch (error) {
+    console.error("\n===== proxy exception =====");
+    console.error(error?.message);
+    console.error(error?.response?.data);
+
+    return res.status(500).json({
+      error: {
+        message: error?.message || "proxy internal error",
+        type: "proxy_internal_error",
+        code: 500
+      }
+    });
+  }
+});
+
+app.listen(PORT, () => {
+  console.log(`Proxy server running at http://127.0.0.1:${PORT}`);
+});
package/2
ADDED
@@ -0,0 +1,30 @@
+name: Internal Qwen Proxy
+version: 1.0.0
+schema: v1
+
+models:
+  - name: qwen15-32b
+    provider: openai
+    model: qwen15-32b
+    apiKey: anything
+    apiBase: http://127.0.0.1:3000/v1
+
+    defaultCompletionOptions:
+      temperature: 0.1
+      maxTokens: 20000
+
+
+PORT=3000
+
+TARGET_URL=http://maasapp.aip.bj.bob.test8080/apis/ais-v2/chat/completions
+TARGET_API_KEY=the real API_KEY you filled in at the top of ProxyAI
+TARGET_APP_TAG=proxyai
+TARGET_MODEL=qwen15-32b
+TARGET_TEMPERATURE=0.1
+TARGET_MAX_TOKENS=20000
+
+
+
+curl -X POST "http://127.0.0.1:3000/v1/chat/completions" ^
+  -H "Content-Type: application/json" ^
+  -d "{\"model\":\"qwen15-32b\",\"messages\":[{\"role\":\"user\",\"content\":\"你好\"}]}"
package/package.json
CHANGED
package/test/111
ADDED
@@ -0,0 +1,39 @@
+// src/utils/configWatcher.js
+
+const CONFIG_VERSION_KEY = 'APP_CONFIG_VERSION'
+
+function getConfigVersionFromText(text) {
+  const match = text.match(/VERSION:\s*['"](.+?)['"]/)
+  return match?.[1]
+}
+
+export function startConfigWatcher(interval = 10000) {
+  setInterval(async () => {
+    try {
+      const res = await fetch(`/config.js?t=${Date.now()}`, {
+        cache: 'no-store'
+      })
+
+      const text = await res.text()
+      const latestVersion = getConfigVersionFromText(text)
+
+      if (!latestVersion) return
+
+      const localVersion = localStorage.getItem(CONFIG_VERSION_KEY)
+
+      // First run: no local version stored yet, just save it
+      if (!localVersion) {
+        localStorage.setItem(CONFIG_VERSION_KEY, latestVersion)
+        return
+      }
+
+      // Version changed: update the local version and reload the page
+      if (latestVersion !== localVersion) {
+        localStorage.setItem(CONFIG_VERSION_KEY, latestVersion)
+        window.location.reload()
+      }
+    } catch (err) {
+      console.error('Config version check failed:', err)
+    }
+  }, interval)
+}
package/11
DELETED
@@ -1,160 +0,0 @@
-import express from "express";
-import axios from "axios";
-import cors from "cors";
-
-const app = express();
-
-app.use(cors());
-app.use(express.json({ limit: "10mb" }));
-
-// ===== Your intranet model gateway configuration =====
-const TARGET_URL = "http://maasapp.aip.bj.bob.test8080/apis/ais-v2/chat/completions";
-const TARGET_API_KEY = "your real intranet API_KEY";
-const TARGET_APP_TAG = "proxyai";
-const DEFAULT_MODEL = "qwen15-32b";
-
-// ===== Health check =====
-app.get("/health", (req, res) => {
-  res.json({ ok: true });
-});
-
-// ===== Optional: model list endpoint for Continue =====
-app.get("/v1/models", (req, res) => {
-  res.json({
-    object: "list",
-    data: [
-      {
-        id: DEFAULT_MODEL,
-        object: "model",
-        created: Math.floor(Date.now() / 1000),
-        owned_by: "local-proxy"
-      }
-    ]
-  });
-});
-
-// ===== OpenAI Chat Completions compatible endpoint =====
-app.post("/v1/chat/completions", async (req, res) => {
-  try {
-    const {
-      model,
-      messages,
-      temperature,
-      max_tokens,
-      stream
-    } = req.body;
-
-    const finalModel = model || DEFAULT_MODEL;
-    const finalStream = !!stream;
-
-    const requestBody = {
-      model: finalModel,
-      messages: Array.isArray(messages) ? messages : [],
-      temperature: typeof temperature === "number" ? temperature : 0.1,
-      max_tokens: typeof max_tokens === "number" ? max_tokens : 20000,
-      stream: finalStream
-    };
-
-    // ===== Non-streaming =====
-    if (!finalStream) {
-      const response = await axios.post(TARGET_URL, requestBody, {
-        headers: {
-          Authorization: `Bearer ${TARGET_API_KEY}`,
-          "X-LLM-Application-Tag": TARGET_APP_TAG,
-          "Content-Type": "application/json"
-        },
-        timeout: 120000
-      });
-
-      // If your intranet response is already close to OpenAI format, it could be passed through directly;
-      // a compatibility layer here is more robust
-      const data = response.data;
-
-      // Compatibility: the upstream is already in OpenAI format
-      if (data?.choices) {
-        return res.json(data);
-      }
-
-      // Compatibility: the upstream returns some other structure, try to extract the text
-      const text =
-        data?.choices?.[0]?.message?.content ??
-        data?.data?.content ??
-        data?.data?.text ??
-        data?.text ??
-        data?.reply ??
-        "";
-
-      return res.json({
-        id: `chatcmpl-${Date.now()}`,
-        object: "chat.completion",
-        created: Math.floor(Date.now() / 1000),
-        model: finalModel,
-        choices: [
-          {
-            index: 0,
-            message: {
-              role: "assistant",
-              content: text
-            },
-            finish_reason: "stop"
-          }
-        ],
-        usage: data?.usage || {
-          prompt_tokens: 0,
-          completion_tokens: 0,
-          total_tokens: 0
-        }
-      });
-    }
-
-    // ===== Streaming =====
-    const response = await axios.post(TARGET_URL, requestBody, {
-      headers: {
-        Authorization: `Bearer ${TARGET_API_KEY}`,
-        "X-LLM-Application-Tag": TARGET_APP_TAG,
-        "Content-Type": "application/json"
-      },
-      responseType: "stream",
-      timeout: 120000
-    });
-
-    res.setHeader("Content-Type", "text/event-stream; charset=utf-8");
-    res.setHeader("Cache-Control", "no-cache, no-transform");
-    res.setHeader("Connection", "keep-alive");
-
-    response.data.on("data", (chunk) => {
-      const text = chunk.toString("utf8");
-
-      // Case 1: the intranet already emits standard SSE, pass it through directly
-      // If it is not standard SSE, the conversion can be done here
-      res.write(text);
-    });
-
-    response.data.on("end", () => {
-      res.end();
-    });
-
-    response.data.on("error", (err) => {
-      console.error("stream error:", err.message);
-      res.end();
-    });
-  } catch (error) {
-    const status = error.response?.status || 500;
-    const detail = error.response?.data || error.message;
-
-    console.error("proxy error:", detail);
-
-    res.status(status).json({
-      error: {
-        message: typeof detail === "string" ? detail : JSON.stringify(detail),
-        type: "proxy_error",
-        code: status
-      }
-    });
-  }
-});
-
-const PORT = 3000;
-app.listen(PORT, () => {
-  console.log(`Proxy server running at http://127.0.0.1:${PORT}`);
});
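
For reference, a minimal smoke test for the rewritten proxy in package/1.js (a hypothetical snippet, not part of the package; it assumes the server is running locally on the default port 3000 with TARGET_URL and TARGET_API_KEY configured, and Node 18+ for the global fetch):

// check.mjs: hypothetical client-side smoke test (run with `node check.mjs`)
const res = await fetch("http://127.0.0.1:3000/v1/chat/completions", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "qwen15-32b",
    messages: [{ role: "user", content: "hello" }]
  })
});

// The proxy always answers in OpenAI chat.completion shape,
// even when the upstream payload uses a different structure.
const data = await res.json();
console.log(data.choices?.[0]?.message?.content);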