vue2server7 7.0.63 → 7.0.65

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/test/2.txt +31 -8
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "vue2server7",
3
- "version": "7.0.63",
3
+ "version": "7.0.65",
4
4
  "description": "",
5
5
  "scripts": {
6
6
  "dev": "nodemon --watch src --ext ts --exec \"ts-node src/app.ts\"",
package/test/2.txt CHANGED
@@ -1,14 +1,37 @@
1
- # ~/.continue/config.yaml
1
+ curl -X POST "http://maasapp.aip.bj.bob.test:8080/apis/ais-v2/chat/completions" \
2
+ -H "Authorization: Bearer sk-f154d42f-6976-478f-b444-88b90f9dc67b" \
3
+ -H "X-LLM-Application-Tag: proxyai" \
4
+ -H "Content-Type: application/json" \
5
+ -d '{
6
+ "model": "qwen15-32b",
7
+ "messages": [{"role": "user", "content": "测试:请用一句话介绍自己"}],
8
+ "temperature": 0.1,
9
+ "max_tokens": 50,
10
+ "stream": false
11
+ }'
2
12
 
3
- name: Internal Qwen15-32B
13
+
14
+
15
+ name: Internal Qwen15-32B (proxyAI Verified)
4
16
  models:
5
- - title: "Qwen15-32B (Internal)"
17
+ - title: "Qwen15-32B"
6
18
  model: "qwen15-32b"
7
- provider: "openai"
8
- apiBase: "http://maasapp.aip.bj.bob.test:8080/apis/ais-v2"
9
- apiKey: "你的实际API密钥" # 替换为 $CUSTOM_SERVICE_API_KEY 的真实值
19
+ provider: "custom"
20
+ apiBase: "http://maasapp.aip.bj.bob.test:8080"
21
+ apiKey: "sk-f154d42f-6976-478f-b444-88b90f9dc67b" # 直接写明,避免变量问题
10
22
  headers:
23
+ Authorization: "Bearer sk-f154d42f-6976-478f-b444-88b90f9dc67b"
11
24
  X-LLM-Application-Tag: "proxyai"
25
+ Content-Type: "application/json"
12
26
  enableCaching: false
13
- maxTokens: 8192 # 建议先不要设太高,避免服务端 OOM
14
- temperature: 0.1
27
+ maxTokens: 512
28
+ temperature: 0.1
29
+ customRequest:
30
+ url: "/apis/ais-v2/chat/completions"
31
+ method: "POST"
32
+ body:
33
+ model: "{{model}}"
34
+ messages: "{{messages}}"
35
+ temperature: "{{temperature}}"
36
+ max_tokens: "{{maxTokens}}" # 用下划线(更安全)
37
+ stream: false # ❗先关掉流式!proxyAI 可能实际发的是 false