gitlab-ai-review 4.2.4 → 6.3.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/ai-client.js CHANGED
@@ -1,30 +1,75 @@
 /**
- * AI client - based on the ARK API (Doubao LLM)
- * Only responsible for calling the AI API; contains no business logic
+ * AI client - based on LangChain ChatOpenAI (WPS LLM Proxy)
+ * Implemented with @langchain/openai
  */
 
-import OpenAI from 'openai';
+import { ChatOpenAI } from '@langchain/openai';
+import { HumanMessage, SystemMessage, AIMessage } from '@langchain/core/messages';
 
 /**
- * AI client class - a thin wrapper around the API call
+ * AI client class - based on LangChain ChatOpenAI
  */
 export class AIClient {
   constructor(config = {}) {
     const apiKey = config.apiKey || process.env.ARK_API_KEY;
-    const baseURL = config.baseURL || 'https://ark.cn-beijing.volces.com/api/v3';
-    const model = config.model || 'doubao-seed-1-6-251015';
+    const baseURL = config.baseURL || process.env.WPS_BASE_URL;
+    const model = config.model || '643045305/doubao/Doubao-Seed-1.6//public';
+    const userId = config.userId || process.env.WPS_USER_ID;
 
     if (!apiKey) {
       throw new Error('ARK_API_KEY is not configured');
     }
 
-    this.openai = new OpenAI({
-      apiKey,
-      baseURL,
-    });
+    if (!baseURL) {
+      throw new Error('WPS_BASE_URL is not configured');
+    }
+
+    if (!userId) {
+      throw new Error('WPS_USER_ID is not configured');
+    }
 
+    this.apiKey = apiKey;
+    this.baseURL = baseURL;
     this.model = model;
+    this.userId = userId;
     this.config = config;
+
+    // Create the ChatOpenAI instance
+    this.chatModel = new ChatOpenAI({
+      model: this.model,
+      temperature: config.temperature || 0.7,
+      openAIApiKey: this.apiKey,
+      configuration: {
+        baseURL: this.baseURL,
+        defaultHeaders: {
+          'X-WPS-User-ID': this.userId,
+          'X-Api-Key': this.apiKey,
+        },
+      },
+    });
+  }
+
+  /**
+   * Convert a message array into LangChain message format
+   * @param {Array|string} prompt - Message array or a single prompt string
+   * @returns {Array} Array of LangChain messages
+   */
+  _convertToLangChainMessages(prompt) {
+    if (typeof prompt === 'string') {
+      return [new HumanMessage(prompt)];
+    }
+
+    return prompt.map((msg) => {
+      switch (msg.role) {
+        case 'system':
+          return new SystemMessage(msg.content);
+        case 'assistant':
+          return new AIMessage(msg.content);
+        case 'user':
+        default:
+          return new HumanMessage(msg.content);
+      }
+    });
   }
 
   /**
@@ -34,24 +79,39 @@ export class AIClient {
    * @returns {Promise<Object>} AI response
    */
   async sendMessage(prompt, options = {}) {
-    // If a string was passed in, convert it to a message array
-    const messages = typeof prompt === 'string'
-      ? [{ role: 'user', content: prompt }]
-      : prompt;
-
-    const completion = await this.openai.chat.completions.create({
-      messages,
-      model: options.model || this.model,
-      reasoning_effort: options.reasoningEffort || this.config.reasoningEffort || 'medium',
-      ...options,
-    });
+    try {
+      const messages = this._convertToLangChainMessages(prompt);
+
+      // If specific options are set, create a temporary model instance
+      let model = this.chatModel;
+      if (options.model || options.temperature !== undefined || options.seed !== undefined) {
+        model = new ChatOpenAI({
+          model: options.model || this.model,
+          temperature: options.temperature ?? 0.7,
+          openAIApiKey: this.apiKey,
+          configuration: {
+            baseURL: this.baseURL,
+            defaultHeaders: {
+              'X-WPS-User-ID': this.userId,
+              'X-Api-Key': this.apiKey,
+            },
+          },
+          modelKwargs: options.seed ? { seed: options.seed } : undefined,
+        });
+      }
 
-    return {
-      reasoning: completion.choices[0]?.message?.reasoning_content || '',
-      content: completion.choices[0]?.message?.content || '',
-      usage: completion.usage,
-      raw: completion,
-    };
+      const response = await model.invoke(messages);
+
+      return {
+        reasoning: response.additional_kwargs?.reasoning_content || '',
+        content: response.content || '',
+        usage: response.response_metadata?.usage || {},
+        raw: response,
+      };
+    } catch (error) {
+      console.error('AI call failed:', error);
+      throw error;
+    }
   }
 
   /**
@@ -62,59 +122,83 @@ export class AIClient {
    * @returns {Promise<Object>} Full AI response
    */
   async sendMessageStream(prompt, onChunk, options = {}) {
-    // If a string was passed in, convert it to a message array
-    const messages = typeof prompt === 'string'
-      ? [{ role: 'user', content: prompt }]
-      : prompt;
-
-    const stream = await this.openai.chat.completions.create({
-      messages,
-      model: options.model || this.model,
-      reasoning_effort: options.reasoningEffort || this.config.reasoningEffort || 'medium',
-      stream: true,
-      ...options,
-    });
+    try {
+      const messages = this._convertToLangChainMessages(prompt);
+
+      // If specific options are set, create a temporary model instance
+      let model = this.chatModel;
+      if (options.model || options.temperature !== undefined || options.seed !== undefined) {
+        model = new ChatOpenAI({
+          model: options.model || this.model,
+          temperature: options.temperature ?? 0.7,
+          openAIApiKey: this.apiKey,
+          configuration: {
+            baseURL: this.baseURL,
+            defaultHeaders: {
+              'X-WPS-User-ID': this.userId,
+              'X-Api-Key': this.apiKey,
+            },
+          },
+          modelKwargs: options.seed ? { seed: options.seed } : undefined,
+        });
+      }
+
+      let fullReasoning = '';
+      let fullContent = '';
 
-    let fullReasoning = '';
-    let fullContent = '';
+      // Use streaming invocation
+      const stream = await model.stream(messages);
 
-    for await (const part of stream) {
-      const reasoning = part.choices[0]?.delta?.reasoning_content || '';
-      const content = part.choices[0]?.delta?.content || '';
+      for await (const chunk of stream) {
+        const reasoning = chunk.additional_kwargs?.reasoning_content || '';
+        const content = chunk.content || '';
 
-      fullReasoning += reasoning;
-      fullContent += content;
+        fullReasoning += reasoning;
+        fullContent += content;
 
-      if (onChunk) {
-        onChunk({ reasoning, content });
+        if (onChunk) {
+          onChunk({ reasoning, content });
+        }
       }
+
+      return {
+        reasoning: fullReasoning,
+        content: fullContent,
+      };
+    } catch (error) {
+      console.error('AI streaming call failed:', error);
+      throw error;
     }
+  }
 
+  /**
+   * Get the current configuration
+   * @returns {Object} Configuration object
+   */
+  getConfig() {
     return {
-      reasoning: fullReasoning,
-      content: fullContent,
+      model: this.model,
+      baseURL: this.baseURL,
+      userId: this.userId,
+      ...this.config,
     };
   }
 
   /**
-   * Get the raw OpenAI client (advanced usage)
-   * @returns {OpenAI} OpenAI client instance
+   * Get the raw client (kept for compatibility)
+   * @returns {AIClient} Returns this instance
    */
   getClient() {
-    return this.openai;
+    return this;
   }
 
   /**
-   * Get the current configuration
-   * @returns {Object} Configuration object
+   * Get the LangChain ChatOpenAI instance
+   * @returns {ChatOpenAI} LangChain model instance
    */
-  getConfig() {
-    return {
-      model: this.model,
-      ...this.config,
-    };
+  getChatModel() {
+    return this.chatModel;
+  }
 }
 
 export default AIClient;
-
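
For reference, a minimal usage sketch of the rewritten 6.x client as shown in the diff above. This is illustrative only: the import path and the option values are assumptions, not taken from the package's documentation, and the constructor now throws unless ARK_API_KEY, WPS_BASE_URL and WPS_USER_ID are available (via config or the environment).

// Sketch: exercising the LangChain-backed AIClient from the diff above.
// The import path is assumed from the package layout (package/lib/ai-client.js).
import { AIClient } from 'gitlab-ai-review/lib/ai-client.js';

// Assumes ARK_API_KEY, WPS_BASE_URL and WPS_USER_ID are set in the environment.
const client = new AIClient({ temperature: 0.2 });

// Single-shot call: a plain string is wrapped in a HumanMessage internally.
const { content, reasoning, usage } = await client.sendMessage('Summarize this merge request diff.');
console.log(content, usage);

// Streaming call: each chunk is forwarded to the onChunk callback as it arrives.
await client.sendMessageStream(
  [
    { role: 'system', content: 'You are a code reviewer.' },
    { role: 'user', content: 'Review the following diff ...' },
  ],
  ({ content: delta }) => process.stdout.write(delta),
  { seed: 42 }, // passed through to the temporary ChatOpenAI instance via modelKwargs
);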