markpdfdown 0.1.8-beta.6 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/dist/main/index.js +398 -14
  2. package/dist/preload/index.js +20 -1
  3. package/dist/renderer/assets/{index-yTU2skrN.css → index-B_JfRqwM.css} +54 -0
  4. package/dist/renderer/assets/{index-iEK8qT5k.js → index-wHB9i2kW.js} +623 -320
  5. package/dist/renderer/index.html +2 -2
  6. package/package.json +10 -3
  7. package/dist/app/app.js +0 -49
  8. package/dist/app/controllers/completionController.js +0 -19
  9. package/dist/app/controllers/modelController.js +0 -53
  10. package/dist/app/controllers/providerController.js +0 -120
  11. package/dist/app/dal/modelDal.js +0 -44
  12. package/dist/app/dal/providerDal.js +0 -78
  13. package/dist/app/db/index.js +0 -56
  14. package/dist/app/db/migration.js +0 -157
  15. package/dist/app/logic/llm/AnthropicClient.js +0 -219
  16. package/dist/app/logic/llm/AzureOpenAIClient.js +0 -239
  17. package/dist/app/logic/llm/GeminiClient.js +0 -212
  18. package/dist/app/logic/llm/LLMClient.js +0 -80
  19. package/dist/app/logic/llm/OpenAIClient.js +0 -235
  20. package/dist/app/logic/llm/example-advanced.js +0 -232
  21. package/dist/app/logic/llm/index.js +0 -14
  22. package/dist/app/logic/model.js +0 -27
  23. package/dist/app/middleware/logger.js +0 -23
  24. package/dist/app/routes/routes.js +0 -16
  25. package/dist/app/types/Provider.js +0 -1
  26. package/dist/server/controllers/FileController.js +0 -64
  27. package/dist/server/controllers/TaskController.js +0 -57
  28. package/dist/server/controllers/completionController.js +0 -64
  29. package/dist/server/controllers/modelController.js +0 -74
  30. package/dist/server/controllers/providerController.js +0 -120
  31. package/dist/server/dal/TaskDal.js +0 -67
  32. package/dist/server/dal/modelDal.js +0 -44
  33. package/dist/server/dal/providerDal.js +0 -83
  34. package/dist/server/db/index.js +0 -57
  35. package/dist/server/db/migration.js +0 -157
  36. package/dist/server/index.js +0 -49
  37. package/dist/server/logic/File.js +0 -34
  38. package/dist/server/logic/Task.js +0 -21
  39. package/dist/server/logic/llm/AnthropicClient.js +0 -220
  40. package/dist/server/logic/llm/AzureOpenAIClient.js +0 -239
  41. package/dist/server/logic/llm/GeminiClient.js +0 -213
  42. package/dist/server/logic/llm/LLMClient.js +0 -83
  43. package/dist/server/logic/llm/OllamaClient.js +0 -220
  44. package/dist/server/logic/llm/OpenAIClient.js +0 -235
  45. package/dist/server/logic/llm/example-advanced.js +0 -231
  46. package/dist/server/logic/llm/index.js +0 -15
  47. package/dist/server/logic/model.js +0 -59
  48. package/dist/server/middleware/logger.js +0 -23
  49. package/dist/server/routes/routes.js +0 -30
  50. package/dist/server/types/Provider.js +0 -1
  51. package/dist/server/types/Task.js +0 -1
@@ -1,239 +0,0 @@
1
- import { LLMClient } from './LLMClient.js';
2
- /**
3
- * Azure OpenAI客户端实现
4
- */
5
- export class AzureOpenAIClient extends LLMClient {
6
- constructor(apiKey, baseUrl, deploymentName = 'gpt-35-turbo', apiVersion = '2023-05-15') {
7
- // Azure OpenAI需要完整的资源URL
8
- super(apiKey, baseUrl);
9
- this.deploymentName = deploymentName;
10
- this.apiVersion = apiVersion;
11
- }
12
- /**
13
- * 执行Azure OpenAI文本补全
14
- */
15
- async completion(options) {
16
- try {
17
- // 标准化选项,处理向后兼容
18
- const normalizedOptions = this.normalizeOptions(options);
19
- // Azure OpenAI端点格式
20
- const endpoint = `${this.baseUrl}${normalizedOptions.model || this.deploymentName}/chat/completions?api-version=${this.apiVersion}`;
21
- // 转换消息格式为OpenAI格式(Azure OpenAI API兼容OpenAI API)
22
- const openaiMessages = this.convertMessagesToOpenAIFormat(normalizedOptions.messages);
23
- const requestBody = {
24
- messages: openaiMessages,
25
- temperature: normalizedOptions.temperature ?? 0.7,
26
- max_tokens: normalizedOptions.maxTokens,
27
- stream: normalizedOptions.stream || false
28
- };
29
- // 添加工具配置(如果有)
30
- if (normalizedOptions.tools && normalizedOptions.tools.length > 0) {
31
- requestBody.tools = normalizedOptions.tools;
32
- if (normalizedOptions.tool_choice) {
33
- requestBody.tool_choice = normalizedOptions.tool_choice;
34
- }
35
- }
36
- // 添加响应格式(如果指定)
37
- if (normalizedOptions.response_format) {
38
- requestBody.response_format = normalizedOptions.response_format;
39
- }
40
- const response = await fetch(endpoint, {
41
- method: 'POST',
42
- headers: {
43
- 'Content-Type': 'application/json',
44
- 'api-key': normalizedOptions.apiKey || this.apiKey
45
- },
46
- body: JSON.stringify(requestBody)
47
- });
48
- console.log(`[${new Date().toISOString()}] POST ${endpoint} ${response.status} - ${response.statusText}`);
49
- if (!response.ok) {
50
- const error = await response.json();
51
- throw new Error(`Azure OpenAI API错误: ${error.error?.message || response.statusText}`);
52
- }
53
- if (normalizedOptions.stream && response.body && normalizedOptions.onUpdate) {
54
- // 处理流式响应
55
- const reader = response.body.getReader();
56
- const decoder = new TextDecoder('utf-8');
57
- let content = '';
58
- const toolCalls = [];
59
- const processStream = async () => {
60
- const { done, value } = await reader.read();
61
- if (done) {
62
- return {
63
- content,
64
- model: normalizedOptions.model || this.deploymentName,
65
- finishReason: 'stop',
66
- toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
67
- responseFormat: normalizedOptions.response_format?.type
68
- };
69
- }
70
- // 解析流式响应数据
71
- const chunk = decoder.decode(value);
72
- const lines = chunk
73
- .split('\n')
74
- .filter(line => line.trim() !== '' && line.trim() !== 'data: [DONE]');
75
- for (const line of lines) {
76
- if (line.startsWith('data: ')) {
77
- try {
78
- const data = JSON.parse(line.slice(6));
79
- // 处理常规文本内容
80
- if (data.choices && data.choices[0]?.delta?.content) {
81
- const newContent = data.choices[0].delta.content;
82
- content += newContent;
83
- if (normalizedOptions.onUpdate) {
84
- normalizedOptions.onUpdate(content);
85
- }
86
- }
87
- // 处理工具调用
88
- if (data.choices && data.choices[0]?.delta?.tool_calls) {
89
- const deltaToolCalls = data.choices[0].delta.tool_calls;
90
- for (const deltaTool of deltaToolCalls) {
91
- // 查找现有工具调用或创建新的
92
- let toolCall = toolCalls.find(tc => tc.id === deltaTool.id);
93
- if (!toolCall && deltaTool.id) {
94
- toolCall = {
95
- id: deltaTool.id,
96
- type: 'function',
97
- function: {
98
- name: '',
99
- arguments: ''
100
- }
101
- };
102
- toolCalls.push(toolCall);
103
- }
104
- if (toolCall && deltaTool.function) {
105
- if (deltaTool.function.name) {
106
- toolCall.function.name = deltaTool.function.name;
107
- }
108
- if (deltaTool.function.arguments) {
109
- toolCall.function.arguments += deltaTool.function.arguments;
110
- }
111
- }
112
- }
113
- // 如果有工具调用,同时更新内容
114
- if (normalizedOptions.onUpdate) {
115
- normalizedOptions.onUpdate(content);
116
- }
117
- }
118
- }
119
- catch (e) {
120
- // 忽略解析错误
121
- }
122
- }
123
- }
124
- return processStream();
125
- };
126
- return processStream();
127
- }
128
- else {
129
- // 处理普通响应
130
- const data = await response.json();
131
- // 提取响应内容
132
- let responseContent = '';
133
- const toolCalls = [];
134
- if (data.choices && data.choices[0]?.message) {
135
- const message = data.choices[0].message;
136
- // 提取文本内容
137
- if (typeof message.content === 'string') {
138
- responseContent = message.content;
139
- }
140
- // 提取工具调用
141
- if (message.tool_calls && message.tool_calls.length > 0) {
142
- for (const toolCall of message.tool_calls) {
143
- toolCalls.push({
144
- id: toolCall.id,
145
- type: toolCall.type,
146
- function: {
147
- name: toolCall.function.name,
148
- arguments: toolCall.function.arguments
149
- }
150
- });
151
- }
152
- }
153
- }
154
- return {
155
- content: responseContent,
156
- model: data.model || normalizedOptions.model || this.deploymentName,
157
- finishReason: data.choices[0]?.finish_reason,
158
- toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
159
- responseFormat: normalizedOptions.response_format?.type,
160
- rawResponse: data // 保留原始响应以便调试
161
- };
162
- }
163
- }
164
- catch (error) {
165
- const errorMessage = error instanceof Error ? error.message : String(error);
166
- throw new Error(`Azure OpenAI补全请求失败: ${errorMessage}`);
167
- }
168
- }
169
- /**
170
- * 将消息转换为OpenAI API格式
171
- */
172
- convertMessagesToOpenAIFormat(messages) {
173
- return messages.map(message => {
174
- const openaiMessage = {
175
- role: message.role
176
- };
177
- // 处理名称字段(如果存在)
178
- if (message.name) {
179
- openaiMessage.name = message.name;
180
- }
181
- // 处理内容
182
- if (Array.isArray(message.content)) {
183
- // 处理多部分内容
184
- openaiMessage.content = message.content.map(content => this.convertContentToOpenAIFormat(content));
185
- }
186
- else {
187
- // 处理单一内容
188
- const content = this.convertContentToOpenAIFormat(message.content);
189
- // 如果是简单的文本内容,则直接使用字符串
190
- if (content.type === 'text') {
191
- openaiMessage.content = content.text;
192
- }
193
- else {
194
- openaiMessage.content = [content];
195
- }
196
- }
197
- return openaiMessage;
198
- });
199
- }
200
- /**
201
- * 将内容对象转换为OpenAI API格式
202
- */
203
- convertContentToOpenAIFormat(content) {
204
- switch (content.type) {
205
- case 'text':
206
- return {
207
- type: 'text',
208
- text: content.text
209
- };
210
- case 'image_url':
211
- const imageContent = content;
212
- return {
213
- type: 'image_url',
214
- image_url: {
215
- url: imageContent.image_url.url,
216
- }
217
- };
218
- case 'tool_call':
219
- const toolCallContent = content;
220
- return {
221
- type: 'tool_call',
222
- tool_call_id: toolCallContent.tool_call_id,
223
- function: {
224
- name: toolCallContent.function.name,
225
- arguments: toolCallContent.function.arguments
226
- }
227
- };
228
- case 'tool_result':
229
- const toolResultContent = content;
230
- return {
231
- type: 'tool_result',
232
- tool_call_id: toolResultContent.tool_call_id,
233
- content: toolResultContent.content
234
- };
235
- default:
236
- throw new Error(`不支持的内容类型: ${content.type}`);
237
- }
238
- }
239
- }
@@ -1,213 +0,0 @@
1
- import { LLMClient } from './LLMClient.js';
2
/**
 * Google Gemini client implementation.
 *
 * Uses the `generateContent` REST endpoint. Gemini has its own message shape
 * (role/parts), no native system role, and no native tool-call content, so
 * messages are converted before sending.
 */
export class GeminiClient extends LLMClient {
    /**
     * @param {string} apiKey - Google AI API key (sent as the `key` query parameter).
     * @param {string} [baseUrl] - Override for the models endpoint; defaults to the public v1 URL.
     */
    constructor(apiKey, baseUrl) {
        super(apiKey, baseUrl || 'https://generativelanguage.googleapis.com/v1/models');
    }
    /**
     * Execute a Gemini text completion.
     *
     * @param {object} options - Completion options; legacy `prompt`/`systemPrompt`
     *   fields are normalized into `messages` by `normalizeOptions`.
     * @returns {Promise<object>} `{ content, model, finishReason, responseFormat?, rawResponse }`.
     * @throws {Error} When the HTTP request fails or the response shape is unexpected.
     */
    async completion(options) {
        try {
            // Normalize options for backward compatibility (prompt/systemPrompt -> messages).
            const normalizedOptions = this.normalizeOptions(options);
            const modelName = normalizedOptions.model || 'gemini-1.5-pro';
            // FIX: URL-encode the key so keys containing reserved characters
            // (&, =, +, ...) cannot corrupt the query string.
            const endpoint = `${this.baseUrl}/${modelName}:generateContent?key=${encodeURIComponent(normalizedOptions.apiKey || this.apiKey)}`;
            // Convert messages into Gemini's role/parts format.
            const geminiContents = this.convertMessagesToGeminiFormat(normalizedOptions.messages);
            const requestBody = {
                contents: geminiContents,
                generationConfig: {
                    temperature: normalizedOptions.temperature ?? 0.7,
                    maxOutputTokens: normalizedOptions.maxTokens,
                    topP: 0.95
                }
            };
            // Gemini uses a MIME-type hint instead of OpenAI's response_format.
            if (normalizedOptions.response_format?.type === 'json_object') {
                requestBody.generationConfig.response_mime_type = 'application/json';
            }
            const response = await fetch(endpoint, {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json'
                },
                body: JSON.stringify(requestBody)
            });
            console.log(`[${new Date().toISOString()}] POST ${endpoint} ${response.status} - ${response.statusText}`);
            if (!response.ok) {
                // FIX: the error body is not guaranteed to be JSON; guard the parse so
                // the real HTTP status is reported instead of a JSON parse error.
                let apiErrorMessage;
                try {
                    const error = await response.json();
                    apiErrorMessage = error.error?.message;
                }
                catch {
                    apiErrorMessage = undefined;
                }
                throw new Error(`Gemini API错误: ${apiErrorMessage || response.statusText}`);
            }
            const data = await response.json();
            // Gemini's response shape differs from OpenAI-style APIs: text lives in
            // candidates[0].content.parts[].text.
            if (data.candidates && data.candidates.length > 0) {
                const candidate = data.candidates[0];
                let content = '';
                if (candidate.content && candidate.content.parts) {
                    for (const part of candidate.content.parts) {
                        if (part.text) {
                            content += part.text;
                        }
                    }
                }
                return {
                    content,
                    model: modelName,
                    finishReason: candidate.finishReason,
                    responseFormat: normalizedOptions.response_format?.type,
                    rawResponse: data // keep the raw payload for debugging
                };
            }
            throw new Error('Gemini API返回格式错误');
        }
        catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            throw new Error(`Gemini补全请求失败: ${errorMessage}`);
        }
    }
    /**
     * Convert internal messages to the Gemini API format.
     *
     * Adjacent messages with the same (mapped) role are merged into one content
     * block, as Gemini expects alternating roles. System messages are skipped in
     * the main pass and prepended to the first user message afterwards.
     *
     * @param {Array<object>} messages - Internal messages.
     * @returns {Array<object>} Gemini `contents` array.
     */
    convertMessagesToGeminiFormat(messages) {
        const geminiContents = [];
        let currentRole = null;
        let currentContent = [];
        for (const message of messages) {
            // Gemini has no system role; handled separately below.
            if (message.role === 'system') {
                continue;
            }
            // When the mapped role changes, flush the accumulated parts.
            const geminiRole = this.mapRoleToGemini(message.role);
            if (currentRole !== geminiRole && currentContent.length > 0) {
                geminiContents.push({
                    role: currentRole,
                    parts: currentContent
                });
                currentContent = [];
            }
            currentRole = geminiRole;
            if (Array.isArray(message.content)) {
                // Multi-part content.
                for (const content of message.content) {
                    currentContent.push(this.convertContentToGeminiFormat(content));
                }
            }
            else {
                // Single content item.
                currentContent.push(this.convertContentToGeminiFormat(message.content));
            }
        }
        // Flush the final group.
        if (currentRole && currentContent.length > 0) {
            geminiContents.push({
                role: currentRole,
                parts: currentContent
            });
        }
        // System messages: prepend their text to the first user message.
        const systemMessages = messages.filter(msg => msg.role === 'system');
        if (systemMessages.length > 0 && geminiContents.length > 0 && geminiContents[0].role === 'user') {
            for (const sysMsg of systemMessages) {
                const text = Array.isArray(sysMsg.content)
                    ? sysMsg.content.map(c => c.type === 'text' ? c.text : '').join('\n')
                    : sysMsg.content.type === 'text' ? sysMsg.content.text : '';
                if (text) {
                    // Inject the system instruction ahead of the user's first text part.
                    if (typeof geminiContents[0].parts[0] === 'object' && geminiContents[0].parts[0].text) {
                        geminiContents[0].parts[0].text = `[System Instructions]: ${text}\n\n${geminiContents[0].parts[0].text}`;
                    }
                }
            }
        }
        return geminiContents;
    }
    /**
     * Convert a single content object to the Gemini API format.
     *
     * @param {object} content - Internal content object.
     * @returns {object} Gemini content part.
     * @throws {Error} On an unsupported content type.
     */
    convertContentToGeminiFormat(content) {
        switch (content.type) {
            case 'text': {
                return {
                    text: content.text
                };
            }
            case 'image_url': {
                const imageContent = content;
                return {
                    inline_data: {
                        mime_type: this.getMimeTypeFromUrl(imageContent.image_url.url),
                        data: this.extractBase64FromUrl(imageContent.image_url.url)
                    }
                };
            }
            // Gemini does not support tool calls/results natively; serialize them as text.
            case 'tool_call':
            case 'tool_result': {
                return {
                    text: JSON.stringify(content)
                };
            }
            default:
                throw new Error(`Gemini不支持的内容类型: ${content.type}`);
        }
    }
    /**
     * Derive the MIME type for an image URL (data URL prefix, or file extension
     * as a fallback; unknown extensions default to image/jpeg).
     *
     * @param {string} url - Image URL or data URL.
     * @returns {string} MIME type string.
     */
    getMimeTypeFromUrl(url) {
        if (url.startsWith('data:')) {
            const mimeMatch = url.match(/^data:([^;]+);/);
            return mimeMatch ? mimeMatch[1] : 'image/jpeg';
        }
        const extension = url.split('.').pop()?.toLowerCase();
        switch (extension) {
            case 'jpg':
            case 'jpeg':
                return 'image/jpeg';
            case 'png':
                return 'image/png';
            case 'gif':
                return 'image/gif';
            case 'webp':
                return 'image/webp';
            default:
                return 'image/jpeg';
        }
    }
    /**
     * Extract the base64 payload from a data URL.
     *
     * @param {string} url - Image URL or data URL.
     * @returns {string} Base64 data, or '' for non-data URLs (fetching and
     *   encoding remote images is intentionally not done here).
     */
    extractBase64FromUrl(url) {
        if (url.startsWith('data:')) {
            return url.split(',')[1];
        }
        // Remote URLs would require an async fetch + encode; keep the simplified
        // behavior of returning an empty string.
        return '';
    }
    /**
     * Map an internal role to a role Gemini accepts. Gemini only knows 'user'
     * and 'model'; system/tool roles fall back to 'user'.
     *
     * @param {string} role - Internal role name.
     * @returns {string} Gemini role.
     */
    mapRoleToGemini(role) {
        switch (role) {
            case 'user':
                return 'user';
            case 'assistant':
                return 'model';
            case 'system':
                // No native system role; merged into the first user message elsewhere.
                return 'user';
            case 'tool':
                // No native tool role; treated as user input.
                return 'user';
            default:
                return 'user';
        }
    }
}
@@ -1,83 +0,0 @@
1
/**
 * LLM client base class.
 *
 * Holds the shared credentials/endpoint state and provides option
 * normalization for backward compatibility with the legacy
 * `prompt`/`systemPrompt` fields.
 */
export class LLMClient {
    /**
     * @param {string} apiKey - Provider API key.
     * @param {string} [baseUrl=''] - Provider base URL.
     */
    constructor(apiKey, baseUrl = '') {
        this.apiKey = apiKey;
        this.baseUrl = baseUrl;
    }
    /**
     * Normalize completion options, converting legacy fields into the
     * messages-based format.
     *
     * - A legacy `prompt` becomes a single user message (only when `messages`
     *   is absent or empty); the `prompt` field is then removed.
     * - A `systemPrompt` becomes a leading system message when no system
     *   message exists yet; the `systemPrompt` field is then removed.
     *
     * @param {object} options - Possibly-legacy completion options.
     * @returns {object} A normalized copy; the caller's options object and its
     *   `messages` array are never mutated.
     */
    normalizeOptions(options) {
        const normalizedOptions = { ...options };
        // Legacy prompt field: turn it into a user message.
        if ('prompt' in normalizedOptions && normalizedOptions.prompt) {
            if (!normalizedOptions.messages || normalizedOptions.messages.length === 0) {
                normalizedOptions.messages = [{
                        role: 'user',
                        content: {
                            type: 'text',
                            text: normalizedOptions.prompt
                        }
                    }];
            }
            // Drop the legacy field.
            delete normalizedOptions.prompt;
        }
        // systemPrompt without an explicit system message: prepend one.
        if (normalizedOptions.systemPrompt &&
            (!normalizedOptions.messages || !normalizedOptions.messages.some(m => m.role === 'system'))) {
            const systemMessage = {
                role: 'system',
                content: {
                    type: 'text',
                    text: normalizedOptions.systemPrompt
                }
            };
            // BUG FIX: the previous `unshift` mutated the caller's messages array
            // (the spread above is only a shallow copy). Build a fresh array instead.
            normalizedOptions.messages = [systemMessage, ...(normalizedOptions.messages ?? [])];
            // Drop the legacy field.
            delete normalizedOptions.systemPrompt;
        }
        return normalizedOptions;
    }
}
52
/**
 * Factory for constructing concrete LLM client instances.
 *
 * Each provider module is loaded lazily via a dynamic import so that only the
 * client actually requested is pulled into memory.
 */
export class LLMClientFactory {
    /**
     * Create an LLM client instance for the given provider type.
     *
     * @param {string} type - Provider type: 'openai', 'azure-openai', 'gemini', 'anthropic', or 'ollama'.
     * @param {string} apiKey - Provider API key.
     * @param {string} [baseUrl] - Optional base URL override; '' is used when omitted.
     * @returns {Promise<object>} The constructed client.
     * @throws {Error} When `type` is not a supported provider.
     */
    static async createClient(type, apiKey, baseUrl) {
        const resolvedBaseUrl = baseUrl || '';
        switch (type) {
            case 'openai': {
                const { OpenAIClient } = await import('./OpenAIClient.js');
                return new OpenAIClient(apiKey, resolvedBaseUrl);
            }
            case 'azure-openai': {
                const { AzureOpenAIClient } = await import('./AzureOpenAIClient.js');
                return new AzureOpenAIClient(apiKey, resolvedBaseUrl);
            }
            case 'gemini': {
                const { GeminiClient } = await import('./GeminiClient.js');
                return new GeminiClient(apiKey, resolvedBaseUrl);
            }
            case 'anthropic': {
                const { AnthropicClient } = await import('./AnthropicClient.js');
                return new AnthropicClient(apiKey, resolvedBaseUrl);
            }
            case 'ollama': {
                const { OllamaClient } = await import('./OllamaClient.js');
                return new OllamaClient(apiKey, resolvedBaseUrl);
            }
            default:
                throw new Error(`不支持的LLM客户端类型: ${type}`);
        }
    }
}