markpdfdown 0.1.8-beta.6 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/dist/main/index.js +398 -14
  2. package/dist/preload/index.js +20 -1
  3. package/dist/renderer/assets/{index-yTU2skrN.css → index-B_JfRqwM.css} +54 -0
  4. package/dist/renderer/assets/{index-iEK8qT5k.js → index-wHB9i2kW.js} +623 -320
  5. package/dist/renderer/index.html +2 -2
  6. package/package.json +10 -3
  7. package/dist/app/app.js +0 -49
  8. package/dist/app/controllers/completionController.js +0 -19
  9. package/dist/app/controllers/modelController.js +0 -53
  10. package/dist/app/controllers/providerController.js +0 -120
  11. package/dist/app/dal/modelDal.js +0 -44
  12. package/dist/app/dal/providerDal.js +0 -78
  13. package/dist/app/db/index.js +0 -56
  14. package/dist/app/db/migration.js +0 -157
  15. package/dist/app/logic/llm/AnthropicClient.js +0 -219
  16. package/dist/app/logic/llm/AzureOpenAIClient.js +0 -239
  17. package/dist/app/logic/llm/GeminiClient.js +0 -212
  18. package/dist/app/logic/llm/LLMClient.js +0 -80
  19. package/dist/app/logic/llm/OpenAIClient.js +0 -235
  20. package/dist/app/logic/llm/example-advanced.js +0 -232
  21. package/dist/app/logic/llm/index.js +0 -14
  22. package/dist/app/logic/model.js +0 -27
  23. package/dist/app/middleware/logger.js +0 -23
  24. package/dist/app/routes/routes.js +0 -16
  25. package/dist/app/types/Provider.js +0 -1
  26. package/dist/server/controllers/FileController.js +0 -64
  27. package/dist/server/controllers/TaskController.js +0 -57
  28. package/dist/server/controllers/completionController.js +0 -64
  29. package/dist/server/controllers/modelController.js +0 -74
  30. package/dist/server/controllers/providerController.js +0 -120
  31. package/dist/server/dal/TaskDal.js +0 -67
  32. package/dist/server/dal/modelDal.js +0 -44
  33. package/dist/server/dal/providerDal.js +0 -83
  34. package/dist/server/db/index.js +0 -57
  35. package/dist/server/db/migration.js +0 -157
  36. package/dist/server/index.js +0 -49
  37. package/dist/server/logic/File.js +0 -34
  38. package/dist/server/logic/Task.js +0 -21
  39. package/dist/server/logic/llm/AnthropicClient.js +0 -220
  40. package/dist/server/logic/llm/AzureOpenAIClient.js +0 -239
  41. package/dist/server/logic/llm/GeminiClient.js +0 -213
  42. package/dist/server/logic/llm/LLMClient.js +0 -83
  43. package/dist/server/logic/llm/OllamaClient.js +0 -220
  44. package/dist/server/logic/llm/OpenAIClient.js +0 -235
  45. package/dist/server/logic/llm/example-advanced.js +0 -231
  46. package/dist/server/logic/llm/index.js +0 -15
  47. package/dist/server/logic/model.js +0 -59
  48. package/dist/server/middleware/logger.js +0 -23
  49. package/dist/server/routes/routes.js +0 -30
  50. package/dist/server/types/Provider.js +0 -1
  51. package/dist/server/types/Task.js +0 -1
import { LLMClient } from './LLMClient.js';

/**
 * Client for a local Ollama server's chat endpoint.
 *
 * Sends requests to the Ollama /api/chat HTTP API. Supports streaming
 * responses (newline-delimited JSON objects), non-streaming responses,
 * optional tool definitions, and JSON response format. Image parts are
 * lifted out of message content into Ollama's separate `images` field
 * as raw base64 payloads.
 */
export class OllamaClient extends LLMClient {
  /**
   * @param {string} apiKey - kept for interface parity with other clients
   * @param {string} [baseUrl] - chat endpoint; defaults to the local server
   */
  constructor(apiKey, baseUrl) {
    super(apiKey, baseUrl || 'http://localhost:11434/api/chat');
  }

  /**
   * Execute a chat completion against Ollama.
   *
   * @param {object} options - normalized via `normalizeOptions`; recognizes
   *   `model`, `messages`, `stream` (defaults to true), `temperature`,
   *   `maxTokens`, `tools`, `response_format`, and an `onUpdate` callback
   *   that receives the accumulated text during streaming.
   * @returns {Promise<object>} result with `content`, `model`,
   *   `finishReason`, `responseFormat`, and (when available) `rawResponse`.
   * @throws {Error} on HTTP errors or any failure during the request.
   */
  async completion(options) {
    try {
      // Normalize caller options for backward compatibility.
      const opts = this.normalizeOptions(options);
      // Ollama expects flat { role, content, images? } messages.
      const messages = this.convertMessagesToOllamaFormat(opts.messages);

      const requestBody = {
        model: opts.model || 'llama3',
        messages,
        stream: opts.stream !== false, // streaming is the default
        options: {},
      };
      if (opts.temperature !== undefined) {
        requestBody.options.temperature = opts.temperature;
      }
      if (opts.maxTokens !== undefined) {
        // Ollama names its max-token knob `num_predict`.
        requestBody.options.num_predict = opts.maxTokens;
      }
      if (opts.tools && opts.tools.length > 0) {
        requestBody.tools = opts.tools;
      }
      if (opts.response_format?.type === 'json_object') {
        requestBody.format = 'json';
      }

      const response = await fetch(`${this.baseUrl}`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json'
        },
        body: JSON.stringify(requestBody),
      });
      console.log(`[${new Date().toISOString()}] POST ${this.baseUrl} ${response.status} - ${response.statusText}`);
      // console.log(JSON.stringify(requestBody));
      if (!response.ok) {
        const error = await response.json();
        throw new Error(`Ollama API错误: ${error.error || response.statusText}`);
      }

      if (opts.stream && response.body && opts.onUpdate) {
        // Streaming mode: the body is one JSON object per line.
        const reader = response.body.getReader();
        const decoder = new TextDecoder('utf-8');
        let content = '';
        let model = '';
        for (;;) {
          const { done, value } = await reader.read();
          if (done) {
            // Stream ended without an explicit `done` record.
            return {
              content,
              model: model || opts.model || 'llama3',
              finishReason: 'stop',
              responseFormat: opts.response_format?.type,
            };
          }
          const chunk = decoder.decode(value);
          const lines = chunk.split('\n').filter((line) => line.trim() !== '');
          for (const line of lines) {
            try {
              const data = JSON.parse(line);
              // Remember the first model name the server reports.
              if (data.model && !model) {
                model = data.model;
              }
              // Accumulate incremental message text and notify the caller.
              if (data.message && data.message.content && !data.done) {
                content += data.message.content;
                if (opts.onUpdate) {
                  opts.onUpdate(content);
                }
              }
              // The final record carries done === true; return immediately.
              if (data.done === true) {
                return {
                  content,
                  model: data.model || opts.model || 'llama3',
                  finishReason: 'stop',
                  responseFormat: opts.response_format?.type,
                  rawResponse: data,
                };
              }
            } catch (e) {
              // Lines split across chunk boundaries may not parse; skip them.
            }
          }
        }
      } else {
        // Non-streaming mode: a single JSON payload.
        const data = await response.json();
        return {
          content: data.message?.content || '',
          model: data.model,
          finishReason: data.done ? 'stop' : undefined,
          responseFormat: opts.response_format?.type,
          rawResponse: data, // kept for debugging
        };
      }
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      throw new Error(`Ollama补全请求失败: ${errorMessage}`);
    }
  }

  /**
   * Convert normalized messages into Ollama's message shape, moving any
   * image parts into the per-message `images` array.
   */
  convertMessagesToOllamaFormat(messages) {
    return messages.map((message) => {
      const ollamaMessage = { role: message.role };
      ollamaMessage.content = Array.isArray(message.content)
        ? this.convertContentArrayToOllamaFormat(message.content)
        : this.convertContentToOllamaFormat(message.content);
      // Multimodal models receive images as a separate base64 array.
      const images = this.extractImages(message.content);
      if (images.length > 0) {
        ollamaMessage.images = images;
      }
      return ollamaMessage;
    });
  }

  /**
   * Collect image payloads (base64 or URL) from message content, which may
   * be a single content part or an array of parts.
   */
  extractImages(content) {
    const images = [];
    if (Array.isArray(content)) {
      for (const item of content) {
        if (item.type === 'image_url') {
          images.push(this.processImageUrl(item.image_url.url));
        }
      }
    } else if (content.type === 'image_url') {
      images.push(this.processImageUrl(content.image_url.url));
    }
    return images;
  }

  /**
   * Strip a `data:` URI prefix so only the raw base64 data remains;
   * plain URLs are returned unchanged.
   */
  processImageUrl(url) {
    if (url.startsWith('data:')) {
      const parts = url.split(';base64,');
      if (parts.length === 2) {
        return parts[1];
      }
    }
    return url;
  }

  /**
   * Join a multi-part content array into a single newline-separated string.
   */
  convertContentArrayToOllamaFormat(contentArray) {
    return contentArray
      .map((item) => this.convertContentToOllamaFormat(item))
      .join('\n');
  }

  /**
   * Render one content part as plain text for Ollama. Image parts yield an
   * empty string (they travel via the `images` field); tool parts are
   * serialized as JSON; unknown types yield an empty string.
   */
  convertContentToOllamaFormat(content) {
    switch (content.type) {
      case 'text':
        return content.text;
      case 'image_url':
        // Images go to the separate `images` field; nothing inline.
        return '';
      case 'tool_call':
        return JSON.stringify({
          tool_call_id: content.tool_call_id,
          function: {
            name: content.function.name,
            arguments: content.function.arguments,
          },
        });
      case 'tool_result':
        return JSON.stringify({
          tool_call_id: content.tool_call_id,
          content: content.content,
        });
      default:
        return '';
    }
  }
}
import { LLMClient } from './LLMClient.js';

/**
 * Client for the OpenAI chat-completions API.
 *
 * Supports non-streaming JSON responses and server-sent-event streaming
 * (`data: {json}` lines terminated by `data: [DONE]`), tool calling
 * (including incremental tool-call deltas while streaming), and an
 * optional `response_format`.
 */
export class OpenAIClient extends LLMClient {
  /**
   * @param {string} apiKey - bearer token used in the Authorization header
   * @param {string} [baseUrl] - defaults to the official chat-completions URL
   */
  constructor(apiKey, baseUrl) {
    super(apiKey, baseUrl || 'https://api.openai.com/v1/chat/completions');
  }

  /**
   * Execute a chat completion against OpenAI.
   *
   * @param {object} options - normalized via `normalizeOptions`; recognizes
   *   `model`, `messages`, `temperature` (default 0.7), `maxTokens`,
   *   `stream`, `tools`, `tool_choice`, `response_format`, `apiKey`
   *   (per-call override), and an `onUpdate` streaming callback that
   *   receives the accumulated text.
   * @returns {Promise<object>} result with `content`, `model`,
   *   `finishReason`, optional `toolCalls`, `responseFormat`, and (for
   *   non-streaming calls) `rawResponse`.
   * @throws {Error} on HTTP errors or any failure during the request.
   */
  async completion(options) {
    try {
      // Normalize caller options for backward compatibility.
      const opts = this.normalizeOptions(options);
      const messages = this.convertMessagesToOpenAIFormat(opts.messages);
      const requestBody = {
        model: opts.model || 'gpt-3.5-turbo',
        messages,
        temperature: opts.temperature ?? 0.7,
        max_tokens: opts.maxTokens,
        stream: opts.stream || false,
      };
      if (opts.tools && opts.tools.length > 0) {
        requestBody.tools = opts.tools;
        if (opts.tool_choice) {
          requestBody.tool_choice = opts.tool_choice;
        }
      }
      if (opts.response_format) {
        requestBody.response_format = opts.response_format;
      }

      const response = await fetch(`${this.baseUrl}`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${opts.apiKey || this.apiKey}`,
        },
        body: JSON.stringify(requestBody),
      });
      console.log(`[${new Date().toISOString()}] POST ${this.baseUrl} ${response.status} - ${response.statusText}`);
      if (!response.ok) {
        const error = await response.json();
        throw new Error(`OpenAI API错误: ${error.error?.message || response.statusText}`);
      }

      if (opts.stream && response.body && opts.onUpdate) {
        // Streaming mode: SSE frames of the form "data: {json}".
        const reader = response.body.getReader();
        const decoder = new TextDecoder('utf-8');
        let content = '';
        const toolCalls = [];
        for (;;) {
          const { done, value } = await reader.read();
          if (done) {
            return {
              content,
              model: opts.model || 'gpt-3.5-turbo',
              finishReason: 'stop',
              toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
              responseFormat: opts.response_format?.type,
            };
          }
          const chunk = decoder.decode(value);
          const lines = chunk
            .split('\n')
            .filter((line) => line.trim() !== '' && line.trim() !== 'data: [DONE]');
          for (const line of lines) {
            if (!line.startsWith('data: ')) {
              continue;
            }
            try {
              const data = JSON.parse(line.slice(6));
              // Plain text delta: append and notify the caller.
              if (data.choices && data.choices[0]?.delta?.content) {
                content += data.choices[0].delta.content;
                if (opts.onUpdate) {
                  opts.onUpdate(content);
                }
              }
              // Tool-call deltas arrive piecewise: a new id opens an entry,
              // later frames append to its name/arguments.
              if (data.choices && data.choices[0]?.delta?.tool_calls) {
                for (const deltaTool of data.choices[0].delta.tool_calls) {
                  let toolCall = toolCalls.find((tc) => tc.id === deltaTool.id);
                  if (!toolCall && deltaTool.id) {
                    toolCall = {
                      id: deltaTool.id,
                      type: 'function',
                      function: { name: '', arguments: '' },
                    };
                    toolCalls.push(toolCall);
                  }
                  if (toolCall && deltaTool.function) {
                    if (deltaTool.function.name) {
                      toolCall.function.name = deltaTool.function.name;
                    }
                    if (deltaTool.function.arguments) {
                      toolCall.function.arguments += deltaTool.function.arguments;
                    }
                  }
                }
                // Keep the caller's view current while tools stream in.
                if (opts.onUpdate) {
                  opts.onUpdate(content);
                }
              }
            } catch (e) {
              // Frames split across chunk boundaries may not parse; skip them.
            }
          }
        }
      } else {
        // Non-streaming mode: single JSON payload.
        const data = await response.json();
        let responseContent = '';
        const toolCalls = [];
        if (data.choices && data.choices[0]?.message) {
          const message = data.choices[0].message;
          if (typeof message.content === 'string') {
            responseContent = message.content;
          }
          if (message.tool_calls && message.tool_calls.length > 0) {
            for (const toolCall of message.tool_calls) {
              toolCalls.push({
                id: toolCall.id,
                type: toolCall.type,
                function: {
                  name: toolCall.function.name,
                  arguments: toolCall.function.arguments,
                },
              });
            }
          }
        }
        return {
          content: responseContent,
          model: data.model,
          finishReason: data.choices[0]?.finish_reason,
          toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
          responseFormat: opts.response_format?.type,
          rawResponse: data, // kept for debugging
        };
      }
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      throw new Error(`OpenAI补全请求失败: ${errorMessage}`);
    }
  }

  /**
   * Convert normalized messages into OpenAI's message shape. Single text
   * parts collapse to a bare string; anything else becomes a parts array.
   */
  convertMessagesToOpenAIFormat(messages) {
    return messages.map((message) => {
      const openaiMessage = { role: message.role };
      if (message.name) {
        openaiMessage.name = message.name;
      }
      if (Array.isArray(message.content)) {
        openaiMessage.content = message.content.map((part) =>
          this.convertContentToOpenAIFormat(part));
      } else {
        const part = this.convertContentToOpenAIFormat(message.content);
        openaiMessage.content = part.type === 'text' ? part.text : [part];
      }
      return openaiMessage;
    });
  }

  /**
   * Convert one content part into OpenAI's content-part shape.
   * @throws {Error} for content types this client does not understand.
   */
  convertContentToOpenAIFormat(content) {
    switch (content.type) {
      case 'text':
        return {
          type: 'text',
          text: content.text,
        };
      case 'image_url':
        return {
          type: 'image_url',
          image_url: {
            url: content.image_url.url,
          },
        };
      case 'tool_call':
        return {
          type: 'tool_call',
          tool_call_id: content.tool_call_id,
          function: {
            name: content.function.name,
            arguments: content.function.arguments,
          },
        };
      case 'tool_result':
        return {
          type: 'tool_result',
          tool_call_id: content.tool_call_id,
          content: content.content,
        };
      default:
        throw new Error(`不支持的内容类型: ${content.type}`);
    }
  }
}
import { LLMClientFactory } from './LLMClient.js';

/**
 * Advanced usage examples: multi-turn conversation, multiple roles,
 * image input, tool calling, and JSON-mode responses.
 *
 * Requires a real API key; errors are caught and logged rather than thrown.
 */
async function advancedExample() {
  try {
    // Create an OpenAI client via the factory.
    const openaiClient = await LLMClientFactory.createClient('openai', 'your-openai-api-key');

    // 1. Basic multi-turn conversation.
    console.log('=== 多轮对话示例 ===');
    const chatMessages = [
      {
        role: 'system',
        content: {
          type: 'text',
          text: '你是一个专业的AI助手,善于回答用户的问题。请简短、直接地回答。'
        }
      },
      {
        role: 'user',
        content: {
          type: 'text',
          text: '你好,我想了解一下人工智能。'
        }
      },
      {
        role: 'assistant',
        content: {
          type: 'text',
          text: '人工智能是计算机科学的一个分支,旨在创建能够像人类一样思考和学习的智能机器。它包括机器学习、深度学习、自然语言处理等领域。有什么具体方面你想了解的吗?'
        }
      },
      {
        role: 'user',
        content: {
          type: 'text',
          text: '自然语言处理是什么?'
        }
      }
    ];
    const chatResponse = await openaiClient.completion({
      messages: chatMessages,
      model: 'gpt-4'
    });
    console.log('对话响应:', chatResponse.content);

    // 2. Image analysis with a vision-capable model.
    console.log('\n=== 图片分析示例 ===');
    const imageMessages = [
      {
        role: 'user',
        content: [
          {
            type: 'text',
            text: '这张图片是什么?请详细描述一下。'
          },
          {
            type: 'image_url',
            image_url: { url: 'https://example.com/sample-image.jpg' }, // replace with a real image URL
          }
        ]
      }
    ];
    const imageResponse = await openaiClient.completion({
      messages: imageMessages,
      model: 'gpt-4-vision-preview' // model with image support
    });
    console.log('图片分析响应:', imageResponse.content);

    // 3. Tool calling with a simple calculator tool.
    console.log('\n=== 工具调用示例 ===');
    const calculatorTool = {
      type: 'function',
      function: {
        name: 'calculate',
        description: '执行基本数学计算',
        parameters: {
          type: 'object',
          properties: {
            operation: {
              type: 'string',
              enum: ['add', 'subtract', 'multiply', 'divide'],
              description: '要执行的操作'
            },
            a: {
              type: 'number',
              description: '第一个操作数'
            },
            b: {
              type: 'number',
              description: '第二个操作数'
            }
          },
          required: ['operation', 'a', 'b']
        }
      }
    };
    const toolMessages = [
      {
        role: 'user',
        content: {
          type: 'text',
          text: '我需要计算123乘以456是多少'
        }
      }
    ];
    const toolResponse = await openaiClient.completion({
      messages: toolMessages,
      model: 'gpt-4',
      tools: [calculatorTool],
      tool_choice: 'auto'
    });
    console.log('工具调用响应:', toolResponse);

    // If the model requested a tool call, execute it and continue the chat.
    if (toolResponse.toolCalls && toolResponse.toolCalls.length > 0) {
      const toolCall = toolResponse.toolCalls[0];
      console.log('工具调用:', toolCall.function.name);
      console.log('参数:', toolCall.function.arguments);
      const args = JSON.parse(toolCall.function.arguments);

      // Dispatch the arithmetic operation; unknown operations throw.
      const operations = {
        add: (a, b) => a + b,
        subtract: (a, b) => a - b,
        multiply: (a, b) => a * b,
        divide: (a, b) => a / b,
      };
      const operation = operations[args.operation];
      if (!operation) {
        throw new Error(`不支持的操作: ${args.operation}`);
      }
      const result = operation(args.a, args.b);

      // Feed the tool result back and ask for a final answer.
      const toolResultMessages = [
        ...toolMessages,
        {
          role: 'assistant',
          content: {
            type: 'text',
            text: toolResponse.content
          }
        },
        {
          role: 'tool',
          content: {
            type: 'tool_result',
            tool_call_id: toolCall.id,
            content: JSON.stringify({ result })
          }
        }
      ];
      const finalResponse = await openaiClient.completion({
        messages: toolResultMessages,
        model: 'gpt-4'
      });
      console.log('最终响应:', finalResponse.content);
    }

    // 4. Structured JSON responses via response_format.
    console.log('\n=== JSON响应格式示例 ===');
    const jsonMessages = [
      {
        role: 'system',
        content: {
          type: 'text',
          text: '你是一个API服务,返回结构化的JSON数据。'
        }
      },
      {
        role: 'user',
        content: {
          type: 'text',
          text: '给我三本经典科幻小说的信息,包括书名、作者和出版年份。'
        }
      }
    ];
    const jsonResponse = await openaiClient.completion({
      messages: jsonMessages,
      model: 'gpt-4',
      response_format: { type: 'json_object' }
    });
    console.log('JSON响应:', jsonResponse.content);
    console.log('解析后的JSON:', JSON.parse(jsonResponse.content));
  } catch (error) {
    console.error('示例运行出错:', error);
  }
}

/**
 * Helper: convert a local image file into a base64 data URI.
 *
 * NOTE(review): uses `require('fs')` inside an ES module — this would be
 * undefined at call time under plain ESM; verify the intended runtime
 * before relying on it.
 *
 * @param {string} filePath - path to the image file
 * @returns {Promise<string>} a `data:<mime>;base64,...` URI
 */
export function imageToDataURI(filePath) {
  return new Promise((resolve, reject) => {
    const fs = require('fs');
    fs.readFile(filePath, (err, data) => {
      if (err) {
        reject(err);
        return;
      }
      // Map the file extension to a MIME type, defaulting to a binary blob.
      const extension = filePath.split('.').pop()?.toLowerCase();
      const mimeTypesByExtension = {
        jpg: 'image/jpeg',
        jpeg: 'image/jpeg',
        png: 'image/png',
        gif: 'image/gif',
        webp: 'image/webp',
      };
      const mimeType = mimeTypesByExtension[extension] ?? 'application/octet-stream';
      const base64Data = data.toString('base64');
      resolve(`data:${mimeType};base64,${base64Data}`);
    });
  });
}

// Run the examples:
// advancedExample().catch(console.error);

export { advancedExample };