@lobehub/chat 1.99.1 → 1.99.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursor/rules/project-introduce.mdc +1 -56
- package/.cursor/rules/testing-guide/db-model-test.mdc +453 -0
- package/.cursor/rules/testing-guide/electron-ipc-test.mdc +80 -0
- package/.cursor/rules/testing-guide/testing-guide.mdc +401 -0
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/docs/usage/providers/ai21.mdx +1 -1
- package/docs/usage/providers/ai21.zh-CN.mdx +1 -1
- package/docs/usage/providers/ai360.mdx +1 -1
- package/docs/usage/providers/ai360.zh-CN.mdx +1 -1
- package/docs/usage/providers/anthropic.mdx +1 -1
- package/docs/usage/providers/anthropic.zh-CN.mdx +1 -1
- package/docs/usage/providers/azure.mdx +1 -1
- package/docs/usage/providers/azure.zh-CN.mdx +1 -1
- package/docs/usage/providers/baichuan.mdx +1 -1
- package/docs/usage/providers/baichuan.zh-CN.mdx +1 -1
- package/docs/usage/providers/bedrock.mdx +1 -1
- package/docs/usage/providers/bedrock.zh-CN.mdx +1 -1
- package/docs/usage/providers/cloudflare.mdx +1 -1
- package/docs/usage/providers/cloudflare.zh-CN.mdx +1 -1
- package/docs/usage/providers/deepseek.mdx +1 -1
- package/docs/usage/providers/deepseek.zh-CN.mdx +1 -1
- package/docs/usage/providers/fal.mdx +69 -0
- package/docs/usage/providers/fal.zh-CN.mdx +68 -0
- package/docs/usage/providers/fireworksai.mdx +1 -1
- package/docs/usage/providers/fireworksai.zh-CN.mdx +1 -1
- package/docs/usage/providers/giteeai.mdx +1 -1
- package/docs/usage/providers/giteeai.zh-CN.mdx +1 -1
- package/docs/usage/providers/github.mdx +1 -1
- package/docs/usage/providers/github.zh-CN.mdx +1 -1
- package/docs/usage/providers/google.mdx +1 -1
- package/docs/usage/providers/google.zh-CN.mdx +1 -1
- package/docs/usage/providers/groq.mdx +1 -1
- package/docs/usage/providers/groq.zh-CN.mdx +1 -1
- package/docs/usage/providers/hunyuan.mdx +1 -1
- package/docs/usage/providers/hunyuan.zh-CN.mdx +1 -1
- package/docs/usage/providers/internlm.mdx +1 -1
- package/docs/usage/providers/internlm.zh-CN.mdx +1 -1
- package/docs/usage/providers/jina.mdx +1 -1
- package/docs/usage/providers/jina.zh-CN.mdx +1 -1
- package/docs/usage/providers/minimax.mdx +1 -1
- package/docs/usage/providers/minimax.zh-CN.mdx +1 -1
- package/docs/usage/providers/mistral.mdx +1 -1
- package/docs/usage/providers/mistral.zh-CN.mdx +1 -1
- package/docs/usage/providers/moonshot.mdx +1 -1
- package/docs/usage/providers/moonshot.zh-CN.mdx +1 -1
- package/docs/usage/providers/novita.mdx +1 -1
- package/docs/usage/providers/novita.zh-CN.mdx +1 -1
- package/docs/usage/providers/ollama.mdx +1 -1
- package/docs/usage/providers/ollama.zh-CN.mdx +1 -1
- package/docs/usage/providers/openai.mdx +4 -4
- package/docs/usage/providers/openai.zh-CN.mdx +4 -4
- package/docs/usage/providers/openrouter.mdx +1 -1
- package/docs/usage/providers/openrouter.zh-CN.mdx +1 -1
- package/docs/usage/providers/perplexity.mdx +1 -1
- package/docs/usage/providers/perplexity.zh-CN.mdx +1 -1
- package/docs/usage/providers/ppio.mdx +1 -1
- package/docs/usage/providers/ppio.zh-CN.mdx +1 -1
- package/docs/usage/providers/qiniu.mdx +1 -1
- package/docs/usage/providers/qiniu.zh-CN.mdx +1 -1
- package/docs/usage/providers/qwen.mdx +1 -1
- package/docs/usage/providers/qwen.zh-CN.mdx +1 -1
- package/docs/usage/providers/sambanova.mdx +1 -1
- package/docs/usage/providers/sambanova.zh-CN.mdx +1 -1
- package/docs/usage/providers/sensenova.mdx +1 -1
- package/docs/usage/providers/sensenova.zh-CN.mdx +1 -1
- package/docs/usage/providers/siliconcloud.mdx +1 -1
- package/docs/usage/providers/siliconcloud.zh-CN.mdx +1 -1
- package/docs/usage/providers/spark.mdx +1 -1
- package/docs/usage/providers/spark.zh-CN.mdx +1 -1
- package/docs/usage/providers/stepfun.mdx +1 -1
- package/docs/usage/providers/stepfun.zh-CN.mdx +1 -1
- package/docs/usage/providers/taichu.mdx +1 -1
- package/docs/usage/providers/taichu.zh-CN.mdx +1 -1
- package/docs/usage/providers/togetherai.mdx +1 -1
- package/docs/usage/providers/togetherai.zh-CN.mdx +1 -1
- package/docs/usage/providers/upstage.mdx +1 -1
- package/docs/usage/providers/upstage.zh-CN.mdx +1 -1
- package/docs/usage/providers/vllm.mdx +1 -1
- package/docs/usage/providers/vllm.zh-CN.mdx +1 -1
- package/docs/usage/providers/wenxin.mdx +1 -1
- package/docs/usage/providers/wenxin.zh-CN.mdx +1 -1
- package/docs/usage/providers/xai.mdx +1 -1
- package/docs/usage/providers/xai.zh-CN.mdx +1 -1
- package/docs/usage/providers/zeroone.mdx +1 -1
- package/docs/usage/providers/zeroone.zh-CN.mdx +1 -1
- package/docs/usage/providers/zhipu.mdx +1 -1
- package/docs/usage/providers/zhipu.zh-CN.mdx +1 -1
- package/package.json +3 -3
- package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/components/MultiImagesUpload/index.tsx +9 -4
- package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/index.tsx +1 -1
- package/src/app/[variants]/(main)/image/features/ImageWorkspace/EmptyState.tsx +12 -26
- package/src/config/aiModels/openai.ts +24 -9
- package/src/libs/model-runtime/BaseAI.ts +1 -0
- package/src/libs/model-runtime/hunyuan/index.ts +4 -6
- package/src/libs/model-runtime/novita/__snapshots__/index.test.ts.snap +18 -0
- package/src/libs/model-runtime/openai/__snapshots__/index.test.ts.snap +28 -0
- package/src/libs/model-runtime/openai/index.test.ts +1 -338
- package/src/libs/model-runtime/openai/index.ts +0 -127
- package/src/libs/model-runtime/openrouter/__snapshots__/index.test.ts.snap +3 -0
- package/src/libs/model-runtime/ppio/__snapshots__/index.test.ts.snap +2 -0
- package/src/libs/model-runtime/utils/modelParse.ts +1 -0
- package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts +364 -12
- package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts +145 -43
- package/src/libs/model-runtime/utils/openaiHelpers.test.ts +151 -0
- package/src/libs/model-runtime/utils/openaiHelpers.ts +26 -1
- package/src/libs/model-runtime/xai/index.ts +1 -4
- package/src/store/aiInfra/slices/aiModel/action.ts +1 -1
- package/src/store/aiInfra/slices/aiProvider/action.ts +5 -2
- package/src/types/aiModel.ts +1 -0
- package/src/types/llm.ts +3 -1
- package/.cursor/rules/testing-guide.mdc +0 -881
package/docs/usage/providers/siliconcloud.zh-CN.mdx

@@ -28,7 +28,7 @@ tags:
 ### 步骤二:在 LobeChat 中配置 SiliconCloud

 - 访问 LobeChat 的`设置`界面
-- …
+- 在`AI 服务商`下找到 `SiliconFlow` 的设置项

 <Image alt={'填入 API 密钥'} inStep src={'https://github.com/user-attachments/assets/4c792f62-5203-4f13-8f23-df228f70d67f'} />

package/docs/usage/providers/spark.mdx

@@ -33,7 +33,7 @@ This guide will instruct you on how to use iFLYTEK Spark in LobeChat.
 ### Step 2: Configure iFLYTEK Spark in LobeChat

 - Access the `Settings` menu in LobeChat
-- Find the iFLYTEK Spark settings under `…
+- Find the iFLYTEK Spark settings under `AI Service Provider`

 <Image alt={'Enter API Key'} inStep src={'https://github.com/user-attachments/assets/d693be02-e08c-43ae-8bde-1294f180aaf6'} />

package/docs/usage/providers/stepfun.mdx

@@ -29,7 +29,7 @@ This document will guide you on how to use Stepfun in LobeChat:
 ### Step 2: Configure Stepfun in LobeChat

 - Visit the `Settings` interface in LobeChat
-- Find the setting for Stepfun under `…
+- Find the setting for Stepfun under `AI Service Provider`

 <Image alt={'Enter API Key'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/470e5669-650b-46cf-8024-a1476c166059'} />

package/docs/usage/providers/stepfun.zh-CN.mdx

@@ -27,7 +27,7 @@ tags:
 ### 步骤二:在 LobeChat 中配置 Stepfun 阶跃星辰

 - 访问 LobeChat 的`设置`界面
-- …
+- 在`AI 服务商`下找到` Stepfun 阶跃星辰`的设置项

 <Image alt={'填写 API 密钥'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/470e5669-650b-46cf-8024-a1476c166059'} />

package/docs/usage/providers/taichu.mdx

@@ -27,7 +27,7 @@ This article will guide you on how to use Taichu in LobeChat:
 ### Step 2: Configure Taichu in LobeChat

 - Go to the `Settings` interface in LobeChat
-- Find the setting for `Taichu` under `…
+- Find the setting for `Taichu` under `AI Service Provider`

 <Image alt={'Enter API Key'} inStep src={'https://github.com/user-attachments/assets/55028fe5-44db-49e2-93c5-5dabbd664f10'} />

package/docs/usage/providers/togetherai.mdx

@@ -32,7 +32,7 @@ This document will guide you on how to use Together AI in LobeChat:
 ### Step 2: Configure Together AI in LobeChat

 - Visit the `Settings` interface in LobeChat
-- Find the setting for `together.ai` under `…
+- Find the setting for `together.ai` under `AI Service Provider`

 <Image alt={'Enter Together AI API key in LobeChat'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/0cc6c9b8-4688-472b-a80f-f84c5ebbc719'} />

package/docs/usage/providers/togetherai.zh-CN.mdx

@@ -31,7 +31,7 @@ tags:
 ### 步骤二:在 LobeChat 中配置 Together AI

 - 访问 LobeChat 的`设置`界面
-- …
+- 在`AI 服务商`下找到`together.ai`的设置项

 <Image alt={'LobeChat 中填写 Together AI API 密钥'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/0cc6c9b8-4688-472b-a80f-f84c5ebbc719'} />

package/docs/usage/providers/upstage.mdx

@@ -29,7 +29,7 @@ This article will guide you on how to use Upstage in LobeChat.
 ### Step 2: Configure Upstage in LobeChat

 - Access the `Settings` interface in LobeChat
-- Locate the `Upstage` settings under `…
+- Locate the `Upstage` settings under `AI Service Provider`

 <Image alt={'Enter API Key'} inStep src={'https://github.com/user-attachments/assets/e89d2a56-4bf0-4bff-ac39-0d44789fa858'} />

package/docs/usage/providers/upstage.zh-CN.mdx

@@ -29,7 +29,7 @@ tags:
 ### 步骤二:在 LobeChat 中配置 Upstage

 - 访问 LobeChat 的`设置`界面
-- …
+- 在`AI 服务商`下找到 `Upstage` 的设置项

 <Image alt={'填入 API 密钥'} inStep src={'https://github.com/user-attachments/assets/e89d2a56-4bf0-4bff-ac39-0d44789fa858'} />

package/docs/usage/providers/vllm.mdx

@@ -81,7 +81,7 @@ This document will guide you on how to use vLLM in LobeChat:
 ### Step 4: Configure vLLM in LobeChat

 - Access the `Application Settings` interface of LobeChat.
-- Find the `vLLM` settings item under `…
+- Find the `vLLM` settings item under `AI Service Provider`.

 <Image alt={'Fill in the vLLM API Key'} inStep src={'https://github.com/user-attachments/assets/669c68bf-3f85-4a6f-bb08-d0d7fb7f7417'} />

package/docs/usage/providers/vllm.zh-CN.mdx

@@ -80,7 +80,7 @@ tags:
 ### 步骤四:在 LobeChat 中配置 vLLM

 - 访问 LobeChat 的 `应用设置`界面
-- 在…
+- 在 `AI 服务商` 下找到 `vLLM` 的设置项

 <Image alt={'填写 vLLM API 密钥'} inStep src={'https://github.com/user-attachments/assets/669c68bf-3f85-4a6f-bb08-d0d7fb7f7417'} />

package/docs/usage/providers/wenxin.mdx

@@ -41,7 +41,7 @@ This article will guide you on how to use Wenxin Qianfan in LobeChat.
 ### Step 2: Configure Wenxin Qianfan in LobeChat

 - Go to the `Settings` page of LobeChat
-- Under `…
+- Under `AI Service Provider`, find the `Wenxin Qianfan` settings

 <Image alt={'Enter API Key'} inStep src={'https://github.com/user-attachments/assets/d7666e2a-0202-4b45-8338-9806ddffa44e'} />

package/docs/usage/providers/xai.mdx

@@ -35,7 +35,7 @@ This article will guide you on how to use xAI in LobeChat.
 ### Step 2: Configure xAI in LobeChat

 - Go to the `Settings` menu in LobeChat
-- Locate the `xAI` settings under `…
+- Locate the `xAI` settings under `AI Service Provider`

 <Image alt={'Enter API Key'} inStep src={'https://github.com/user-attachments/assets/12863a0e-a1ee-406d-8dee-011b20701fd6'} />

package/docs/usage/providers/zeroone.mdx

@@ -39,7 +39,7 @@ This document will guide you on how to use 01 AI in LobeChat:
 ### Step 2: Configure 01 AI in LobeChat

 - Access the `Settings` interface in LobeChat
-- Find the setting for `01 AI` under `…
+- Find the setting for `01 AI` under `AI Service Provider`

 <Image alt={'Enter API Key'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/f539d104-6d64-4cc7-8781-3b36b00d32d0'} />

package/docs/usage/providers/zhipu.mdx

@@ -30,7 +30,7 @@ This document will guide you on how to use Zhipu AI in LobeChat:
 ### Step 2: Configure Zhipu AI in LobeChat

 - Visit the `Settings` interface in LobeChat
-- Under `…
+- Under `AI Service Provider`, locate the settings for Zhipu AI

 <Image alt={'Enter Zhipu AI API Key in LobeChat'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/2afffe79-1d37-423c-9363-f09605d5e640'} />

package/docs/usage/providers/zhipu.zh-CN.mdx

@@ -28,7 +28,7 @@ tags:
 ### 步骤二:在 LobeChat 中配置智谱 AI

 - 访问 LobeChat 的`设置`界面
-- …
+- 在`AI 服务商`下找到`智谱AI`的设置项

 <Image alt={'LobeChat 中填写智谱AI API 密钥'} inStep src={'https://github.com/lobehub/lobe-chat/assets/34400653/2afffe79-1d37-423c-9363-f09605d5e640'} />
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.99.1",
+  "version": "1.99.3",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -210,7 +210,7 @@
     "mdast-util-to-markdown": "^2.1.2",
     "modern-screenshot": "^4.6.0",
     "nanoid": "^5.1.5",
-    "next": "…
+    "next": "~15.3.3",
     "next-auth": "5.0.0-beta.25",
     "next-mdx-remote": "^5.0.0",
     "nextjs-toploader": "^3.8.16",
@@ -326,7 +326,7 @@
     "crypto-js": "^4.2.0",
     "dbdocs": "^0.14.4",
     "dotenv": "^16.5.0",
-    "dpdm-fast": "…
+    "dpdm-fast": "1.0.7",
     "drizzle-dbml-generator": "^0.10.0",
     "drizzle-kit": "^0.31.0",
     "eslint": "^8.57.1",
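For context on the dependency changes above: `~15.3.3` holds `next` to patch releases of 15.3, while the bare `1.0.7` pins `dpdm-fast` exactly. A quick check with the `semver` package (not part of this diff) illustrates the two range styles:

```ts
import semver from 'semver';

// "~15.3.3" allows patch-level updates within 15.3.x only
console.log(semver.satisfies('15.3.9', '~15.3.3')); // true
console.log(semver.satisfies('15.4.0', '~15.3.3')); // false

// a bare version with no range operator matches that exact version only
console.log(semver.satisfies('1.0.7', '1.0.7')); // true
console.log(semver.satisfies('1.0.8', '1.0.7')); // false
```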
package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/components/MultiImagesUpload/index.tsx

@@ -3,6 +3,7 @@
 // Removed Image import - using img tags instead
 import { createStyles, useTheme } from 'antd-style';
 import { Image as ImageIcon, X } from 'lucide-react';
+import Image from 'next/image';
 import React, { type FC, memo, useEffect, useRef, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { Center } from 'react-layout-kit';
@@ -429,10 +430,12 @@ const ImageThumbnails: FC<ImageThumbnailsProps> = memo(({ images, onClick, onDel

     return (
       <div className={styles.imageItem} key={imageUrl}>
-        <…
+        <Image
           alt={`Uploaded image ${index + 1}`}
+          fill
           src={imageUrl}
-          style={{…
+          style={{ objectFit: 'cover' }}
+          unoptimized
         />
         {!showOverlay && (
           <div
@@ -478,10 +481,12 @@ const SingleImageDisplay: FC<SingleImageDisplayProps> = memo(({ imageUrl, onClic

   return (
     <div className={styles.singleImageDisplay}>
-      <…
+      <Image
         alt="Uploaded image"
+        fill
         src={imageUrl}
-        style={{…
+        style={{ objectFit: 'contain' }}
+        unoptimized
       />

       {/* Delete button */}
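Both render paths above now go through `next/image` with the `fill` layout, which requires a positioned parent element with a concrete size; a minimal sketch of that pattern (component and container names here are illustrative, not taken from the diff):

```tsx
import Image from 'next/image';

// `fill` renders the image absolutely positioned, so the wrapper must be
// position: relative (or absolute/fixed) and define the visible size.
const UploadThumbnail = ({ src }: { src: string }) => (
  <div style={{ height: 96, position: 'relative', width: 96 }}>
    <Image
      alt="Uploaded image"
      fill
      src={src}
      style={{ objectFit: 'cover' }}
      // bypass the Next.js optimizer, e.g. for blob:/data: URLs from local uploads
      unoptimized
    />
  </div>
);

export default UploadThumbnail;
```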
package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/index.tsx

@@ -26,7 +26,7 @@ interface ConfigItemLayoutProps {
 const ConfigItemLayout = memo<ConfigItemLayoutProps>(({ label, children }) => {
   return (
     <Flexbox gap={8}>
-      {label && <Text weight={500}>{label…
+      {label && <Text weight={500}>{label}</Text>}
       {children}
     </Flexbox>
   );
package/src/app/[variants]/(main)/image/features/ImageWorkspace/EmptyState.tsx

@@ -1,4 +1,3 @@
-import { AuroraBackground } from '@lobehub/ui/awesome';
 import { memo } from 'react';
 import { Center, Flexbox } from 'react-layout-kit';

@@ -6,31 +5,18 @@ import PromptInput from '../PromptInput';

 const EmptyState = memo(() => {
   return (
-    … (old lines 9-20 not captured in this extract)
-    <Flexbox
-      flex={1}
-      height="100%"
-      style={{
-        overflow: 'hidden',
-        zIndex: 1,
-      }}
-    >
-      <Center flex={1} padding={24}>
-        <PromptInput showTitle={true} />
-      </Center>
-    </Flexbox>
-    </>
+    <Flexbox
+      flex={1}
+      height="100%"
+      style={{
+        overflow: 'hidden',
+        zIndex: 1,
+      }}
+    >
+      <Center flex={1} padding={24}>
+        <PromptInput showTitle={true} />
+      </Center>
+    </Flexbox>
   );
 });

package/src/config/aiModels/openai.ts

@@ -835,11 +835,27 @@ export const openaiSTTModels: AISTTModelCard[] = [

 // 图像生成模型
 export const openaiImageModels: AIImageModelCard[] = [
+  // https://platform.openai.com/docs/models/gpt-image-1
+  {
+    description: 'ChatGPT 原生多模态图片生成模型',
+    displayName: 'GPT Image 1',
+    enabled: true,
+    id: 'gpt-image-1',
+    parameters: gptImage1ParamsSchema,
+    type: 'image',
+  },
   {
     description:
       '最新的 DALL·E 模型,于2023年11月发布。支持更真实、准确的图像生成,具有更强的细节表现力',
     displayName: 'DALL·E 3',
     id: 'dall-e-3',
+    parameters: {
+      prompt: { default: '' },
+      size: {
+        default: '1024x1024',
+        enum: ['1024x1024', '1792x1024', '1024x1792'],
+      },
+    },
     pricing: {
       hd: 0.08,
       standard: 0.04,
@@ -851,21 +867,20 @@ export const openaiImageModels: AIImageModelCard[] = [
     description: '第二代 DALL·E 模型,支持更真实、准确的图像生成,分辨率是第一代的4倍',
     displayName: 'DALL·E 2',
     id: 'dall-e-2',
+    parameters: {
+      imageUrl: { default: null },
+      prompt: { default: '' },
+      size: {
+        default: '1024x1024',
+        enum: ['256x256', '512x512', '1024x1024'],
+      },
+    },
     pricing: {
       input: 0.02, // $0.020 per image (1024×1024)
     },
     resolutions: ['256x256', '512x512', '1024x1024'],
     type: 'image',
   },
-  // https://platform.openai.com/docs/models/gpt-image-1
-  {
-    description: 'ChatGPT 原生多模态图片生成模型',
-    displayName: 'GPT Image 1',
-    enabled: true,
-    id: 'gpt-image-1',
-    parameters: gptImage1ParamsSchema,
-    type: 'image',
-  },
 ];

 // GPT-4o 和 GPT-4o-mini 实时模型
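The `parameters` maps added above describe each image model's inputs as `{ default, enum }` specs; a rough sketch of turning such a map into initial form values (the helper and type names are illustrative, not the project's actual API):

```ts
// Illustrative types mirroring the parameter objects added in the diff.
type ParamSpec = { default: unknown; enum?: readonly unknown[] };
type ParamsSchema = Record<string, ParamSpec>;

const dallE3Params: ParamsSchema = {
  prompt: { default: '' },
  size: { default: '1024x1024', enum: ['1024x1024', '1792x1024', '1024x1792'] },
};

// Collect every parameter's default into an initial state object.
const buildDefaults = (schema: ParamsSchema) =>
  Object.fromEntries(Object.entries(schema).map(([name, spec]) => [name, spec.default]));

console.log(buildDefaults(dallE3Params)); // { prompt: '', size: '1024x1024' }
```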
package/src/libs/model-runtime/BaseAI.ts

@@ -43,6 +43,7 @@ export abstract class LobeOpenAICompatibleRuntime {
   abstract client: OpenAI;

   abstract chat(payload: ChatStreamPayload, options?: ChatMethodOptions): Promise<Response>;
+  abstract createImage(payload: CreateImagePayload): Promise<CreateImageResponse>;

   abstract models(): Promise<ChatModelCard[]>;

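Every OpenAI-compatible runtime now has to implement `createImage` alongside `chat` and `models`. A hedged sketch of a conforming method, assuming `CreateImagePayload` carries the model id plus generation params and `CreateImageResponse` wraps the resulting image URL (these field names are assumptions, not taken from the diff):

```ts
// Assumed shapes for illustration only; the real types live in the runtime package.
interface CreateImagePayload {
  model: string;
  params: { prompt: string; size?: string };
}

interface CreateImageResponse {
  imageUrl: string;
}

class ExampleImageRuntime {
  // Loosely typed client so the sketch does not assert a specific SDK signature.
  constructor(
    private client: { images: { generate: (args: Record<string, unknown>) => Promise<any> } },
  ) {}

  // Satisfies the new abstract createImage(payload) contract.
  async createImage(payload: CreateImagePayload): Promise<CreateImageResponse> {
    const { model, params } = payload;
    const res = await this.client.images.generate({ model, ...params });
    // Assume the provider responds with an array of image objects carrying a `url`.
    return { imageUrl: res.data?.[0]?.url ?? '' };
  }
}
```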
package/src/libs/model-runtime/hunyuan/index.ts

@@ -12,7 +12,8 @@ export const LobeHunyuanAI = createOpenAICompatibleRuntime({
   chatCompletion: {
     handlePayload: (payload) => {
       // eslint-disable-next-line unused-imports/no-unused-vars, @typescript-eslint/no-unused-vars
-      const { enabledSearch, frequency_penalty, model, presence_penalty, thinking, ...rest } =…
+      const { enabledSearch, frequency_penalty, model, presence_penalty, thinking, ...rest } =
+        payload;

       return {
         ...rest,
@@ -30,11 +31,8 @@ export const LobeHunyuanAI = createOpenAICompatibleRuntime({
           search_info: true,
         }),
         ...(model === 'hunyuan-a13b' && {
-          enable_thinking:…
-            ? true
-            : thinking?.type === 'disabled'
-              ? false
-              : undefined
+          enable_thinking:
+            thinking?.type === 'enabled' ? true : thinking?.type === 'disabled' ? false : undefined,
         }),
       } as any;
     },
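The rewritten ternary above maps the incoming `thinking` option onto Hunyuan's `enable_thinking` flag: explicitly enabled becomes `true`, explicitly disabled becomes `false`, and anything else omits the field. The same mapping pulled into a standalone helper (the helper itself is illustrative; the logic is the one in the diff):

```ts
type ThinkingType = 'enabled' | 'disabled';

// true when thinking is explicitly enabled, false when explicitly disabled,
// undefined (so the field is omitted) when no preference was expressed.
const toEnableThinking = (thinking?: { type?: ThinkingType }): boolean | undefined =>
  thinking?.type === 'enabled' ? true : thinking?.type === 'disabled' ? false : undefined;

console.log(toEnableThinking({ type: 'enabled' })); // true
console.log(toEnableThinking({ type: 'disabled' })); // false
console.log(toEnableThinking(undefined)); // undefined
```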
package/src/libs/model-runtime/novita/__snapshots__/index.test.ts.snap

@@ -11,6 +11,7 @@ exports[`NovitaAI > models > should get models 1`] = `
     "id": "meta-llama/llama-3-8b-instruct",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -22,6 +23,7 @@ exports[`NovitaAI > models > should get models 1`] = `
     "id": "meta-llama/llama-3-70b-instruct",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -33,6 +35,7 @@ exports[`NovitaAI > models > should get models 1`] = `
     "id": "meta-llama/llama-3.1-8b-instruct",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -44,6 +47,7 @@ exports[`NovitaAI > models > should get models 1`] = `
     "id": "meta-llama/llama-3.1-70b-instruct",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -55,6 +59,7 @@ exports[`NovitaAI > models > should get models 1`] = `
     "id": "meta-llama/llama-3.1-405b-instruct",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -67,6 +72,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "google/gemma-2-9b-it",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -78,6 +84,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "jondurbin/airoboros-l2-70b",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -89,6 +96,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "nousresearch/hermes-2-pro-llama-3-8b",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -100,6 +108,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "mistralai/mistral-7b-instruct",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -111,6 +120,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "cognitivecomputations/dolphin-mixtral-8x22b",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -122,6 +132,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "sao10k/l3-70b-euryale-v2.1",
     "maxOutput": undefined,
     "reasoning": true,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -133,6 +144,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "sophosympatheia/midnight-rose-70b",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -144,6 +156,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "gryphe/mythomax-l2-13b",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -155,6 +168,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "nousresearch/nous-hermes-llama2-13b",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -166,6 +180,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "Nous-Hermes-2-Mixtral-8x7B-DPO",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -177,6 +192,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "lzlv_70b",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -188,6 +204,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "teknium/openhermes-2.5-mistral-7b",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
   {
@@ -199,6 +216,7 @@ Designed for a wide variety of tasks, it empowers developers and researchers to
     "id": "microsoft/wizardlm-2-8x22b",
     "maxOutput": undefined,
     "reasoning": false,
+    "type": "chat",
     "vision": false,
   },
 ]
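Each Novita model card in the snapshot now records an explicit `type` (here always `"chat"`), matching the one-line addition in `modelParse.ts`. A minimal sketch of filtering a card list by that field, with the card shape trimmed to the keys visible in the snapshot:

```ts
// Trimmed to the fields visible in the snapshot above; the union is not exhaustive.
interface ModelCard {
  id: string;
  reasoning: boolean;
  type: 'chat' | 'image';
  vision: boolean;
}

const cards: ModelCard[] = [
  { id: 'meta-llama/llama-3-8b-instruct', reasoning: false, type: 'chat', vision: false },
  { id: 'gpt-image-1', reasoning: false, type: 'image', vision: false },
];

// Only chat-capable models should reach a chat model picker.
const chatModels = cards.filter((card) => card.type === 'chat');
console.log(chatModels.map((card) => card.id)); // [ 'meta-llama/llama-3-8b-instruct' ]
```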