@lobehub/chat 1.128.9 → 1.129.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/.env.example +5 -0
  2. package/.github/workflows/sync-database-schema.yml +0 -3
  3. package/CHANGELOG.md +50 -0
  4. package/Dockerfile +3 -1
  5. package/Dockerfile.database +3 -1
  6. package/Dockerfile.pglite +3 -1
  7. package/changelog/v1.json +18 -0
  8. package/docs/development/database-schema.dbml +2 -2
  9. package/docs/self-hosting/environment-variables/model-provider.mdx +24 -0
  10. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +27 -1
  11. package/docs/usage/providers/vercel-ai-gateway.mdx +62 -0
  12. package/docs/usage/providers/vercel-ai-gateway.zh-CN.mdx +61 -0
  13. package/next.config.ts +1 -46
  14. package/package.json +1 -2
  15. package/packages/agent-runtime/examples/tools-calling.ts +1 -1
  16. package/packages/const/src/layoutTokens.ts +1 -1
  17. package/packages/context-engine/src/base/BaseProcessor.ts +2 -1
  18. package/packages/database/migrations/0031_add_agent_index.sql +6 -2
  19. package/packages/database/migrations/0032_improve_agents_field.sql +6 -0
  20. package/packages/database/migrations/meta/0032_snapshot.json +6447 -0
  21. package/packages/database/migrations/meta/_journal.json +7 -0
  22. package/packages/database/src/core/migrations.json +14 -3
  23. package/packages/database/src/schemas/agent.ts +2 -2
  24. package/packages/database/src/server/models/__tests__/adapter.test.ts +1 -1
  25. package/packages/model-bank/package.json +2 -1
  26. package/packages/model-bank/src/aiModels/index.ts +3 -0
  27. package/packages/model-bank/src/aiModels/vercelaigateway.ts +1803 -0
  28. package/packages/model-runtime/src/const/modelProvider.ts +1 -0
  29. package/packages/model-runtime/src/providers/vercelaigateway/index.ts +62 -0
  30. package/packages/model-runtime/src/runtimeMap.ts +2 -0
  31. package/packages/types/src/user/settings/keyVaults.ts +1 -0
  32. package/src/app/(backend)/webapi/chat/azureai/route.test.ts +25 -0
  33. package/src/app/(backend)/webapi/chat/azureai/route.ts +6 -0
  34. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/index.tsx +8 -1
  35. package/src/components/Error/index.tsx +3 -7
  36. package/src/config/modelProviders/index.ts +4 -0
  37. package/src/config/modelProviders/vercelaigateway.ts +21 -0
  38. package/src/envs/llm.ts +6 -0
  39. package/sentry.client.config.ts +0 -30
  40. package/sentry.edge.config.ts +0 -17
  41. package/sentry.server.config.ts +0 -19
  42. package/src/app/[variants]/global-error.tsx +0 -20
  43. package/src/components/Error/sentryCaptureException.ts +0 -9
package/packages/model-runtime/src/const/modelProvider.ts CHANGED
@@ -55,6 +55,7 @@ export enum ModelProvider {
  Upstage = 'upstage',
  V0 = 'v0',
  VLLM = 'vllm',
+ VercelAIGateway = 'vercelaigateway',
  VertexAI = 'vertexai',
  Volcengine = 'volcengine',
  Wenxin = 'wenxin',
package/packages/model-runtime/src/providers/vercelaigateway/index.ts ADDED
@@ -0,0 +1,62 @@
+ import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
+ import { AgentRuntimeErrorType, ChatCompletionErrorPayload, ModelProvider } from '../../types';
+ import { processMultiProviderModelList } from '../../utils/modelParse';
+
+ export interface VercelAIGatewayModelCard {
+   id: string;
+ }
+
+ export const LobeVercelAIGatewayAI = createOpenAICompatibleRuntime({
+   baseURL: 'https://ai-gateway.vercel.sh/v1',
+   chatCompletion: {
+     handleError: (error: any): Omit<ChatCompletionErrorPayload, 'provider'> | undefined => {
+       let errorResponse: Response | undefined;
+       if (error instanceof Response) {
+         errorResponse = error;
+       } else if ('status' in (error as any)) {
+         errorResponse = error as Response;
+       }
+       if (errorResponse) {
+         if (errorResponse.status === 401) {
+           return {
+             error: errorResponse.status,
+             errorType: AgentRuntimeErrorType.InvalidProviderAPIKey,
+           };
+         }
+
+         if (errorResponse.status === 403) {
+           return {
+             error: errorResponse.status,
+             errorType: AgentRuntimeErrorType.ProviderBizError,
+             message:
+               'Please check if your API key has sufficient balance or if you have access to the requested model.',
+           };
+         }
+       }
+       return {
+         error,
+       };
+     },
+     handlePayload: (payload) => {
+       const { model, ...rest } = payload;
+       return {
+         ...rest,
+         model,
+       } as any;
+     },
+   },
+   debug: {
+     chatCompletion: () => process.env.DEBUG_VERCELAIGATEWAY_CHAT_COMPLETION === '1',
+   },
+   errorType: {
+     bizError: AgentRuntimeErrorType.ProviderBizError,
+     invalidAPIKey: AgentRuntimeErrorType.InvalidProviderAPIKey,
+   },
+   models: async ({ client }) => {
+     const modelsPage = (await client.models.list()) as any;
+     const modelList: VercelAIGatewayModelCard[] = modelsPage.data;
+
+     return processMultiProviderModelList(modelList, 'vercelaigateway');
+   },
+   provider: ModelProvider.VercelAIGateway,
+ });
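The handleError hook above maps gateway HTTP failures onto the runtime's error taxonomy: 401 becomes InvalidProviderAPIKey, 403 becomes ProviderBizError with a balance/access hint, and anything else passes through unchanged. A self-contained TypeScript sketch of that mapping, with plain string literals standing in for the repo's AgentRuntimeErrorType constants:

// Stand-ins for AgentRuntimeErrorType values (illustrative, not the repo's enum).
type MappedError =
  | { error: number; errorType: 'InvalidProviderAPIKey' }
  | { error: number; errorType: 'ProviderBizError'; message: string }
  | { error: unknown };

const mapGatewayError = (error: unknown): MappedError => {
  // Accept either a fetch Response or any object exposing a numeric status.
  const status =
    error instanceof Response
      ? error.status
      : typeof error === 'object' && error !== null && 'status' in error
        ? (error as { status: number }).status
        : undefined;

  if (status === 401) return { error: status, errorType: 'InvalidProviderAPIKey' };
  if (status === 403)
    return {
      error: status,
      errorType: 'ProviderBizError',
      message:
        'Please check if your API key has sufficient balance or if you have access to the requested model.',
    };
  return { error };
};

// A 401 Response resolves to the invalid-key error type.
console.log(mapGatewayError(new Response(null, { status: 401 })));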
package/packages/model-runtime/src/runtimeMap.ts CHANGED
@@ -52,6 +52,7 @@ import { LobeTencentCloudAI } from './providers/tencentcloud';
  import { LobeTogetherAI } from './providers/togetherai';
  import { LobeUpstageAI } from './providers/upstage';
  import { LobeV0AI } from './providers/v0';
+ import { LobeVercelAIGatewayAI } from './providers/vercelaigateway';
  import { LobeVLLMAI } from './providers/vllm';
  import { LobeVolcengineAI } from './providers/volcengine';
  import { LobeWenxinAI } from './providers/wenxin';
@@ -115,6 +116,7 @@ export const providerRuntimeMap = {
  togetherai: LobeTogetherAI,
  upstage: LobeUpstageAI,
  v0: LobeV0AI,
+ vercelaigateway: LobeVercelAIGatewayAI,
  vllm: LobeVLLMAI,
  volcengine: LobeVolcengineAI,
  wenxin: LobeWenxinAI,
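Both hunks follow the repo's registration pattern: import the runtime class, then key it under its provider id so a provider string taken from a request can be resolved to a runtime. A toy sketch of that lookup, using illustrative stand-in types rather than the repo's real ones:

// Minimal stand-in for the runtime interface (illustrative only).
interface ProviderRuntime {
  chat(prompt: string): Promise<string>;
}

class FakeVercelAIGatewayRuntime implements ProviderRuntime {
  async chat(prompt: string) {
    return `gateway answer to: ${prompt}`;
  }
}

// Registration: provider id -> runtime class, mirroring providerRuntimeMap.
const runtimeMap: Record<string, new () => ProviderRuntime> = {
  vercelaigateway: FakeVercelAIGatewayRuntime,
};

// Resolution at request time.
const Runtime = runtimeMap['vercelaigateway'];
new Runtime().chat('ping').then(console.log);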
package/packages/types/src/user/settings/keyVaults.ts CHANGED
@@ -93,6 +93,7 @@ export interface UserKeyVaults extends SearchEngineKeyVaults {
  togetherai?: OpenAICompatibleKeyVault;
  upstage?: OpenAICompatibleKeyVault;
  v0?: OpenAICompatibleKeyVault;
+ vercelaigateway?: OpenAICompatibleKeyVault;
  vertexai?: OpenAICompatibleKeyVault;
  vllm?: OpenAICompatibleKeyVault;
  volcengine?: OpenAICompatibleKeyVault;
package/src/app/(backend)/webapi/chat/azureai/route.test.ts ADDED
@@ -0,0 +1,25 @@
+ // @vitest-environment edge-runtime
+ import { describe, expect, it, vi } from 'vitest';
+
+ import { POST as UniverseRoute } from '../[provider]/route';
+ import { POST, runtime } from './route';
+
+ vi.mock('../[provider]/route', () => ({
+   POST: vi.fn().mockResolvedValue('mocked response'),
+ }));
+
+ describe('Configuration tests', () => {
+   it('should have runtime set to "edge"', () => {
+     expect(runtime).toBe('edge');
+   });
+ });
+
+ describe('AzureAI POST function tests', () => {
+   it('should call UniverseRoute with correct parameters', async () => {
+     const mockRequest = new Request('https://example.com', { method: 'POST' });
+     await POST(mockRequest);
+     expect(UniverseRoute).toHaveBeenCalledWith(mockRequest, {
+       params: Promise.resolve({ provider: 'azureai' }),
+     });
+   });
+ });
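One detail worth knowing when copying this test pattern: Vitest hoists vi.mock factory calls to the top of the module, so the mock of '../[provider]/route' is installed before './route' imports it, even though the mock appears below the import statements.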
package/src/app/(backend)/webapi/chat/azureai/route.ts ADDED
@@ -0,0 +1,6 @@
+ import { POST as UniverseRoute } from '../[provider]/route';
+
+ export const runtime = 'edge';
+
+ export const POST = async (req: Request) =>
+   UniverseRoute(req, { params: Promise.resolve({ provider: 'azureai' }) });
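This is the thin-route pattern used across the webapi/chat routes: each provider-specific edge route pins its provider id and forwards the request to the universal [provider] handler. A self-contained sketch of the same delegation, with an illustrative stand-in for the universal handler:

type RouteContext = { params: Promise<{ provider: string }> };

// Stand-in for the universal [provider]/route handler (illustrative only).
const universalPOST = async (req: Request, ctx: RouteContext): Promise<Response> => {
  const { provider } = await ctx.params;
  return new Response(`dispatched ${req.method} to ${provider}`);
};

// Provider-specific route: pins provider to 'azureai' and delegates.
export const POST = (req: Request) =>
  universalPOST(req, { params: Promise.resolve({ provider: 'azureai' }) });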
package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/index.tsx CHANGED
@@ -16,7 +16,14 @@ const Header = () => {
    <ChatHeader
      left={<Main />}
      right={<HeaderAction />}
-     style={{ paddingInline: 8, position: 'initial', zIndex: 11 }}
+     style={{
+       height: 40,
+       maxHeight: 40,
+       minHeight: 40,
+       paddingInline: 8,
+       position: 'initial',
+       zIndex: 11,
+     }}
    />
  )
);
package/src/components/Error/index.tsx CHANGED
@@ -2,26 +2,22 @@

  import { Button, FluentEmoji } from '@lobehub/ui';
  import Link from 'next/link';
- import { memo, useLayoutEffect } from 'react';
+ import { memo } from 'react';
  import { useTranslation } from 'react-i18next';
  import { Flexbox } from 'react-layout-kit';

  import { MAX_WIDTH } from '@/const/layoutTokens';

- import { type ErrorType, sentryCaptureException } from './sentryCaptureException';
+ export type ErrorType = Error & { digest?: string };

  interface ErrorCaptureProps {
    error: ErrorType;
    reset: () => void;
  }

- const ErrorCapture = memo<ErrorCaptureProps>(({ reset, error }) => {
+ const ErrorCapture = memo<ErrorCaptureProps>(({ reset }) => {
    const { t } = useTranslation('error');

-   useLayoutEffect(() => {
-     sentryCaptureException(error);
-   }, [error]);
-
    return (
      <Flexbox align={'center'} justify={'center'} style={{ minHeight: '100%', width: '100%' }}>
        <h1
package/src/config/modelProviders/index.ts CHANGED
@@ -54,6 +54,7 @@ import TencentcloudProvider from './tencentcloud';
  import TogetherAIProvider from './togetherai';
  import UpstageProvider from './upstage';
  import V0Provider from './v0';
+ import VercelAIGatewayProvider from './vercelaigateway';
  import VertexAIProvider from './vertexai';
  import VLLMProvider from './vllm';
  import VolcengineProvider from './volcengine';
@@ -116,6 +117,7 @@ export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
  Search1APIProvider.chatModels,
  InfiniAIProvider.chatModels,
  QiniuProvider.chatModels,
+ VercelAIGatewayProvider.chatModels,
  ].flat();

  export const DEFAULT_MODEL_PROVIDER_LIST = [
@@ -181,6 +183,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
  QiniuProvider,
  NebiusProvider,
  CometAPIProvider,
+ VercelAIGatewayProvider,
  ];

  export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -247,6 +250,7 @@ export { default as TencentCloudProviderCard } from './tencentcloud';
  export { default as TogetherAIProviderCard } from './togetherai';
  export { default as UpstageProviderCard } from './upstage';
  export { default as V0ProviderCard } from './v0';
+ export { default as VercelAIGatewayProviderCard } from './vercelaigateway';
  export { default as VertexAIProviderCard } from './vertexai';
  export { default as VLLMProviderCard } from './vllm';
  export { default as VolcengineProviderCard } from './volcengine';
package/src/config/modelProviders/vercelaigateway.ts ADDED
@@ -0,0 +1,21 @@
+ import { ModelProviderCard } from '@/types/llm';
+
+ const VercelAIGateway: ModelProviderCard = {
+   apiKeyUrl: 'https://vercel.com/dashboard/ai-gateway',
+   chatModels: [],
+   checkModel: 'openai/gpt-4o-mini',
+   description:
+     'Vercel AI Gateway provides a unified API for accessing 100+ models: a single endpoint covers models from OpenAI, Anthropic, Google, and other providers, with support for budget limits, usage monitoring, request load balancing, and failover.',
+   enabled: true,
+   id: 'vercelaigateway',
+   modelList: { showModelFetcher: true },
+   modelsUrl: 'https://vercel.com/ai-gateway/models',
+   name: 'Vercel AI Gateway',
+   settings: {
+     responseAnimation: 'smooth',
+     showModelFetcher: true,
+   },
+   url: 'https://vercel.com/ai-gateway',
+ };
+
+ export default VercelAIGateway;
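Note the checkModel value: the gateway addresses models as vendor/model (here openai/gpt-4o-mini), which is presumably also why the runtime routes the fetched model list through processMultiProviderModelList rather than a single-provider parser.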
package/src/envs/llm.ts CHANGED
@@ -178,6 +178,9 @@ export const getLLMConfig = () => {
  ENABLED_V0: z.boolean(),
  V0_API_KEY: z.string().optional(),

+ ENABLED_VERCELAIGATEWAY: z.boolean(),
+ VERCELAIGATEWAY_API_KEY: z.string().optional(),
+
  ENABLED_AI302: z.boolean(),
  AI302_API_KEY: z.string().optional(),

@@ -366,6 +369,9 @@ export const getLLMConfig = () => {
  ENABLED_V0: !!process.env.V0_API_KEY,
  V0_API_KEY: process.env.V0_API_KEY,

+ ENABLED_VERCELAIGATEWAY: !!process.env.VERCELAIGATEWAY_API_KEY,
+ VERCELAIGATEWAY_API_KEY: process.env.VERCELAIGATEWAY_API_KEY,
+
  ENABLED_AI302: !!process.env.AI302_API_KEY,
  AI302_API_KEY: process.env.AI302_API_KEY,

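The pattern here switches the provider on server-side exactly when its API key is present in the environment. A minimal sketch of that derivation, assuming zod is available and keeping only the two new fields from the hunk:

import { z } from 'zod';

// The enabled flag derives from key presence; the key itself stays optional.
const schema = z.object({
  ENABLED_VERCELAIGATEWAY: z.boolean(),
  VERCELAIGATEWAY_API_KEY: z.string().optional(),
});

export const llmConfig = schema.parse({
  ENABLED_VERCELAIGATEWAY: !!process.env.VERCELAIGATEWAY_API_KEY,
  VERCELAIGATEWAY_API_KEY: process.env.VERCELAIGATEWAY_API_KEY,
});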
package/sentry.client.config.ts DELETED
@@ -1,30 +0,0 @@
- // This file configures the initialization of Sentry on the client.
- // The config you add here will be used whenever a user loads a page in their browser.
- // https://docs.sentry.io/platforms/javascript/guides/nextjs/
- import * as Sentry from '@sentry/nextjs';
-
- if (!!process.env.NEXT_PUBLIC_SENTRY_DSN) {
-   Sentry.init({
-     // Setting this option to true will print useful information to the console while you're setting up Sentry.
-     debug: false,
-
-     dsn: process.env.NEXT_PUBLIC_SENTRY_DSN,
-     // You can remove this option if you're not planning to use the Sentry Session Replay feature:
-     integrations: [
-       Sentry.replayIntegration({
-         blockAllMedia: true,
-         // Additional Replay configuration goes in here, for example:
-         maskAllText: true,
-       }),
-     ],
-
-     replaysOnErrorSampleRate: 1,
-
-     // This sets the sample rate to be 10%. You may want this to be 100% while
-     // in development and sample at a lower rate in production
-     replaysSessionSampleRate: 0.1,
-
-     // Adjust this value in production, or use tracesSampler for greater control
-     tracesSampleRate: 1,
-   });
- }
package/sentry.edge.config.ts DELETED
@@ -1,17 +0,0 @@
- // This file configures the initialization of Sentry for edge features (middleware, edge routes, and so on).
- // The config you add here will be used whenever one of the edge features is loaded.
- // Note that this config is unrelated to the Vercel Edge Runtime and is also required when running locally.
- // https://docs.sentry.io/platforms/javascript/guides/nextjs/
- import * as Sentry from '@sentry/nextjs';
-
- if (!!process.env.NEXT_PUBLIC_SENTRY_DSN) {
-   Sentry.init({
-     // Setting this option to true will print useful information to the console while you're setting up Sentry.
-     debug: false,
-
-     dsn: process.env.NEXT_PUBLIC_SENTRY_DSN,
-
-     // Adjust this value in production, or use tracesSampler for greater control
-     tracesSampleRate: 1,
-   });
- }
package/sentry.server.config.ts DELETED
@@ -1,19 +0,0 @@
- // This file configures the initialization of Sentry on the server.
- // The config you add here will be used whenever the server handles a request.
- // https://docs.sentry.io/platforms/javascript/guides/nextjs/
- import * as Sentry from '@sentry/nextjs';
-
- if (!!process.env.NEXT_PUBLIC_SENTRY_DSN) {
-   Sentry.init({
-     // Setting this option to true will print useful information to the console while you're setting up Sentry.
-     debug: false,
-
-     dsn: process.env.NEXT_PUBLIC_SENTRY_DSN,
-
-     // Adjust this value in production, or use tracesSampler for greater control
-     tracesSampleRate: 1,
-
-     // uncomment the line below to enable Spotlight (https://spotlightjs.com)
-     // spotlight: process.env.NODE_ENV === 'development',
-   });
- }
package/src/app/[variants]/global-error.tsx DELETED
@@ -1,20 +0,0 @@
- 'use client';
-
- import Error from 'next/error';
- import { useLayoutEffect } from 'react';
-
- import { type ErrorType, sentryCaptureException } from '@/components/Error/sentryCaptureException';
-
- export default function GlobalError({ error }: { error: ErrorType; reset: () => void }) {
-   useLayoutEffect(() => {
-     sentryCaptureException(error);
-   }, [error]);
-
-   return (
-     <html>
-       <body>
-         <Error statusCode={undefined as any} />
-       </body>
-     </html>
-   );
- }
package/src/components/Error/sentryCaptureException.ts DELETED
@@ -1,9 +0,0 @@
- import { appEnv } from '@/envs/app';
-
- export type ErrorType = Error & { digest?: string };
-
- export const sentryCaptureException = async (error: Error & { digest?: string }) => {
-   if (!appEnv.NEXT_PUBLIC_ENABLE_SENTRY) return;
-   const { captureException } = await import('@sentry/nextjs');
-   return captureException(error);
- };