@lobehub/chat 1.128.9 → 1.129.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/.env.example +5 -0
  2. package/.github/workflows/sync-database-schema.yml +0 -3
  3. package/CHANGELOG.md +50 -0
  4. package/Dockerfile +3 -1
  5. package/Dockerfile.database +3 -1
  6. package/Dockerfile.pglite +3 -1
  7. package/changelog/v1.json +18 -0
  8. package/docs/development/database-schema.dbml +2 -2
  9. package/docs/self-hosting/environment-variables/model-provider.mdx +24 -0
  10. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +27 -1
  11. package/docs/usage/providers/vercel-ai-gateway.mdx +62 -0
  12. package/docs/usage/providers/vercel-ai-gateway.zh-CN.mdx +61 -0
  13. package/next.config.ts +1 -46
  14. package/package.json +1 -2
  15. package/packages/agent-runtime/examples/tools-calling.ts +1 -1
  16. package/packages/const/src/layoutTokens.ts +1 -1
  17. package/packages/context-engine/src/base/BaseProcessor.ts +2 -1
  18. package/packages/database/migrations/0031_add_agent_index.sql +6 -2
  19. package/packages/database/migrations/0032_improve_agents_field.sql +6 -0
  20. package/packages/database/migrations/meta/0032_snapshot.json +6447 -0
  21. package/packages/database/migrations/meta/_journal.json +7 -0
  22. package/packages/database/src/core/migrations.json +14 -3
  23. package/packages/database/src/schemas/agent.ts +2 -2
  24. package/packages/database/src/server/models/__tests__/adapter.test.ts +1 -1
  25. package/packages/model-bank/package.json +2 -1
  26. package/packages/model-bank/src/aiModels/index.ts +3 -0
  27. package/packages/model-bank/src/aiModels/vercelaigateway.ts +1803 -0
  28. package/packages/model-runtime/src/const/modelProvider.ts +1 -0
  29. package/packages/model-runtime/src/providers/vercelaigateway/index.ts +62 -0
  30. package/packages/model-runtime/src/runtimeMap.ts +2 -0
  31. package/packages/types/src/user/settings/keyVaults.ts +1 -0
  32. package/src/app/(backend)/webapi/chat/azureai/route.test.ts +25 -0
  33. package/src/app/(backend)/webapi/chat/azureai/route.ts +6 -0
  34. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/index.tsx +8 -1
  35. package/src/components/Error/index.tsx +3 -7
  36. package/src/config/modelProviders/index.ts +4 -0
  37. package/src/config/modelProviders/vercelaigateway.ts +21 -0
  38. package/src/envs/llm.ts +6 -0
  39. package/sentry.client.config.ts +0 -30
  40. package/sentry.edge.config.ts +0 -17
  41. package/sentry.server.config.ts +0 -19
  42. package/src/app/[variants]/global-error.tsx +0 -20
  43. package/src/components/Error/sentryCaptureException.ts +0 -9
package/.env.example CHANGED
@@ -178,6 +178,11 @@ OPENAI_API_KEY=sk-xxxxxxxxx
178
178
  # NEWAPI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
179
179
  # NEWAPI_PROXY_URL=https://your-newapi-server.com
180
180
 
181
+ ### Vercel AI Gateway ###
182
+
183
+ # VERCELAIGATEWAY_API_KEY=your_vercel_ai_gateway_api_key
184
+
185
+
181
186
  ########################################
182
187
  ############ Market Service ############
183
188
  ########################################
@@ -21,9 +21,6 @@ jobs:
21
21
  - name: Install deps
22
22
  run: bun i
23
23
 
24
- - name: Check dbdocs
25
- run: dbdocs
26
-
27
24
  - name: sync database schema to dbdocs
28
25
  env:
29
26
  DBDOCS_TOKEN: ${{ secrets.DBDOCS_TOKEN }}
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ## [Version 1.129.0](https://github.com/lobehub/lobe-chat/compare/v1.128.10...v1.129.0)
6
+
7
+ <sup>Released on **2025-09-16**</sup>
8
+
9
+ #### ✨ Features
10
+
11
+ - **misc**: Support Vercel AI Gateway provider.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### What's improved
19
+
20
+ - **misc**: Support Vercel AI Gateway provider, closes [#8883](https://github.com/lobehub/lobe-chat/issues/8883) ([5a4b0fd](https://github.com/lobehub/lobe-chat/commit/5a4b0fd))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 1.128.10](https://github.com/lobehub/lobe-chat/compare/v1.128.9...v1.128.10)
31
+
32
+ <sup>Released on **2025-09-16**</sup>
33
+
34
+ #### 🐛 Bug Fixes
35
+
36
+ - **misc**: Fix azure ai runtime error.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### What's fixed
44
+
45
+ - **misc**: Fix azure ai runtime error, closes [#9276](https://github.com/lobehub/lobe-chat/issues/9276) ([c21c14e](https://github.com/lobehub/lobe-chat/commit/c21c14e))
46
+
47
+ </details>
48
+
49
+ <div align="right">
50
+
51
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
52
+
53
+ </div>
54
+
5
55
  ### [Version 1.128.9](https://github.com/lobehub/lobe-chat/compare/v1.128.8...v1.128.9)
6
56
 
7
57
  <sup>Released on **2025-09-15**</sup>
package/Dockerfile CHANGED
@@ -257,7 +257,9 @@ ENV \
257
257
  # FAL
258
258
  FAL_API_KEY="" FAL_MODEL_LIST="" \
259
259
  # BFL
260
- BFL_API_KEY="" BFL_MODEL_LIST=""
260
+ BFL_API_KEY="" BFL_MODEL_LIST="" \
261
+ # Vercel AI Gateway
262
+ VERCELAIGATEWAY_API_KEY="" VERCELAIGATEWAY_MODEL_LIST=""
261
263
 
262
264
  USER nextjs
263
265
 
@@ -299,7 +299,9 @@ ENV \
299
299
  # FAL
300
300
  FAL_API_KEY="" FAL_MODEL_LIST="" \
301
301
  # BFL
302
- BFL_API_KEY="" BFL_MODEL_LIST=""
302
+ BFL_API_KEY="" BFL_MODEL_LIST="" \
303
+ # Vercel AI Gateway
304
+ VERCELAIGATEWAY_API_KEY="" VERCELAIGATEWAY_MODEL_LIST=""
303
305
 
304
306
  USER nextjs
305
307
 
package/Dockerfile.pglite CHANGED
@@ -255,7 +255,9 @@ ENV \
255
255
  # FAL
256
256
  FAL_API_KEY="" FAL_MODEL_LIST="" \
257
257
  # BFL
258
- BFL_API_KEY="" BFL_MODEL_LIST=""
258
+ BFL_API_KEY="" BFL_MODEL_LIST="" \
259
+ # Vercel AI Gateway
260
+ VERCELAIGATEWAY_API_KEY="" VERCELAIGATEWAY_MODEL_LIST=""
259
261
 
260
262
  USER nextjs
261
263
 
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
1
1
  [
2
+ {
3
+ "children": {
4
+ "features": [
5
+ "Support Vercel AI Gateway provider."
6
+ ]
7
+ },
8
+ "date": "2025-09-16",
9
+ "version": "1.129.0"
10
+ },
11
+ {
12
+ "children": {
13
+ "fixes": [
14
+ "Fix azure ai runtime error."
15
+ ]
16
+ },
17
+ "date": "2025-09-16",
18
+ "version": "1.128.10"
19
+ },
2
20
  {
3
21
  "children": {
4
22
  "improvements": [
@@ -1,8 +1,8 @@
1
1
  table agents {
2
2
  id text [pk, not null]
3
3
  slug varchar(100) [unique]
4
- title text
5
- description text
4
+ title varchar(255)
5
+ description varchar(1000)
6
6
  tags jsonb [default: `[]`]
7
7
  avatar text
8
8
  background_color text
@@ -3,6 +3,7 @@ title: LobeChat Model Service Providers - Environment Variables and Configuratio
3
3
  description: >-
4
4
  Learn about the environment variables and configuration settings for various model service providers like OpenAI, Google AI, AWS Bedrock, Ollama, Perplexity AI, Anthropic AI, Mistral AI, Groq AI, OpenRouter AI, and 01.AI.
5
5
 
6
+
6
7
  tags:
7
8
  - Model Service Providers
8
9
  - Environment Variables
@@ -693,4 +694,27 @@ The above example disables all models first, then enables `flux-pro-1.1` and `fl
693
694
 
694
695
  NewAPI is a multi-provider model aggregation service that supports automatic model routing based on provider detection. It offers cost management features and provides a single endpoint for accessing models from multiple providers including OpenAI, Anthropic, Google, and more. Learn more about NewAPI at [https://github.com/Calcium-Ion/new-api](https://github.com/Calcium-Ion/new-api).
695
696
 
697
+ ## Vercel AI Gateway
698
+
699
+ ### `ENABLED_VERCELAIGATEWAY`
700
+
701
+ - Type: Optional
702
+ - Description: Enables Vercel AI Gateway as a model provider by default. Set to `0` to disable the Vercel AI Gateway service.
703
+ - Default: `1`
704
+ - Example: `0`
705
+
706
+ ### `VERCELAIGATEWAY_API_KEY`
707
+
708
+ - Type: Required
709
+ - Description: This is the API key you applied for in the Vercel AI Gateway service.
710
+ - Default: -
711
+ - Example: `vck_xxxxxx...xxxxxx`
712
+
713
+ ### `VERCELAIGATEWAY_MODEL_LIST`
714
+
715
+ - Type: Optional
716
+ - Description: Used to control the Vercel AI Gateway model list. Use `+` to add a model, `-` to hide a model, and `model_name=display_name` to customize the display name of a model. Separate multiple entries with commas. The definition syntax follows the same rules as other providers' model lists.
717
+ - Default: `-`
718
+ - Example: `-all,+vercel-model-1,+vercel-model-2=vercel-special`
719
+
696
720
  [model-list]: /docs/self-hosting/advanced/model-list
@@ -691,7 +691,33 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
691
691
  - 示例:`https://your-newapi-server.com`
692
692
 
693
693
  <Callout type={'info'}>
694
- NewAPI 是一个多供应商模型聚合服务,支持基于供应商检测的自动模型路由。它提供成本管理功能,并为访问包括 OpenAI、Anthropic、Google 等多个供应商的模型提供单一端点。了解更多关于 NewAPI 的信息请访问 [https://github.com/Calcium-Ion/new-api](https://github.com/Calcium-Ion/new-api)。
694
+ NewAPI
695
+ 是一个多供应商模型聚合服务,支持基于供应商检测的自动模型路由。它提供成本管理功能,并为访问包括
696
+ OpenAI、Anthropic、Google 等多个供应商的模型提供单一端点。了解更多关于 NewAPI 的信息请访问
697
+ [https://github.com/Calcium-Ion/new-api](https://github.com/Calcium-Ion/new-api)。
695
698
  </Callout>
696
699
 
700
+ ## Vercel AI Gateway
701
+
702
+ ### `ENABLED_VERCELAIGATEWAY`
703
+
704
+ - 类型:可选
705
+ - 描述:默认启用 Vercel AI Gateway 作为模型供应商,当设为 0 时关闭 Vercel AI Gateway 服务
706
+ - 默认值:`1`
707
+ - 示例:`0`
708
+
709
+ ### `VERCELAIGATEWAY_API_KEY`
710
+
711
+ - 类型:必选
712
+ - 描述:这是你在 Vercel AI Gateway 服务中申请的 API 密钥
713
+ - 默认值:-
714
+ - 示例:`vck_xxxxxx...xxxxxx`
715
+
716
+ ### `VERCELAIGATEWAY_MODEL_LIST`
717
+
718
+ - 类型:可选
719
+ - 描述:用来控制 Vercel AI Gateway 模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则与其他 provider 保持一致。
720
+ - 默认值:`-`
721
+ - 示例:`-all,+vercel-model-1,+vercel-model-2=vercel-special`
722
+
697
723
  [model-list]: /zh/docs/self-hosting/advanced/model-list
@@ -0,0 +1,62 @@
1
+ ---
2
+ title: Using Vercel AI Gateway in LobeChat
3
+ description: >-
4
+ Learn how to integrate and utilize Vercel AI Gateway's unified API in LobeChat.
5
+
6
+ tags:
7
+ - LobeChat
8
+ - Vercel AI Gateway
9
+ - API Key
10
+ - Web UI
11
+ ---
12
+
13
+ # Using Vercel AI Gateway in LobeChat
14
+
15
+ [Vercel AI Gateway](https://vercel.com/ai-gateway) is a unified API that provides access to 100+ AI models through a single endpoint. It offers features like budget management, usage monitoring, load balancing, and fallback handling.
16
+
17
+ This article will guide you on how to use Vercel AI Gateway in LobeChat.
18
+
19
+ <Steps>
20
+ ### Step 1: Create an API Key in Vercel AI Gateway
21
+
22
+ - Go to [Vercel Dashboard](https://vercel.com/dashboard)
23
+ - Click on the **AI Gateway** tab on the left side
24
+ - Click on **API keys** in the left sidebar
25
+ - Click **Create key** and then **Create key** in the dialog to complete
26
+
27
+ ### Step 2: Configure Vercel AI Gateway in LobeChat
28
+
29
+ - Go to the `Settings` page in LobeChat
30
+ - Under `AI Service Provider`, find the setting for `Vercel AI Gateway`
31
+ - Enter the API Key you obtained
32
+ - Choose a model from Vercel AI Gateway for your AI assistant to start the conversation
33
+
34
+ <Callout type={'warning'}>
35
+ During usage, you may need to pay the API service provider, so please refer to Vercel AI Gateway's
36
+ [pricing policy](https://vercel.com/docs/ai-gateway/models).
37
+ </Callout>
38
+ </Steps>
39
+
40
+ At this point, you can start chatting using the models provided by Vercel AI Gateway in LobeChat.
41
+
42
+ ## Model Selection
43
+
44
+ Vercel AI Gateway supports various model providers including:
45
+
46
+ - **OpenAI**: `openai/gpt-4o`, `openai/gpt-4o-mini`, `openai/o1`, etc.
47
+ - **Anthropic**: `anthropic/claude-3-5-sonnet`, `anthropic/claude-3-opus`, etc.
48
+ - **Google**: `google/gemini-2.5-pro`, `google/gemini-2.0-flash`, etc.
49
+ - **DeepSeek**: `deepseek/deepseek-chat`, `deepseek/deepseek-reasoner`, etc.
50
+ - And many more...
51
+
52
+ For a complete list of supported models, visit [Vercel AI Gateway Models](https://vercel.com/ai-gateway/models).
53
+
54
+ ## API Configuration
55
+
56
+ Vercel AI Gateway uses OpenAI-compatible API format. The base URL is:
57
+
58
+ ```
59
+ https://ai-gateway.vercel.sh/v1
60
+ ```
61
+
62
+ You can use any OpenAI-compatible client with this endpoint and your API key.
@@ -0,0 +1,61 @@
1
+ ---
2
+ title: 在 LobeChat 中使用 Vercel AI Gateway
3
+ description: 了解如何在 LobeChat 中集成和使用 Vercel AI Gateway 的统一 API
4
+
5
+ tags:
6
+ - LobeChat
7
+ - Vercel AI Gateway
8
+ - API 密钥
9
+ - Web 界面
10
+ ---
11
+
12
+ # 在 LobeChat 中使用 Vercel AI Gateway
13
+
14
+ [Vercel AI Gateway](https://vercel.com/ai-gateway) 是一个统一的 API,通过单一端点提供对 100+ AI 模型的访问。它提供预算管理、使用监控、负载均衡和回退处理等功能。
15
+
16
+ 本文将指导您如何在 LobeChat 中使用 Vercel AI Gateway。
17
+
18
+ <Steps>
19
+ ### 第一步:在 Vercel AI Gateway 中创建 API 密钥
20
+
21
+ - 访问 [Vercel 控制台](https://vercel.com/dashboard)
22
+ - 点击左侧的 **AI Gateway** 标签
23
+ - 点击左侧边栏的 **API 密钥**
24
+ - 点击 **创建密钥**,然后在对话框中点击 **创建密钥** 完成创建
25
+
26
+ ### 第二步:在 LobeChat 中配置 Vercel AI Gateway
27
+
28
+ - 进入 LobeChat 的 `设置` 页面
29
+ - 在 `AI 服务提供商` 下,找到 `Vercel AI Gateway` 设置
30
+ - 输入您获得的 API 密钥
31
+ - 选择 Vercel AI Gateway 的模型,开始与 AI 助手对话
32
+
33
+ <Callout type={'warning'}>
34
+ 使用过程中可能需要向 API 服务提供商付费,请参考 Vercel AI Gateway 的
35
+ [定价政策](https://vercel.com/docs/ai-gateway/models)。
36
+ </Callout>
37
+ </Steps>
38
+
39
+ 至此,您可以在 LobeChat 中使用 Vercel AI Gateway 提供的模型开始聊天了。
40
+
41
+ ## 模型选择
42
+
43
+ Vercel AI Gateway 支持多种模型提供商,包括:
44
+
45
+ - **OpenAI**: `openai/gpt-4o`、`openai/gpt-4o-mini`、`openai/o1` 等
46
+ - **Anthropic**: `anthropic/claude-3-5-sonnet`、`anthropic/claude-3-opus` 等
47
+ - **Google**: `google/gemini-2.5-pro`、`google/gemini-2.0-flash` 等
48
+ - **DeepSeek**: `deepseek/deepseek-chat`、`deepseek/deepseek-reasoner` 等
49
+ - 以及更多...
50
+
51
+ 如需查看完整的支持模型列表,请访问 [Vercel AI Gateway 模型](https://vercel.com/ai-gateway/models)。
52
+
53
+ ## API 配置
54
+
55
+ Vercel AI Gateway 使用 OpenAI 兼容的 API 格式。基础 URL 为:
56
+
57
+ ```
58
+ https://ai-gateway.vercel.sh/v1
59
+ ```
60
+
61
+ 您可以将任何 OpenAI 兼容的客户端与此端点和您的 API 密钥配合使用。
package/next.config.ts CHANGED
@@ -1,5 +1,4 @@
1
1
  import analyzer from '@next/bundle-analyzer';
2
- import { withSentryConfig } from '@sentry/nextjs';
3
2
  import withSerwistInit from '@serwist/next';
4
3
  import type { NextConfig } from 'next';
5
4
  import ReactComponentName from 'react-scan/react-component-name/webpack';
@@ -326,48 +325,4 @@ const withPWA =
326
325
  })
327
326
  : noWrapper;
328
327
 
329
- const hasSentry = !!process.env.NEXT_PUBLIC_SENTRY_DSN;
330
- const withSentry =
331
- isProd && hasSentry
332
- ? (c: NextConfig) =>
333
- withSentryConfig(
334
- c,
335
- {
336
- org: process.env.SENTRY_ORG,
337
-
338
- project: process.env.SENTRY_PROJECT,
339
- // For all available options, see:
340
- // https://github.com/getsentry/sentry-webpack-plugin#options
341
- // Suppresses source map uploading logs during build
342
- silent: true,
343
- },
344
- {
345
- // Enables automatic instrumentation of Vercel Cron Monitors.
346
- // See the following for more information:
347
- // https://docs.sentry.io/product/crons/
348
- // https://vercel.com/docs/cron-jobs
349
- automaticVercelMonitors: true,
350
-
351
- // Automatically tree-shake Sentry logger statements to reduce bundle size
352
- disableLogger: true,
353
-
354
- // Hides source maps from generated client bundles
355
- hideSourceMaps: true,
356
-
357
- // Transpiles SDK to be compatible with IE11 (increases bundle size)
358
- transpileClientSDK: true,
359
-
360
- // Routes browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers. (increases server load)
361
- // Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client-
362
- // side errors will fail.
363
- tunnelRoute: '/monitoring',
364
-
365
- // For all available options, see:
366
- // https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/
367
- // Upload a larger set of source maps for prettier stack traces (increases build time)
368
- widenClientFileUpload: true,
369
- },
370
- )
371
- : noWrapper;
372
-
373
- export default withBundleAnalyzer(withPWA(withSentry(nextConfig) as NextConfig));
328
+ export default withBundleAnalyzer(withPWA(nextConfig as NextConfig));
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.128.9",
3
+ "version": "1.129.0",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -167,7 +167,6 @@
167
167
  "@neondatabase/serverless": "^1.0.1",
168
168
  "@next/third-parties": "^15.5.3",
169
169
  "@react-spring/web": "^9.7.5",
170
- "@sentry/nextjs": "^7.120.4",
171
170
  "@serwist/next": "^9.2.1",
172
171
  "@t3-oss/env-nextjs": "^0.13.8",
173
172
  "@tanstack/react-query": "^5.87.4",
@@ -27,7 +27,7 @@ async function* openaiRuntime(payload: any) {
27
27
 
28
28
  if (delta?.content) {
29
29
  content += delta.content;
30
- yield { content: delta.content };
30
+ yield { content };
31
31
  }
32
32
 
33
33
  if (delta?.tool_calls) {
@@ -21,7 +21,7 @@ export const FORM_STYLE: FormProps = {
21
21
  style: { maxWidth: MAX_WIDTH, width: '100%' },
22
22
  };
23
23
  export const MOBILE_HEADER_ICON_SIZE: ActionIconProps['size'] = { blockSize: 36, size: 22 };
24
- export const DESKTOP_HEADER_ICON_SIZE: ActionIconProps['size'] = { blockSize: 36, size: 22 };
24
+ export const DESKTOP_HEADER_ICON_SIZE: ActionIconProps['size'] = { blockSize: 32, size: 20 };
25
25
  export const HEADER_ICON_SIZE = (mobile?: boolean) =>
26
26
  mobile ? MOBILE_HEADER_ICON_SIZE : DESKTOP_HEADER_ICON_SIZE;
27
27
  export const PWA_INSTALL_ID = 'pwa-install';
@@ -9,6 +9,7 @@ export abstract class BaseProcessor implements ContextProcessor {
9
9
  abstract readonly name: string;
10
10
 
11
11
  // 为了兼容现有子类构造函数签名,保留参数但不做任何处理
12
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
12
13
  constructor(_options: ProcessorOptions = {}) {}
13
14
 
14
15
  /**
@@ -69,8 +70,8 @@ export abstract class BaseProcessor implements ContextProcessor {
69
70
  protected abort(context: PipelineContext, reason: string): PipelineContext {
70
71
  return {
71
72
  ...context,
72
- isAborted: true,
73
73
  abortReason: reason,
74
+ isAborted: true,
74
75
  };
75
76
  }
76
77
 
@@ -1,2 +1,6 @@
1
- CREATE INDEX "agents_title_idx" ON "agents" USING btree ("title");--> statement-breakpoint
2
- CREATE INDEX "agents_description_idx" ON "agents" USING btree ("description");
1
+ -- 将超过 1000 字符的 description 截断为 1000 字符
2
+ UPDATE agents
3
+ SET description = LEFT(description, 1000)
4
+ WHERE LENGTH(description) > 1000;--> statement-breakpoint
5
+ CREATE INDEX IF NOT EXISTS "agents_title_idx" ON "agents" USING btree ("title");--> statement-breakpoint
6
+ CREATE INDEX IF NOT EXISTS "agents_description_idx" ON "agents" USING btree ("description");
@@ -0,0 +1,6 @@
1
+ -- 将超过 255 字符的 title 截断为 255 字符
2
+ UPDATE agents
3
+ SET title = LEFT(title, 255)
4
+ WHERE LENGTH(title) > 255;--> statement-breakpoint
5
+ ALTER TABLE "agents" ALTER COLUMN "title" SET DATA TYPE varchar(255);--> statement-breakpoint
6
+ ALTER TABLE "agents" ALTER COLUMN "description" SET DATA TYPE varchar(1000);