@lobehub/chat 1.124.1 → 1.124.2

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (100)
  1. package/.github/scripts/pr-comment.js +11 -2
  2. package/.github/workflows/desktop-pr-build.yml +86 -12
  3. package/.github/workflows/release-desktop-beta.yml +91 -20
  4. package/CHANGELOG.md +25 -0
  5. package/apps/desktop/electron-builder.js +8 -4
  6. package/changelog/v1.json +9 -0
  7. package/package.json +1 -1
  8. package/packages/const/src/hotkeys.ts +1 -1
  9. package/packages/const/src/index.ts +1 -0
  10. package/packages/const/src/settings/hotkey.ts +3 -2
  11. package/packages/const/src/trace.ts +1 -1
  12. package/packages/const/src/user.ts +1 -2
  13. package/packages/database/src/client/db.test.ts +19 -13
  14. package/packages/electron-server-ipc/src/ipcClient.test.ts +783 -1
  15. package/packages/file-loaders/src/loadFile.test.ts +61 -0
  16. package/packages/file-loaders/src/utils/isTextReadableFile.test.ts +43 -0
  17. package/packages/file-loaders/src/utils/parser-utils.test.ts +155 -0
  18. package/packages/model-runtime/package.json +2 -1
  19. package/packages/model-runtime/src/ai21/index.test.ts +2 -2
  20. package/packages/model-runtime/src/ai360/index.test.ts +2 -2
  21. package/packages/model-runtime/src/akashchat/index.test.ts +19 -0
  22. package/packages/model-runtime/src/anthropic/index.test.ts +1 -2
  23. package/packages/model-runtime/src/baichuan/index.test.ts +1 -2
  24. package/packages/model-runtime/src/bedrock/index.test.ts +1 -2
  25. package/packages/model-runtime/src/bfl/createImage.test.ts +1 -2
  26. package/packages/model-runtime/src/bfl/index.test.ts +1 -2
  27. package/packages/model-runtime/src/cloudflare/index.test.ts +1 -2
  28. package/packages/model-runtime/src/cohere/index.test.ts +19 -0
  29. package/packages/model-runtime/src/deepseek/index.test.ts +2 -2
  30. package/packages/model-runtime/src/fireworksai/index.test.ts +2 -2
  31. package/packages/model-runtime/src/giteeai/index.test.ts +2 -2
  32. package/packages/model-runtime/src/github/index.test.ts +2 -2
  33. package/packages/model-runtime/src/google/createImage.test.ts +1 -2
  34. package/packages/model-runtime/src/google/index.test.ts +1 -1
  35. package/packages/model-runtime/src/groq/index.test.ts +2 -3
  36. package/packages/model-runtime/src/huggingface/index.test.ts +40 -0
  37. package/packages/model-runtime/src/hunyuan/index.test.ts +2 -3
  38. package/packages/model-runtime/src/internlm/index.test.ts +2 -2
  39. package/packages/model-runtime/src/jina/index.test.ts +19 -0
  40. package/packages/model-runtime/src/lmstudio/index.test.ts +2 -2
  41. package/packages/model-runtime/src/minimax/index.test.ts +19 -0
  42. package/packages/model-runtime/src/mistral/index.test.ts +2 -3
  43. package/packages/model-runtime/src/modelscope/index.test.ts +19 -0
  44. package/packages/model-runtime/src/moonshot/index.test.ts +1 -2
  45. package/packages/model-runtime/src/nebius/index.test.ts +19 -0
  46. package/packages/model-runtime/src/novita/index.test.ts +3 -4
  47. package/packages/model-runtime/src/nvidia/index.test.ts +19 -0
  48. package/packages/model-runtime/src/openrouter/index.test.ts +2 -3
  49. package/packages/model-runtime/src/perplexity/index.test.ts +2 -3
  50. package/packages/model-runtime/src/ppio/index.test.ts +3 -4
  51. package/packages/model-runtime/src/qwen/index.test.ts +2 -2
  52. package/packages/model-runtime/src/sambanova/index.test.ts +19 -0
  53. package/packages/model-runtime/src/search1api/index.test.ts +19 -0
  54. package/packages/model-runtime/src/sensenova/index.test.ts +2 -2
  55. package/packages/model-runtime/src/spark/index.test.ts +2 -2
  56. package/packages/model-runtime/src/stepfun/index.test.ts +2 -2
  57. package/packages/model-runtime/src/taichu/index.test.ts +4 -5
  58. package/packages/model-runtime/src/tencentcloud/index.test.ts +1 -1
  59. package/packages/model-runtime/src/togetherai/index.test.ts +1 -2
  60. package/packages/model-runtime/src/upstage/index.test.ts +1 -2
  61. package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.test.ts +9 -7
  62. package/packages/model-runtime/src/utils/streams/anthropic.ts +2 -2
  63. package/packages/model-runtime/src/utils/streams/openai/openai.ts +20 -13
  64. package/packages/model-runtime/src/utils/streams/openai/responsesStream.test.ts +1 -2
  65. package/packages/model-runtime/src/utils/streams/openai/responsesStream.ts +2 -2
  66. package/packages/model-runtime/src/utils/streams/protocol.ts +2 -2
  67. package/packages/model-runtime/src/wenxin/index.test.ts +2 -3
  68. package/packages/model-runtime/src/xai/index.test.ts +2 -2
  69. package/packages/model-runtime/src/zeroone/index.test.ts +1 -2
  70. package/packages/model-runtime/src/zhipu/index.test.ts +2 -3
  71. package/packages/model-runtime/vitest.config.mts +0 -7
  72. package/packages/types/src/index.ts +2 -0
  73. package/packages/types/src/message/base.ts +1 -1
  74. package/packages/types/src/openai/chat.ts +2 -3
  75. package/packages/utils/package.json +2 -1
  76. package/packages/utils/src/_deprecated/parseModels.test.ts +1 -1
  77. package/packages/utils/src/_deprecated/parseModels.ts +1 -1
  78. package/packages/utils/src/client/topic.test.ts +1 -2
  79. package/packages/utils/src/client/topic.ts +1 -2
  80. package/packages/utils/src/electron/desktopRemoteRPCFetch.ts +1 -1
  81. package/packages/utils/src/fetch/fetchSSE.ts +7 -8
  82. package/packages/utils/src/fetch/parseError.ts +1 -3
  83. package/packages/utils/src/format.test.ts +1 -2
  84. package/packages/utils/src/index.ts +1 -0
  85. package/packages/utils/src/toolManifest.ts +1 -2
  86. package/packages/utils/src/trace.ts +1 -1
  87. package/packages/utils/vitest.config.mts +1 -1
  88. package/packages/web-crawler/src/__tests__/urlRules.test.ts +275 -0
  89. package/packages/web-crawler/src/crawImpl/__tests__/exa.test.ts +269 -0
  90. package/packages/web-crawler/src/crawImpl/__tests__/firecrawl.test.ts +284 -0
  91. package/packages/web-crawler/src/crawImpl/__tests__/naive.test.ts +234 -0
  92. package/packages/web-crawler/src/crawImpl/__tests__/tavily.test.ts +359 -0
  93. package/packages/web-crawler/src/utils/__tests__/errorType.test.ts +217 -0
  94. package/packages/web-crawler/vitest.config.mts +3 -0
  95. package/scripts/electronWorkflow/mergeMacReleaseFiles.ts +207 -0
  96. package/src/components/Thinking/index.tsx +2 -3
  97. package/src/features/ChatInput/StoreUpdater.tsx +2 -0
  98. package/src/libs/traces/index.ts +1 -1
  99. package/src/server/modules/ModelRuntime/trace.ts +1 -2
  100. package/packages/model-runtime/src/openrouter/__snapshots__/index.test.ts.snap +0 -113
package/packages/model-runtime/src/novita/index.test.ts
@@ -1,10 +1,9 @@
  // @vitest-environment node
+ import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
+ import { ModelProvider } from '@lobechat/model-runtime';
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { LobeOpenAICompatibleRuntime } from '@/libs/model-runtime';
- import { ModelProvider } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+ import { testProvider } from '../providerTestUtils';
  import models from './fixtures/models.json';
  import { LobeNovitaAI } from './index';

package/packages/model-runtime/src/nvidia/index.test.ts
@@ -0,0 +1,19 @@
+ // @vitest-environment node
+ import { ModelProvider } from '@lobechat/model-runtime';
+
+ import { testProvider } from '../providerTestUtils';
+ import { LobeNvidiaAI } from './index';
+
+ const provider = ModelProvider.Nvidia;
+ const defaultBaseURL = 'https://integrate.api.nvidia.com/v1';
+
+ testProvider({
+   Runtime: LobeNvidiaAI,
+   provider,
+   defaultBaseURL,
+   chatDebugEnv: 'DEBUG_NVIDIA_CHAT_COMPLETION',
+   chatModel: 'meta/llama-3.1-8b-instruct',
+   test: {
+     skipAPICall: true,
+   },
+ });
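The new provider test files in this release (nvidia, sambanova, search1api, cohere, jina, minimax, and others in the file list above) all delegate to a shared testProvider helper. Its implementation is not part of this diff; a minimal sketch of the shape implied by these call sites — hypothetical names and assertions only, not the actual providerTestUtils code — might look like:

// Hypothetical sketch inferred from the call sites in this diff;
// not the actual providerTestUtils implementation.
import { describe, expect, it } from 'vitest';

interface TestProviderOptions {
  Runtime: new (options: { apiKey?: string }) => { baseURL?: string };
  provider: string; // a ModelProvider enum value
  defaultBaseURL: string;
  chatDebugEnv: string; // env var assumed to toggle chat-stream debug logging
  chatModel: string; // model id assumed to drive a chat smoke test
  test?: { skipAPICall?: boolean }; // skip cases that would hit the network
}

export const testProvider = (options: TestProviderOptions) => {
  describe(`${options.provider} runtime`, () => {
    it('initializes with the default baseURL', () => {
      const runtime = new options.Runtime({ apiKey: 'test-key' });
      expect(runtime.baseURL).toBe(options.defaultBaseURL);
    });
    // ...plus chat-completion cases, gated by options.test?.skipAPICall
  });
};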
package/packages/model-runtime/src/openrouter/index.test.ts
@@ -1,9 +1,8 @@
  // @vitest-environment node
+ import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { LobeOpenAICompatibleRuntime } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+ import { testProvider } from '../providerTestUtils';
  import models from './fixtures/models.json';
  import { LobeOpenRouterAI } from './index';

package/packages/model-runtime/src/perplexity/index.test.ts
@@ -1,9 +1,8 @@
  // @vitest-environment node
+ import { LobeOpenAICompatibleRuntime, ModelProvider } from '@lobechat/model-runtime';
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { LobeOpenAICompatibleRuntime, ModelProvider } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+ import { testProvider } from '../providerTestUtils';
  import { LobePerplexityAI } from './index';

  testProvider({
package/packages/model-runtime/src/ppio/index.test.ts
@@ -1,10 +1,9 @@
  // @vitest-environment node
+ import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
+ import { ModelProvider } from '@lobechat/model-runtime';
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { LobeOpenAICompatibleRuntime } from '@/libs/model-runtime';
- import { ModelProvider } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+ import { testProvider } from '../providerTestUtils';
  import models from './fixtures/models.json';
  import { LobePPIOAI } from './index';

package/packages/model-runtime/src/qwen/index.test.ts
@@ -1,7 +1,7 @@
  // @vitest-environment node
- import { ModelProvider } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+ import { ModelProvider } from '@lobechat/model-runtime';

+ import { testProvider } from '../providerTestUtils';
  import { LobeQwenAI } from './index';

  const provider = ModelProvider.Qwen;
package/packages/model-runtime/src/sambanova/index.test.ts
@@ -0,0 +1,19 @@
+ // @vitest-environment node
+ import { ModelProvider } from '@lobechat/model-runtime';
+
+ import { testProvider } from '../providerTestUtils';
+ import { LobeSambaNovaAI } from './index';
+
+ const provider = ModelProvider.SambaNova;
+ const defaultBaseURL = 'https://api.sambanova.ai/v1';
+
+ testProvider({
+   Runtime: LobeSambaNovaAI,
+   provider,
+   defaultBaseURL,
+   chatDebugEnv: 'DEBUG_SAMBANOVA_CHAT_COMPLETION',
+   chatModel: 'Meta-Llama-3.1-8B-Instruct',
+   test: {
+     skipAPICall: true,
+   },
+ });
package/packages/model-runtime/src/search1api/index.test.ts
@@ -0,0 +1,19 @@
+ // @vitest-environment node
+ import { ModelProvider } from '@lobechat/model-runtime';
+
+ import { testProvider } from '../providerTestUtils';
+ import { LobeSearch1API } from './index';
+
+ const provider = ModelProvider.Search1API;
+ const defaultBaseURL = 'https://api.search1api.com/v1';
+
+ testProvider({
+   Runtime: LobeSearch1API,
+   provider,
+   defaultBaseURL,
+   chatDebugEnv: 'DEBUG_SEARCH1API_CHAT_COMPLETION',
+   chatModel: 'gpt-4o-mini',
+   test: {
+     skipAPICall: true,
+   },
+ });
package/packages/model-runtime/src/sensenova/index.test.ts
@@ -1,7 +1,7 @@
  // @vitest-environment node
- import { ModelProvider } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+ import { ModelProvider } from '@lobechat/model-runtime';

+ import { testProvider } from '../providerTestUtils';
  import { LobeSenseNovaAI } from './index';

  const provider = ModelProvider.SenseNova;
package/packages/model-runtime/src/spark/index.test.ts
@@ -1,7 +1,7 @@
  // @vitest-environment node
- import { ModelProvider } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+ import { ModelProvider } from '@lobechat/model-runtime';

+ import { testProvider } from '../providerTestUtils';
  import { LobeSparkAI } from './index';

  const provider = ModelProvider.Spark;
package/packages/model-runtime/src/stepfun/index.test.ts
@@ -1,7 +1,7 @@
  // @vitest-environment node
- import { ModelProvider } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+ import { ModelProvider } from '@lobechat/model-runtime';

+ import { testProvider } from '../providerTestUtils';
  import { LobeStepfunAI } from './index';

  const provider = ModelProvider.Stepfun;
package/packages/model-runtime/src/taichu/index.test.ts
@@ -1,15 +1,14 @@
  // @vitest-environment node
- import OpenAI from 'openai';
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-
  import {
    ChatStreamCallbacks,
    LobeMoonshotAI,
    LobeOpenAICompatibleRuntime,
    ModelProvider,
- } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+ } from '@lobechat/model-runtime';
+ import OpenAI from 'openai';
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

+ import { testProvider } from '../providerTestUtils';
  import * as debugStreamModule from '../utils/debugStream';
  import { LobeTaichuAI } from './index';

package/packages/model-runtime/src/tencentcloud/index.test.ts
@@ -1,5 +1,5 @@
  // @vitest-environment node
- import { ModelProvider } from '@/libs/model-runtime';
+ import { ModelProvider } from '@lobechat/model-runtime';

  import { testProvider } from '../providerTestUtils';
  import { LobeTencentCloudAI } from './index';
package/packages/model-runtime/src/togetherai/index.test.ts
@@ -1,6 +1,5 @@
  // @vitest-environment node
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+ import { testProvider } from '../providerTestUtils';
  import { LobeTogetherAI } from './index';

  testProvider({
package/packages/model-runtime/src/upstage/index.test.ts
@@ -1,6 +1,5 @@
  // @vitest-environment node
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+ import { testProvider } from '../providerTestUtils';
  import { LobeUpstageAI } from './index';

  testProvider({
package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.test.ts
@@ -1,22 +1,24 @@
  // @vitest-environment node
- import OpenAI from 'openai';
- import type { Stream } from 'openai/streaming';
- import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-
  import {
    AgentRuntimeErrorType,
    ChatStreamCallbacks,
    ChatStreamPayload,
    LobeOpenAICompatibleRuntime,
    ModelProvider,
- } from '@/libs/model-runtime';
- import officalOpenAIModels from '@/libs/model-runtime/openai/fixtures/openai-models.json';
- import { sleep } from '@/utils/sleep';
+ } from '@lobechat/model-runtime';
+ import OpenAI from 'openai';
+ import type { Stream } from 'openai/streaming';
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

  import * as debugStreamModule from '../debugStream';
  import * as openaiHelpers from '../openaiHelpers';
  import { createOpenAICompatibleRuntime } from './index';

+ const sleep = async (ms: number) =>
+   await new Promise((resolve) => {
+     setTimeout(resolve, ms);
+   });
+
  const provider = 'groq';
  const defaultBaseURL = 'https://api.groq.com/openai/v1';
  const bizErrorType = 'ProviderBizError';
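Note that the factory test now inlines its own sleep helper, replacing the removed @/utils/sleep import — presumably so the test no longer reaches outside the model-runtime package (the utils package separately gains an exported sleep module later in this diff).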
package/packages/model-runtime/src/utils/streams/anthropic.ts
@@ -1,7 +1,7 @@
  import Anthropic from '@anthropic-ai/sdk';
  import type { Stream } from '@anthropic-ai/sdk/streaming';

- import { CitationItem, ModelTokensUsage } from '@/types/message';
+ import { ChatCitationItem, ModelTokensUsage } from '@/types/message';

  import { ChatStreamCallbacks } from '../../types';
  import {
@@ -180,7 +180,7 @@ export const transformAnthropicStream = (
          context.returnedCitationArray.push({
            title: citations.title,
            url: citations.url,
-         } as CitationItem);
+         } as ChatCitationItem);
        }

        return { data: null, id: context.id, type: 'text' };
package/packages/model-runtime/src/utils/streams/openai/openai.ts
@@ -1,7 +1,7 @@
  import OpenAI from 'openai';
  import type { Stream } from 'openai/streaming';

- import { ChatMessageError, CitationItem } from '@/types/message';
+ import { ChatCitationItem, ChatMessageError } from '@/types/message';

  import { ChatStreamCallbacks } from '../../../types';
  import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../../types/error';
@@ -21,24 +21,24 @@ import {
  } from '../protocol';

  // Process markdown base64 images: extract URLs and clean text in one pass
- const processMarkdownBase64Images = (text: string): { cleanedText: string, urls: string[]; } => {
+ const processMarkdownBase64Images = (text: string): { cleanedText: string; urls: string[] } => {
    if (!text) return { cleanedText: text, urls: [] };
-
+
    const urls: string[] = [];
    const mdRegex = /!\[[^\]]*]\(\s*(data:image\/[\d+.A-Za-z-]+;base64,[^\s)]+)\s*\)/g;
    let cleanedText = text;
    let m: RegExpExecArray | null;
-
+
    // Reset regex lastIndex to ensure we start from the beginning
    mdRegex.lastIndex = 0;
-
+
    while ((m = mdRegex.exec(text)) !== null) {
      if (m[1]) urls.push(m[1]);
    }
-
+
    // Remove all markdown base64 image segments
    cleanedText = text.replaceAll(mdRegex, '').trim();
-
+
    return { cleanedText, urls };
  };

@@ -159,14 +159,17 @@ const transformOpenAIStream = (
        return { data: null, id: chunk.id, type: 'text' };
      }

-
      const text = item.delta.content as string;
      const { urls: images, cleanedText: cleaned } = processMarkdownBase64Images(text);
      if (images.length > 0) {
        const arr: StreamProtocolChunk[] = [];
        if (cleaned) arr.push({ data: cleaned, id: chunk.id, type: 'text' });
        arr.push(
-         ...images.map((url: string) => ({ data: url, id: chunk.id, type: 'base64_image' as const })),
+         ...images.map((url: string) => ({
+           data: url,
+           id: chunk.id,
+           type: 'base64_image' as const,
+         })),
        );
        return arr;
      }
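For reference, a usage sketch of the processMarkdownBase64Images helper reworked above (the input string is illustrative, not taken from the diff):

// Hypothetical input: streamed text embedding an inline base64 image.
const { cleanedText, urls } = processMarkdownBase64Images(
  'Here is a chart: ![chart](data:image/png;base64,iVBORw0KGgo=) done.',
);
// The markdown image segment is stripped (leaving a doubled space where it sat):
// cleanedText === 'Here is a chart:  done.'
// ...and its data URL is captured for the 'base64_image' stream chunks:
// urls === ['data:image/png;base64,iVBORw0KGgo=']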
@@ -187,7 +190,7 @@ const transformOpenAIStream = (
            ({
              title: item.url_citation.title,
              url: item.url_citation.url,
-           }) as CitationItem,
+           }) as ChatCitationItem,
          ),
        },
        id: chunk.id,
@@ -209,7 +212,7 @@ const transformOpenAIStream = (
            ({
              title: item.url,
              url: item.url,
-           }) as CitationItem,
+           }) as ChatCitationItem,
          ),
        },
        id: chunk.id,
@@ -236,7 +239,7 @@ const transformOpenAIStream = (
            ({
              title: item,
              url: item,
-           }) as CitationItem,
+           }) as ChatCitationItem,
          ),
        },
        id: chunk.id,
@@ -348,7 +351,11 @@ const transformOpenAIStream = (
        const arr: StreamProtocolChunk[] = [];
        if (cleaned) arr.push({ data: cleaned, id: chunk.id, type: 'text' });
        arr.push(
-         ...urls.map((url: string) => ({ data: url, id: chunk.id, type: 'base64_image' as const })),
+         ...urls.map((url: string) => ({
+           data: url,
+           id: chunk.id,
+           type: 'base64_image' as const,
+         })),
        );
        return arr;
      }
package/packages/model-runtime/src/utils/streams/openai/responsesStream.test.ts
@@ -1,7 +1,6 @@
+ import { AgentRuntimeErrorType } from '@lobechat/model-runtime';
  import { describe, expect, it, vi } from 'vitest';

- import { AgentRuntimeErrorType } from '@/libs/model-runtime';
-
  import { FIRST_CHUNK_ERROR_KEY } from '../protocol';
  import { createReadableStream, readStreamChunk } from '../utils';
  import { OpenAIResponsesStream } from './responsesStream';
package/packages/model-runtime/src/utils/streams/openai/responsesStream.ts
@@ -1,7 +1,7 @@
  import OpenAI from 'openai';
  import type { Stream } from 'openai/streaming';

- import { ChatMessageError, CitationItem } from '@/types/message';
+ import { ChatCitationItem, ChatMessageError } from '@/types/message';

  import { AgentRuntimeErrorType } from '../../../types/error';
  import { convertResponseUsage } from '../../usageConverter';
@@ -126,7 +126,7 @@ const transformOpenAIStream = (
        streamContext.returnedCitationArray.push({
          title: citations.title,
          url: citations.url,
-       } as CitationItem);
+       } as ChatCitationItem);
      }

      return { data: null, id: chunk.item_id, type: 'text' };
package/packages/model-runtime/src/utils/streams/protocol.ts
@@ -1,4 +1,4 @@
- import { CitationItem, ModelSpeed, ModelTokensUsage } from '@/types/message';
+ import { ChatCitationItem, ModelSpeed, ModelTokensUsage } from '@/types/message';

  import { parseToolCalls } from '../../helpers';
  import { ChatStreamCallbacks } from '../../types';
@@ -23,7 +23,7 @@ export interface StreamContext {
     * relevant to that specific portion of the generated content.
     * This array accumulates all citation items received during the streaming response.
     */
-   returnedCitationArray?: CitationItem[];
+   returnedCitationArray?: ChatCitationItem[];
    /**
     * O series models need a condition to separate part
     */
package/packages/model-runtime/src/wenxin/index.test.ts
@@ -1,9 +1,8 @@
  // @vitest-environment node
+ import { LobeOpenAICompatibleRuntime, ModelProvider } from '@lobechat/model-runtime';
  import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { LobeOpenAICompatibleRuntime, ModelProvider } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+ import { testProvider } from '../providerTestUtils';
  import { LobeWenxinAI } from './index';

  testProvider({
package/packages/model-runtime/src/xai/index.test.ts
@@ -1,7 +1,7 @@
  // @vitest-environment node
- import { ModelProvider } from '@/libs/model-runtime';
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+ import { ModelProvider } from '@lobechat/model-runtime';

+ import { testProvider } from '../providerTestUtils';
  import { LobeXAI } from './index';

  testProvider({
package/packages/model-runtime/src/zeroone/index.test.ts
@@ -1,6 +1,5 @@
  // @vitest-environment node
- import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+ import { testProvider } from '../providerTestUtils';
  import { LobeZeroOneAI } from './index';

  testProvider({
package/packages/model-runtime/src/zhipu/index.test.ts
@@ -1,10 +1,9 @@
  // @vitest-environment node
+ import { ChatStreamCallbacks, LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
  import { OpenAI } from 'openai';
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

- import { ChatStreamCallbacks, LobeOpenAI, LobeOpenAICompatibleRuntime } from '@/libs/model-runtime';
- import * as debugStreamModule from '@/libs/model-runtime/utils/debugStream';
-
+ import * as debugStreamModule from '../utils/debugStream';
  import { LobeZhipuAI } from './index';

  const bizErrorType = 'ProviderBizError';
package/packages/model-runtime/vitest.config.mts
@@ -4,14 +4,7 @@ import { defineConfig } from 'vitest/config';
  export default defineConfig({
    test: {
      alias: {
-       /* eslint-disable sort-keys-fix/sort-keys-fix */
-       '@/libs/model-runtime': resolve(__dirname, './src'),
-       '@/types': resolve(__dirname, '../types/src'),
-       '@/utils/errorResponse': resolve(__dirname, '../../src/utils/errorResponse'),
-       '@/utils': resolve(__dirname, '../utils/src'),
-       '@/const': resolve(__dirname, '../const/src'),
        '@': resolve(__dirname, '../../src'),
-       /* eslint-enable */
      },
      coverage: {
        reporter: ['text', 'json', 'lcov', 'text-summary'],
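Dropping these aliases lines up with the test changes above: the package's tests now import @lobechat/model-runtime by its published name rather than through the @/libs/model-runtime path alias, leaving only the repo-root @ alias in use.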
package/packages/types/src/index.ts
@@ -14,6 +14,7 @@ export * from './llm';
  export * from './message';
  export * from './meta';
  export * from './rag';
+ export * from './search';
  export * from './serverConfig';
  export * from './session';
  export * from './topic';
@@ -22,5 +23,6 @@ export * from './user/settings';
  // FIXME: I think we need a refactor for the "openai" types
  // it more likes the UI message payload
  export * from './openai/chat';
+ export * from './openai/plugin';
  export * from './trace';
  export * from './zustand';
package/packages/types/src/message/base.ts
@@ -16,7 +16,7 @@ export interface ChatMessageError {
    type: ErrorType | IPluginErrorType | ILobeAgentRuntimeErrorType;
  }

- export interface CitationItem {
+ export interface ChatCitationItem {
    id?: string;
    onlyUrl?: boolean;
    title?: string;
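This rename of CitationItem to ChatCitationItem in the types package is the source of the matching updates to anthropic.ts, openai.ts, responsesStream.ts, and protocol.ts earlier in the diff.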
package/packages/types/src/openai/chat.ts
@@ -1,6 +1,5 @@
- import { LLMRoleType } from '@/types/llm';
- import { MessageToolCall } from '@/types/message';
-
+ import { LLMRoleType } from '../llm';
+ import { MessageToolCall } from '../message';
  import { OpenAIFunctionCall } from './functionCall';

  interface UserMessageContentPartText {
package/packages/utils/package.json
@@ -13,7 +13,8 @@
    },
    "dependencies": {
      "@lobechat/const": "workspace:*",
-     "@lobechat/types": "workspace:*"
+     "@lobechat/types": "workspace:*",
+     "dayjs": "^1.11.18"
    },
    "devDependencies": {
      "vitest-canvas-mock": "^0.3.3"
package/packages/utils/src/_deprecated/parseModels.test.ts
@@ -1,7 +1,7 @@
+ import { ChatModelCard } from '@lobechat/types';
  import { describe, expect, it } from 'vitest';

  import { LOBE_DEFAULT_MODEL_LIST, OpenAIProviderCard } from '@/config/modelProviders';
- import { ChatModelCard } from '@/types/llm';

  import { parseModelString, transformToChatModelCards } from './parseModels';

package/packages/utils/src/_deprecated/parseModels.ts
@@ -1,7 +1,7 @@
+ import { ChatModelCard } from '@lobechat/types';
  import { produce } from 'immer';

  import { LOBE_DEFAULT_MODEL_LIST } from '@/config/modelProviders';
- import { ChatModelCard } from '@/types/llm';

  /**
   * Parse model string to add or remove models.
package/packages/utils/src/client/topic.test.ts
@@ -1,8 +1,7 @@
+ import { ChatTopic } from '@lobechat/types';
  import dayjs from 'dayjs';
  import { afterAll, beforeAll, describe, expect, it, vi } from 'vitest';

- import { ChatTopic } from '@/types/topic';
-
  import { groupTopicsByTime } from './topic';

  // Mock current date to ensure consistent test results
package/packages/utils/src/client/topic.ts
@@ -1,9 +1,8 @@
+ import { ChatTopic, GroupedTopic, TimeGroupId } from '@lobechat/types';
  import dayjs from 'dayjs';
  import isToday from 'dayjs/plugin/isToday';
  import isYesterday from 'dayjs/plugin/isYesterday';

- import { ChatTopic, GroupedTopic, TimeGroupId } from '@/types/topic';
-
  // Initialize dayjs plugins
  dayjs.extend(isToday);
  dayjs.extend(isYesterday);
package/packages/utils/src/electron/desktopRemoteRPCFetch.ts
@@ -1,7 +1,7 @@
+ import { isDesktop } from '@lobechat/const';
  import { ProxyTRPCRequestParams, dispatch, streamInvoke } from '@lobechat/electron-client-ipc';
  import debug from 'debug';

- import { isDesktop } from '@/const/version';
  import { getElectronStoreState } from '@/store/electron';
  import { electronSyncSelectors } from '@/store/electron/selectors';
  import { getRequestBody, headersToRecord } from '@/utils/fetch';
package/packages/utils/src/fetch/fetchSSE.ts
@@ -1,20 +1,19 @@
+ import { LOBE_CHAT_OBSERVATION_ID, LOBE_CHAT_TRACE_ID, MESSAGE_CANCEL_FLAT } from '@lobechat/const';
  import { parseToolCalls } from '@lobechat/model-runtime';
- import { ChatErrorType } from '@lobechat/types';
-
- import { MESSAGE_CANCEL_FLAT } from '@/const/message';
- import { LOBE_CHAT_OBSERVATION_ID, LOBE_CHAT_TRACE_ID } from '@/const/trace';
- import { ResponseAnimation, ResponseAnimationStyle } from '@lobechat/types';
  import {
+   ChatErrorType,
+   ChatImageChunk,
    ChatMessageError,
+   GroundingSearch,
    MessageToolCall,
    MessageToolCallChunk,
    MessageToolCallSchema,
    ModelReasoning,
    ModelSpeed,
    ModelTokensUsage,
- } from '@/types/message';
- import { ChatImageChunk } from '@/types/message/image';
- import { GroundingSearch } from '@/types/search';
+   ResponseAnimation,
+   ResponseAnimationStyle,
+ } from '@lobechat/types';

  import { nanoid } from '../uuid';
  import { fetchEventSource } from './fetchEventSource';
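The remaining hunks follow the same consolidation pattern: imports scattered across @/const/* and @/types/* paths move to the @lobechat/const and @lobechat/types workspace packages.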
package/packages/utils/src/fetch/parseError.ts
@@ -1,8 +1,6 @@
- import { ErrorResponse, ErrorType } from '@lobechat/types';
+ import { ChatMessageError, ErrorResponse, ErrorType } from '@lobechat/types';
  import { t } from 'i18next';

- import { ChatMessageError } from '@/types/message';
-
  export const getMessageError = async (response: Response) => {
    let chatMessageError: ChatMessageError;

package/packages/utils/src/format.test.ts
@@ -1,8 +1,7 @@
+ import { USD_TO_CNY } from '@lobechat/const';
  import dayjs from 'dayjs';
  import { describe, expect, it } from 'vitest';

- import { USD_TO_CNY } from '@/const/currency';
-
  import {
    formatDate,
    formatIntergerNumber,
package/packages/utils/src/index.ts
@@ -7,4 +7,5 @@ export * from './object';
  export * from './parseModels';
  export * from './pricing';
  export * from './safeParseJSON';
+ export * from './sleep';
  export * from './uuid';
package/packages/utils/src/toolManifest.ts
@@ -1,9 +1,8 @@
+ import { ChatCompletionTool , OpenAIPluginManifest } from '@lobechat/types';
  import { LobeChatPluginManifest, pluginManifestSchema } from '@lobehub/chat-plugin-sdk';
  import { uniqBy } from 'lodash-es';

  import { API_ENDPOINTS } from '@/services/_url';
- import { ChatCompletionTool } from '@/types/openai/chat';
- import { OpenAIPluginManifest } from '@/types/openai/plugin';
  import { genToolCallingName } from '@/utils/toolCall';

  const fetchJSON = async <T = any>(url: string, proxy = false): Promise<T> => {
package/packages/utils/src/trace.ts
@@ -1,4 +1,4 @@
- import { LOBE_CHAT_TRACE_HEADER, LOBE_CHAT_TRACE_ID, TracePayload } from '@/const/trace';
+ import { LOBE_CHAT_TRACE_HEADER, LOBE_CHAT_TRACE_ID, TracePayload } from '@lobechat/const';

  export const getTracePayload = (req: Request): TracePayload | undefined => {
    const header = req.headers.get(LOBE_CHAT_TRACE_HEADER);