@lobehub/chat 1.40.4 → 1.42.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +52 -0
  2. package/changelog/v1.json +18 -0
  3. package/docs/self-hosting/advanced/auth/next-auth/wechat.mdx +46 -0
  4. package/docs/self-hosting/advanced/auth/next-auth/wechat.zh-CN.mdx +43 -0
  5. package/package.json +3 -3
  6. package/src/app/(backend)/webapi/assistant/store/route.ts +2 -11
  7. package/src/app/(main)/discover/(detail)/provider/[slug]/features/ProviderConfig.tsx +7 -4
  8. package/src/config/app.ts +4 -0
  9. package/src/config/modelProviders/spark.ts +3 -6
  10. package/src/features/MobileTabBar/index.tsx +3 -2
  11. package/src/features/User/UserAvatar.tsx +2 -2
  12. package/src/features/User/UserPanel/useMenu.tsx +5 -20
  13. package/src/hooks/useInterceptingRoutes.test.ts +2 -16
  14. package/src/hooks/useInterceptingRoutes.ts +2 -18
  15. package/src/libs/agent-runtime/qwen/index.test.ts +13 -188
  16. package/src/libs/agent-runtime/qwen/index.ts +47 -126
  17. package/src/libs/agent-runtime/spark/index.test.ts +24 -28
  18. package/src/libs/agent-runtime/spark/index.ts +4 -0
  19. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +131 -0
  20. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +14 -3
  21. package/src/libs/agent-runtime/utils/streams/index.ts +1 -0
  22. package/src/libs/agent-runtime/utils/streams/spark.test.ts +199 -0
  23. package/src/libs/agent-runtime/utils/streams/spark.ts +134 -0
  24. package/src/libs/next-auth/sso-providers/index.ts +2 -0
  25. package/src/libs/next-auth/sso-providers/wechat.ts +24 -0
  26. package/src/server/modules/AssistantStore/index.test.ts +5 -5
  27. package/src/server/modules/AssistantStore/index.ts +39 -1
  28. package/src/server/modules/EdgeConfig/index.ts +23 -0
  29. package/src/server/services/discover/index.ts +2 -13
  30. package/src/types/discover.ts +20 -0
  31. package/src/app/@modal/(.)settings/modal/index.tsx +0 -45
  32. package/src/app/@modal/(.)settings/modal/layout.tsx +0 -47
  33. package/src/app/@modal/(.)settings/modal/loading.tsx +0 -5
  34. package/src/app/@modal/(.)settings/modal/page.tsx +0 -19
package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts
@@ -1,10 +1,13 @@
  // @vitest-environment node
  import OpenAI from 'openai';
+ import type { Stream } from 'openai/streaming';
+
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

  import {
    AgentRuntimeErrorType,
    ChatStreamCallbacks,
+   ChatStreamPayload,
    LobeOpenAICompatibleRuntime,
    ModelProvider,
  } from '@/libs/agent-runtime';
@@ -797,6 +800,134 @@ describe('LobeOpenAICompatibleFactory', () => {
      });
    });

+   it('should use custom stream handler when provided', async () => {
+     // Create a custom stream handler that handles both ReadableStream and OpenAI Stream
+     const customStreamHandler = vi.fn((stream: ReadableStream | Stream<OpenAI.ChatCompletionChunk>) => {
+       const readableStream = stream instanceof ReadableStream ? stream : stream.toReadableStream();
+       return new ReadableStream({
+         start(controller) {
+           const reader = readableStream.getReader();
+           const process = async () => {
+             try {
+               while (true) {
+                 const { done, value } = await reader.read();
+                 if (done) break;
+                 controller.enqueue(value);
+               }
+             } finally {
+               controller.close();
+             }
+           };
+           process();
+         },
+       });
+     });
+
+     const LobeMockProvider = LobeOpenAICompatibleFactory({
+       baseURL: 'https://api.test.com/v1',
+       chatCompletion: {
+         handleStream: customStreamHandler,
+       },
+       provider: ModelProvider.OpenAI,
+     });
+
+     const instance = new LobeMockProvider({ apiKey: 'test' });
+
+     // Create a mock stream
+     const mockStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue({
+           id: 'test-id',
+           choices: [{ delta: { content: 'Hello' }, index: 0 }],
+           created: Date.now(),
+           model: 'test-model',
+           object: 'chat.completion.chunk',
+         });
+         controller.close();
+       },
+     });
+
+     vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue({
+       tee: () => [mockStream, mockStream],
+     } as any);
+
+     const payload: ChatStreamPayload = {
+       messages: [{ content: 'Test', role: 'user' }],
+       model: 'test-model',
+       temperature: 0.7,
+     };
+
+     await instance.chat(payload);
+
+     expect(customStreamHandler).toHaveBeenCalled();
+   });
+
+   it('should use custom transform handler for non-streaming response', async () => {
+     const customTransformHandler = vi.fn((data: OpenAI.ChatCompletion): ReadableStream => {
+       return new ReadableStream({
+         start(controller) {
+           // Transform the completion to chunk format
+           controller.enqueue({
+             id: data.id,
+             choices: data.choices.map((choice) => ({
+               delta: { content: choice.message.content },
+               index: choice.index,
+             })),
+             created: data.created,
+             model: data.model,
+             object: 'chat.completion.chunk',
+           });
+           controller.close();
+         },
+       });
+     });
+
+     const LobeMockProvider = LobeOpenAICompatibleFactory({
+       baseURL: 'https://api.test.com/v1',
+       chatCompletion: {
+         handleTransformResponseToStream: customTransformHandler,
+       },
+       provider: ModelProvider.OpenAI,
+     });
+
+     const instance = new LobeMockProvider({ apiKey: 'test' });
+
+     const mockResponse: OpenAI.ChatCompletion = {
+       id: 'test-id',
+       choices: [
+         {
+           index: 0,
+           message: {
+             role: 'assistant',
+             content: 'Test response',
+             refusal: null,
+           },
+           logprobs: null,
+           finish_reason: 'stop',
+         },
+       ],
+       created: Date.now(),
+       model: 'test-model',
+       object: 'chat.completion',
+       usage: { completion_tokens: 2, prompt_tokens: 1, total_tokens: 3 },
+     };
+
+     vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+       mockResponse as any,
+     );
+
+     const payload: ChatStreamPayload = {
+       messages: [{ content: 'Test', role: 'user' }],
+       model: 'test-model',
+       temperature: 0.7,
+       stream: false,
+     };
+
+     await instance.chat(payload);
+
+     expect(customTransformHandler).toHaveBeenCalledWith(mockResponse);
+   });
+
    describe('DEBUG', () => {
      it('should call debugStream and return StreamingTextResponse when DEBUG_OPENROUTER_CHAT_COMPLETION is 1', async () => {
        // Arrange
package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts
@@ -25,6 +25,7 @@ import { handleOpenAIError } from '../handleOpenAIError';
  import { convertOpenAIMessages } from '../openaiHelpers';
  import { StreamingResponse } from '../response';
  import { OpenAIStream, OpenAIStreamOptions } from '../streams';
+ import { ChatStreamCallbacks } from '../../types';

  // the model contains the following keywords is not a chat model, so we should filter them out
  export const CHAT_MODELS_BLOCK_LIST = [
@@ -62,10 +63,17 @@ interface OpenAICompatibleFactoryOptions<T extends Record<string, any> = any> {
      payload: ChatStreamPayload,
      options: ConstructorOptions<T>,
    ) => OpenAI.ChatCompletionCreateParamsStreaming;
+   handleStream?: (
+     stream: Stream<OpenAI.ChatCompletionChunk> | ReadableStream,
+     callbacks?: ChatStreamCallbacks,
+   ) => ReadableStream;
    handleStreamBizErrorType?: (error: {
      message: string;
      name: string;
    }) => ILobeAgentRuntimeErrorType | undefined;
+   handleTransformResponseToStream?: (
+     data: OpenAI.ChatCompletion,
+   ) => ReadableStream<OpenAI.ChatCompletionChunk>;
    noUserId?: boolean;
  };
  constructorOptions?: ConstructorOptions<T>;
@@ -228,7 +236,8 @@
        debugStream(useForDebugStream).catch(console.error);
      }

-     return StreamingResponse(OpenAIStream(prod, streamOptions), {
+     const streamHandler = chatCompletion?.handleStream || OpenAIStream;
+     return StreamingResponse(streamHandler(prod, streamOptions), {
        headers: options?.headers,
      });
    }
@@ -239,9 +248,11 @@

      if (responseMode === 'json') return Response.json(response);

-     const stream = transformResponseToStream(response as unknown as OpenAI.ChatCompletion);
+     const transformHandler = chatCompletion?.handleTransformResponseToStream || transformResponseToStream;
+     const stream = transformHandler(response as unknown as OpenAI.ChatCompletion);

-     return StreamingResponse(OpenAIStream(stream, streamOptions), {
+     const streamHandler = chatCompletion?.handleStream || OpenAIStream;
+     return StreamingResponse(streamHandler(stream, streamOptions), {
        headers: options?.headers,
      });
    } catch (error) {
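
Both branches fall back to the previous behavior (OpenAIStream and transformResponseToStream) when a provider passes neither option, so existing runtimes are unaffected. A minimal sketch of the new option surface follows; the provider name, endpoint, and handler bodies are illustrative only, not part of this diff:

import OpenAI from 'openai';
import type { Stream } from 'openai/streaming';

import { ChatStreamCallbacks, ModelProvider } from '@/libs/agent-runtime';
import { LobeOpenAICompatibleFactory } from '@/libs/agent-runtime/utils/openaiCompatibleFactory';

// Hypothetical runtime overriding both new hooks.
export const LobeCustomAI = LobeOpenAICompatibleFactory({
  baseURL: 'https://api.example.com/v1', // illustrative endpoint
  chatCompletion: {
    // called in place of OpenAIStream for streaming responses;
    // a real handler would adapt the provider's wire format (pass-through shown)
    handleStream: (
      stream: Stream<OpenAI.ChatCompletionChunk> | ReadableStream,
      callbacks?: ChatStreamCallbacks,
    ): ReadableStream =>
      stream instanceof ReadableStream ? stream : stream.toReadableStream(),
    // called in place of transformResponseToStream when the payload has `stream: false`
    handleTransformResponseToStream: (data: OpenAI.ChatCompletion) =>
      new ReadableStream<OpenAI.ChatCompletionChunk>({
        start(controller) {
          controller.enqueue({
            choices: data.choices.map((choice) => ({
              delta: { content: choice.message.content, role: choice.message.role },
              finish_reason: choice.finish_reason,
              index: choice.index,
              logprobs: choice.logprobs,
            })),
            created: data.created,
            id: data.id,
            model: data.model,
            object: 'chat.completion.chunk',
          });
          controller.close();
        },
      }),
  },
  provider: ModelProvider.OpenAI,
});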
package/src/libs/agent-runtime/utils/streams/index.ts
@@ -7,3 +7,4 @@ export * from './ollama';
  export * from './openai';
  export * from './protocol';
  export * from './qwen';
+ export * from './spark';
package/src/libs/agent-runtime/utils/streams/spark.test.ts
@@ -0,0 +1,199 @@
+ import { beforeAll, describe, expect, it, vi } from 'vitest';
+ import { SparkAIStream, transformSparkResponseToStream } from './spark';
+ import type OpenAI from 'openai';
+
+ describe('SparkAIStream', () => {
+   beforeAll(() => {});
+
+   it('should transform non-streaming response to stream', async () => {
+     const mockResponse = {
+       id: "cha000ceba6@dx193d200b580b8f3532",
+       object: "chat.completion",
+       created: 1734395014,
+       model: "max-32k",
+       choices: [
+         {
+           message: {
+             role: "assistant",
+             content: "",
+             refusal: null,
+             tool_calls: {
+               type: "function",
+               function: {
+                 arguments: '{"city":"Shanghai"}',
+                 name: "realtime-weather____fetchCurrentWeather"
+               },
+               id: "call_1"
+             }
+           },
+           index: 0,
+           logprobs: null,
+           finish_reason: "tool_calls"
+         }
+       ],
+       usage: {
+         prompt_tokens: 8,
+         completion_tokens: 0,
+         total_tokens: 8
+       }
+     } as unknown as OpenAI.ChatCompletion;
+
+     const stream = transformSparkResponseToStream(mockResponse);
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of stream) {
+       chunks.push(chunk);
+     }
+
+     expect(chunks).toHaveLength(2);
+     expect(chunks[0].choices[0].delta.tool_calls).toEqual([{
+       function: {
+         arguments: '{"city":"Shanghai"}',
+         name: "realtime-weather____fetchCurrentWeather"
+       },
+       id: "call_1",
+       index: 0,
+       type: "function"
+     }]);
+     expect(chunks[1].choices[0].finish_reason).toBeDefined();
+   });
+
+   it('should transform streaming response with tool calls', async () => {
+     const mockStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue({
+           id: "cha000b0bf9@dx193d1ffa61cb894532",
+           object: "chat.completion.chunk",
+           created: 1734395014,
+           model: "max-32k",
+           choices: [
+             {
+               delta: {
+                 role: "assistant",
+                 content: "",
+                 tool_calls: {
+                   type: "function",
+                   function: {
+                     arguments: '{"city":"Shanghai"}',
+                     name: "realtime-weather____fetchCurrentWeather"
+                   },
+                   id: "call_1"
+                 }
+               },
+               index: 0
+             }
+           ]
+         } as unknown as OpenAI.ChatCompletionChunk);
+         controller.close();
+       }
+     });
+
+     const onToolCallMock = vi.fn();
+
+     const protocolStream = SparkAIStream(mockStream, {
+       onToolCall: onToolCallMock
+     });
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([
+       'id: cha000b0bf9@dx193d1ffa61cb894532\n',
+       'event: tool_calls\n',
+       `data: [{\"function\":{\"arguments\":\"{\\\"city\\\":\\\"Shanghai\\\"}\",\"name\":\"realtime-weather____fetchCurrentWeather\"},\"id\":\"call_1\",\"index\":0,\"type\":\"function\"}]\n\n`
+     ]);
+
+     expect(onToolCallMock).toHaveBeenCalledTimes(1);
+   });
+
+   it('should handle text content in stream', async () => {
+     const mockStream = new ReadableStream({
+       start(controller) {
+         controller.enqueue({
+           id: "test-id",
+           object: "chat.completion.chunk",
+           created: 1734395014,
+           model: "max-32k",
+           choices: [
+             {
+               delta: {
+                 content: "Hello",
+                 role: "assistant"
+               },
+               index: 0
+             }
+           ]
+         } as OpenAI.ChatCompletionChunk);
+         controller.enqueue({
+           id: "test-id",
+           object: "chat.completion.chunk",
+           created: 1734395014,
+           model: "max-32k",
+           choices: [
+             {
+               delta: {
+                 content: " World",
+                 role: "assistant"
+               },
+               index: 0
+             }
+           ]
+         } as OpenAI.ChatCompletionChunk);
+         controller.close();
+       }
+     });
+
+     const onTextMock = vi.fn();
+
+     const protocolStream = SparkAIStream(mockStream, {
+       onText: onTextMock
+     });
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([
+       'id: test-id\n',
+       'event: text\n',
+       'data: "Hello"\n\n',
+       'id: test-id\n',
+       'event: text\n',
+       'data: " World"\n\n'
+     ]);
+
+     expect(onTextMock).toHaveBeenNthCalledWith(1, '"Hello"');
+     expect(onTextMock).toHaveBeenNthCalledWith(2, '" World"');
+   });
+
+   it('should handle empty stream', async () => {
+     const mockStream = new ReadableStream({
+       start(controller) {
+         controller.close();
+       }
+     });
+
+     const protocolStream = SparkAIStream(mockStream);
+
+     const decoder = new TextDecoder();
+     const chunks = [];
+
+     // @ts-ignore
+     for await (const chunk of protocolStream) {
+       chunks.push(decoder.decode(chunk, { stream: true }));
+     }
+
+     expect(chunks).toEqual([]);
+   });
+ });
package/src/libs/agent-runtime/utils/streams/spark.ts
@@ -0,0 +1,134 @@
+ import OpenAI from 'openai';
+ import type { Stream } from 'openai/streaming';
+
+ import { ChatStreamCallbacks } from '../../types';
+ import {
+   StreamProtocolChunk,
+   StreamProtocolToolCallChunk,
+   convertIterableToStream,
+   createCallbacksTransformer,
+   createSSEProtocolTransformer,
+   generateToolCallId,
+ } from './protocol';
+
+ export function transformSparkResponseToStream(data: OpenAI.ChatCompletion) {
+   return new ReadableStream({
+     start(controller) {
+       const chunk: OpenAI.ChatCompletionChunk = {
+         choices: data.choices.map((choice: OpenAI.ChatCompletion.Choice) => {
+           const toolCallsArray = choice.message.tool_calls
+             ? Array.isArray(choice.message.tool_calls)
+               ? choice.message.tool_calls
+               : [choice.message.tool_calls]
+             : []; // wrap it in an array if it is not one already
+
+           return {
+             delta: {
+               content: choice.message.content,
+               role: choice.message.role,
+               tool_calls: toolCallsArray.map(
+                 (tool, index): OpenAI.ChatCompletionChunk.Choice.Delta.ToolCall => ({
+                   function: tool.function,
+                   id: tool.id,
+                   index,
+                   type: tool.type,
+                 }),
+               ),
+             },
+             finish_reason: null,
+             index: choice.index,
+             logprobs: choice.logprobs,
+           };
+         }),
+         created: data.created,
+         id: data.id,
+         model: data.model,
+         object: 'chat.completion.chunk',
+       };
+
+       controller.enqueue(chunk);
+
+       controller.enqueue({
+         choices: data.choices.map((choice: OpenAI.ChatCompletion.Choice) => ({
+           delta: {
+             content: null,
+             role: choice.message.role,
+           },
+           finish_reason: choice.finish_reason,
+           index: choice.index,
+           logprobs: choice.logprobs,
+         })),
+         created: data.created,
+         id: data.id,
+         model: data.model,
+         object: 'chat.completion.chunk',
+         system_fingerprint: data.system_fingerprint,
+       } as OpenAI.ChatCompletionChunk);
+       controller.close();
+     },
+   });
+ }
+
+ export const transformSparkStream = (chunk: OpenAI.ChatCompletionChunk): StreamProtocolChunk => {
+   const item = chunk.choices[0];
+
+   if (!item) {
+     return { data: chunk, id: chunk.id, type: 'data' };
+   }
+
+   if (item.delta?.tool_calls) {
+     const toolCallsArray = Array.isArray(item.delta.tool_calls)
+       ? item.delta.tool_calls
+       : [item.delta.tool_calls]; // wrap it in an array if it is not one already
+
+     if (toolCallsArray.length > 0) {
+       return {
+         data: toolCallsArray.map((toolCall, index) => ({
+           function: toolCall.function,
+           id: toolCall.id || generateToolCallId(index, toolCall.function?.name),
+           index: typeof toolCall.index !== 'undefined' ? toolCall.index : index,
+           type: toolCall.type || 'function',
+         })),
+         id: chunk.id,
+         type: 'tool_calls',
+       } as StreamProtocolToolCallChunk;
+     }
+   }
+
+   if (item.finish_reason) {
+     // one-api's streaming interface may emit chunks that carry both finish_reason and content
+     // {"id":"demo","model":"deepl-en","choices":[{"index":0,"delta":{"role":"assistant","content":"Introduce yourself."},"finish_reason":"stop"}]}
+
+     if (typeof item.delta?.content === 'string' && !!item.delta.content) {
+       return { data: item.delta.content, id: chunk.id, type: 'text' };
+     }
+
+     return { data: item.finish_reason, id: chunk.id, type: 'stop' };
+   }
+
+   if (typeof item.delta?.content === 'string') {
+     return { data: item.delta.content, id: chunk.id, type: 'text' };
+   }
+
+   if (item.delta?.content === null) {
+     return { data: item.delta, id: chunk.id, type: 'data' };
+   }
+
+   return {
+     data: { delta: item.delta, id: chunk.id, index: item.index },
+     id: chunk.id,
+     type: 'data',
+   };
+ };
+
+ export const SparkAIStream = (
+   stream: Stream<OpenAI.ChatCompletionChunk> | ReadableStream,
+   callbacks?: ChatStreamCallbacks,
+ ) => {
+   const readableStream =
+     stream instanceof ReadableStream ? stream : convertIterableToStream(stream);
+
+   return readableStream
+     .pipeThrough(createSSEProtocolTransformer(transformSparkStream))
+     .pipeThrough(createCallbacksTransformer(callbacks));
+ };
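
The Spark runtime change itself (package/src/libs/agent-runtime/spark/index.ts, +4 -0) is not shown in this section, but given the new factory options it presumably wires these two exports in roughly this way; the base URL shown is an assumption, not part of this diff:

import { ModelProvider } from '../types';
import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
import { SparkAIStream, transformSparkResponseToStream } from '../utils/streams';

export const LobeSparkAI = LobeOpenAICompatibleFactory({
  baseURL: 'https://spark-api-open.xf-yun.com/v1', // assumed Spark endpoint
  chatCompletion: {
    // Spark emits tool_calls as a single object rather than an array, so both
    // the streaming and non-streaming paths go through the Spark-specific
    // transformers defined above
    handleStream: SparkAIStream,
    handleTransformResponseToStream: transformSparkResponseToStream,
  },
  provider: ModelProvider.Spark,
});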
package/src/libs/next-auth/sso-providers/index.ts
@@ -8,6 +8,7 @@ import GenericOIDC from './generic-oidc';
  import Github from './github';
  import Logto from './logto';
  import MicrosoftEntraID from './microsoft-entra-id';
+ import WeChat from './wechat';
  import Zitadel from './zitadel';

  export const ssoProviders = [
@@ -22,4 +23,5 @@ export const ssoProviders = [
    CloudflareZeroTrust,
    Casdoor,
    MicrosoftEntraID,
+   WeChat,
  ];
package/src/libs/next-auth/sso-providers/wechat.ts
@@ -0,0 +1,24 @@
+ import WeChat from '@auth/core/providers/wechat';
+
+ import { CommonProviderConfig } from './sso.config';
+
+ const provider = {
+   id: 'wechat',
+   provider: WeChat({
+     ...CommonProviderConfig,
+     clientId: process.env.AUTH_WECHAT_ID,
+     clientSecret: process.env.AUTH_WECHAT_SECRET,
+     platformType: 'WebsiteApp',
+     profile: (profile) => {
+       return {
+         email: null,
+         id: profile.unionid,
+         image: profile.headimgurl,
+         name: profile.nickname,
+         providerAccountId: profile.unionid,
+       };
+     },
+   }),
+ };
+
+ export default provider;
package/src/server/modules/AssistantStore/index.test.ts
@@ -7,19 +7,19 @@ const baseURL = 'https://registry.npmmirror.com/@lobehub/agents-index/v1/files/p
  describe('AssistantStore', () => {
    it('should return the default index URL when no language is provided', () => {
      const agentMarket = new AssistantStore();
-     const url = agentMarket.getAgentIndexUrl();
+     const url = agentMarket['getAgentIndexUrl']();
      expect(url).toBe(`${baseURL}/index.en-US.json`);
    });

    it('should return the index URL for a not supported language', () => {
      const agentMarket = new AssistantStore();
-     const url = agentMarket.getAgentIndexUrl('xxx' as any);
+     const url = agentMarket['getAgentIndexUrl']('xxx' as any);
      expect(url).toBe('https://registry.npmmirror.com/@lobehub/agents-index/v1/files/public');
    });

    it('should return the zh-CN URL for zh locale', () => {
      const agentMarket = new AssistantStore();
-     const url = agentMarket.getAgentIndexUrl('zh' as any);
+     const url = agentMarket['getAgentIndexUrl']('zh' as any);
      expect(url).toBe(
        'https://registry.npmmirror.com/@lobehub/agents-index/v1/files/public/index.zh-CN.json',
      );
@@ -27,7 +27,7 @@ describe('AssistantStore', () => {

    it('should return the default URL for en locale', () => {
      const agentMarket = new AssistantStore();
-     const url = agentMarket.getAgentIndexUrl('en' as any);
+     const url = agentMarket['getAgentIndexUrl']('en' as any);
      expect(url).toBe(
        'https://registry.npmmirror.com/@lobehub/agents-index/v1/files/public/index.en-US.json',
      );
@@ -35,7 +35,7 @@ describe('AssistantStore', () => {

    it('should return the base URL if the provided language is not supported', () => {
      const agentMarket = new AssistantStore();
-     const url = agentMarket.getAgentIndexUrl('fr' as any);
+     const url = agentMarket['getAgentIndexUrl']('fr' as any);
      expect(url).toBe(baseURL);
    });

package/src/server/modules/AssistantStore/index.ts
@@ -3,6 +3,8 @@ import urlJoin from 'url-join';
  import { appEnv } from '@/config/app';
  import { DEFAULT_LANG, isLocaleNotSupport } from '@/const/locale';
  import { Locales, normalizeLocale } from '@/locales/resources';
+ import { EdgeConfig } from '@/server/modules/EdgeConfig';
+ import { AgentStoreIndex } from '@/types/discover';

  export class AssistantStore {
    private readonly baseUrl: string;
@@ -11,7 +13,7 @@ export class AssistantStore {
      this.baseUrl = baseUrl || appEnv.AGENTS_INDEX_URL;
    }

-   getAgentIndexUrl = (lang: Locales = DEFAULT_LANG) => {
+   private getAgentIndexUrl = (lang: Locales = DEFAULT_LANG) => {
      if (isLocaleNotSupport(lang)) return this.baseUrl;

      return urlJoin(this.baseUrl, `index.${normalizeLocale(lang)}.json`);
@@ -22,4 +24,40 @@ export class AssistantStore {

      return urlJoin(this.baseUrl, `${identifier}.${normalizeLocale(lang)}.json`);
    };
+
+   getAgentIndex = async (locale: Locales = DEFAULT_LANG, revalidate?: number) => {
+     try {
+       let res: Response;
+
+       res = await fetch(this.getAgentIndexUrl(locale as any), { next: { revalidate } });
+
+       if (res.status === 404) {
+         res = await fetch(this.getAgentIndexUrl(DEFAULT_LANG), { next: { revalidate } });
+       }
+
+       if (!res.ok) {
+         console.error('fetch agent index error:', await res.text());
+         return [];
+       }
+
+       const data: AgentStoreIndex = await res.json();
+
+       // Get the assistant whitelist from Edge Config
+       const edgeConfig = new EdgeConfig();
+
+       if (!!appEnv.VERCEL_EDGE_CONFIG) {
+         const assistantWhitelist = await edgeConfig.getAgentWhitelist();
+
+         if (assistantWhitelist && assistantWhitelist?.length > 0) {
+           data.agents = data.agents.filter((item) => assistantWhitelist.includes(item.identifier));
+         }
+       }
+
+       return data;
+     } catch (e) {
+       console.error('fetch agent index error:', e);
+
+       throw e;
+     }
+   };
  }
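
The EdgeConfig module this depends on (package/src/server/modules/EdgeConfig/index.ts, +23 -0) is also outside this section. A minimal sketch of a getAgentWhitelist that would satisfy the call site above, built on Vercel's @vercel/edge-config client; the Edge Config key name is an assumption:

import { createClient } from '@vercel/edge-config';

import { appEnv } from '@/config/app';

export class EdgeConfig {
  // the call site above only invokes this after checking appEnv.VERCEL_EDGE_CONFIG,
  // so the connection string is assumed to be present here
  private client = createClient(appEnv.VERCEL_EDGE_CONFIG!);

  getAgentWhitelist = async (): Promise<string[] | undefined> => {
    // 'assistant_whitelist' is a hypothetical key name for illustration
    return this.client.get<string[]>('assistant_whitelist');
  };
}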