@lobehub/lobehub 2.0.0-next.310 → 2.0.0-next.311

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/.github/PULL_REQUEST_TEMPLATE.md +1 -1
  2. package/CHANGELOG.md +25 -0
  3. package/changelog/v1.json +9 -0
  4. package/docs/development/basic/chat-api.mdx +0 -1
  5. package/docs/development/basic/chat-api.zh-CN.mdx +0 -1
  6. package/package.json +1 -1
  7. package/packages/model-runtime/src/core/BaseAI.ts +0 -2
  8. package/packages/model-runtime/src/core/ModelRuntime.test.ts +0 -37
  9. package/packages/model-runtime/src/core/ModelRuntime.ts +0 -5
  10. package/packages/model-runtime/src/core/RouterRuntime/baseRuntimeMap.ts +4 -0
  11. package/packages/model-runtime/src/core/RouterRuntime/createRuntime.test.ts +325 -200
  12. package/packages/model-runtime/src/core/RouterRuntime/createRuntime.ts +205 -64
  13. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.ts +0 -14
  14. package/packages/model-runtime/src/providers/aihubmix/index.test.ts +14 -20
  15. package/packages/model-runtime/src/types/index.ts +0 -1
  16. package/packages/model-runtime/src/utils/createError.test.ts +0 -20
  17. package/packages/model-runtime/src/utils/createError.ts +0 -1
  18. package/src/app/(backend)/market/agent/[[...segments]]/route.ts +3 -33
  19. package/src/app/(backend)/market/oidc/[[...segments]]/route.ts +5 -6
  20. package/src/app/(backend)/market/social/[[...segments]]/route.ts +5 -52
  21. package/src/app/(backend)/market/user/[username]/route.ts +3 -9
  22. package/src/app/(backend)/market/user/me/route.ts +3 -34
  23. package/src/features/ChatMiniMap/useMinimapData.ts +1 -1
  24. package/src/features/Conversation/ChatList/components/VirtualizedList.tsx +20 -2
  25. package/src/features/Conversation/store/slices/virtuaList/action.ts +9 -0
  26. package/src/libs/trpc/lambda/middleware/marketSDK.ts +14 -23
  27. package/src/libs/trusted-client/index.ts +1 -1
  28. package/src/server/routers/lambda/market/index.ts +5 -0
  29. package/src/server/routers/lambda/market/oidc.ts +41 -61
  30. package/src/server/routers/tools/market.ts +12 -44
  31. package/src/server/services/agentRuntime/AgentRuntimeService.test.ts +7 -0
  32. package/src/server/services/agentRuntime/AgentRuntimeService.ts +1 -1
  33. package/src/server/services/aiAgent/__tests__/execAgent.threadId.test.ts +7 -0
  34. package/src/server/services/aiAgent/__tests__/execGroupSubAgentTask.test.ts +7 -0
  35. package/src/server/services/aiAgent/index.ts +9 -96
  36. package/src/server/services/discover/index.ts +11 -16
  37. package/src/server/services/market/index.ts +485 -0
  38. package/src/server/services/toolExecution/builtin.ts +11 -17
  39. package/src/server/services/toolExecution/index.ts +6 -2
  40. package/src/services/codeInterpreter.ts +0 -13
  41. package/packages/model-runtime/src/types/textToImage.ts +0 -36
  42. package/src/server/services/lobehubSkill/index.ts +0 -109
package/packages/model-runtime/src/core/RouterRuntime/createRuntime.ts
@@ -1,11 +1,14 @@
  /**
   * @see https://github.com/lobehub/lobe-chat/discussions/6563
   */
+ import type { GoogleGenAIOptions } from '@google/genai';
  import type { ChatModelCard } from '@lobechat/types';
+ import debug from 'debug';
  import OpenAI, { ClientOptions } from 'openai';
  import { Stream } from 'openai/streaming';

  import { LobeOpenAI } from '../../providers/openai';
+ import { LobeVertexAI } from '../../providers/vertexai';
  import {
    CreateImagePayload,
    CreateImageResponse,
@@ -20,19 +23,15 @@ import {
    ChatStreamPayload,
    EmbeddingsOptions,
    EmbeddingsPayload,
-   TextToImagePayload,
    TextToSpeechPayload,
  } from '../../types';
  import { postProcessModelList } from '../../utils/postProcessModelList';
+ import { safeParseJSON } from '../../utils/safeParseJSON';
  import { LobeRuntimeAI } from '../BaseAI';
  import { CreateImageOptions, CustomClientOptions } from '../openaiCompatibleFactory';
  import { baseRuntimeMap } from './baseRuntimeMap';

- export interface RuntimeItem {
-   id: string;
-   models?: string[];
-   runtime: LobeRuntimeAI;
- }
+ const log = debug('lobe-model-runtime:router-runtime');

  interface ProviderIniOptions extends Record<string, any> {
    accessKeyId?: string;
@@ -46,12 +45,25 @@ interface ProviderIniOptions extends Record<string, any> {
    sessionToken?: string;
  }

+ /**
+  * Router option item used for inference.
+  * When `options` is an array, items are tried in order for chat fallback.
+  * `apiType` allows switching provider when falling back.
+  */
+ interface RouterOptionItem extends ProviderIniOptions {
+   apiType?: keyof typeof baseRuntimeMap;
+   id?: string;
+   remark?: string;
+ }
+
+ type RouterOptions = RouterOptionItem | RouterOptionItem[];
+
  export type RuntimeClass = typeof LobeOpenAI;

  interface RouterInstance {
    apiType: keyof typeof baseRuntimeMap;
    models?: string[];
-   options: ProviderIniOptions;
+   options: RouterOptions;
    runtime?: RuntimeClass;
  }

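The `options` field on a router may now be a single `RouterOptionItem` or an array tried in order. A hedged sketch of an array-valued config, using the fields defined in the hunk above (the API keys, channel ids, model name, and the `'anthropic'` apiType are made-up placeholders, not values from this package):

```ts
// Illustrative only — not a config shipped in this package.
const routerWithFallback = {
  apiType: 'openai',
  models: ['gpt-4o-mini'],
  options: [
    // Tried first.
    { apiKey: 'sk-primary', id: 'channel-a', remark: 'primary channel' },
    // Tried next if the first attempt throws; apiType switches the provider.
    { apiKey: 'sk-backup', apiType: 'anthropic', id: 'channel-b', remark: 'fallback channel' },
  ],
};
```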
@@ -121,6 +133,14 @@ export interface CreateRouterRuntimeOptions<T extends Record<string, any> = any>
    routers: Routers;
  }

+ const formatErrorMessage = (error: unknown): string => {
+   if (error instanceof Error) {
+     return error.name ? `${error.name}: ${error.message}` : error.message;
+   }
+
+   return String(error);
+ };
+
  export const createRouterRuntime = ({
    id,
    routers,
@@ -163,61 +183,200 @@ export const createRouterRuntime = ({
      return resolvedRouters;
    }

-   /**
-    * Create runtime for inference requests (chat, generateObject, etc.)
-    * Finds the router that matches the model, or uses the last router as fallback
-    */
-   private async createRuntimeForInference(model: string): Promise<RuntimeItem> {
+   private async resolveMatchedRouter(model: string): Promise<RouterInstance> {
      const resolvedRouters = await this.resolveRouters(model);
-
-     const matchedRouter =
+     return (
        resolvedRouters.find((router) => {
          if (router.models && router.models.length > 0) {
            return router.models.includes(model);
          }
          return false;
-       }) ?? resolvedRouters.at(-1)!;
+       }) ?? resolvedRouters.at(-1)!
+     );
+   }
+
+   private normalizeRouterOptions(router: RouterInstance): RouterOptionItem[] {
+     const routerOptions = Array.isArray(router.options) ? router.options : [router.options];
+
+     if (routerOptions.length === 0 || routerOptions.some((optionItem) => !optionItem)) {
+       throw new Error('empty provider options');
+     }
+
+     return routerOptions;
+   }
+
+   /**
+    * Build a runtime instance for a specific option item.
+    * Option items can override apiType to switch providers for fallback.
+    */
+   private createRuntimeFromOption(
+     router: RouterInstance,
+     optionItem: RouterOptionItem,
+   ): {
+     channelId?: string;
+     id: keyof typeof baseRuntimeMap;
+     remark?: string;
+     runtime: LobeRuntimeAI;
+   } {
+     const { apiType: optionApiType, id: channelId, remark, ...optionOverrides } = optionItem;
+     const resolvedApiType = optionApiType ?? router.apiType;
+     const finalOptions = { ...this._params, ...this._options, ...optionOverrides };
+
+     /**
+      * Vertex AI uses GoogleGenAI credentials flow rather than API keys.
+      * Accept JSON credentials in apiKey for compatibility with server config.
+      */
+     if (resolvedApiType === 'vertexai') {
+       const { apiKey, googleAuthOptions, project, location, ...restOptions } = finalOptions;
+       const credentials = safeParseJSON<Record<string, any>>(apiKey);
+       const vertexOptions: GoogleGenAIOptions = {
+         ...(restOptions as GoogleGenAIOptions),
+         vertexai: true,
+       };
+
+       if (googleAuthOptions) {
+         vertexOptions.googleAuthOptions = googleAuthOptions;
+       } else if (credentials) {
+         vertexOptions.googleAuthOptions = { credentials };
+       }
+
+       if (project) vertexOptions.project = project;
+       if (location) vertexOptions.location = location as GoogleGenAIOptions['location'];
+
+       return {
+         channelId,
+         id: resolvedApiType,
+         remark,
+         runtime: LobeVertexAI.initFromVertexAI(vertexOptions),
+       };
+     }

      const providerAI =
-       matchedRouter.runtime ?? baseRuntimeMap[matchedRouter.apiType] ?? LobeOpenAI;
-     const finalOptions = { ...this._params, ...this._options, ...matchedRouter.options };
+       resolvedApiType === router.apiType
+         ? (router.runtime ?? baseRuntimeMap[resolvedApiType] ?? LobeOpenAI)
+         : (baseRuntimeMap[resolvedApiType] ?? LobeOpenAI);
      const runtime: LobeRuntimeAI = new providerAI({ ...finalOptions, id: this._id });

      return {
-       id: matchedRouter.apiType,
-       models: matchedRouter.models,
+       channelId,
+       id: resolvedApiType,
+       remark,
        runtime,
      };
    }

-   /**
-    * Create all runtimes for listing models
-    */
-   private async createRuntimes(): Promise<RuntimeItem[]> {
+   private async runWithFallback<T>(
+     model: string,
+     requestHandler: (runtime: LobeRuntimeAI) => Promise<T>,
+   ): Promise<T> {
+     const matchedRouter = await this.resolveMatchedRouter(model);
+     const routerOptions = this.normalizeRouterOptions(matchedRouter);
+     const totalOptions = routerOptions.length;
+
+     log(
+       'resolve router for model=%s apiType=%s options=%d',
+       model,
+       matchedRouter.apiType,
+       totalOptions,
+     );
+
+     let lastError: unknown;
+
+     for (const [index, optionItem] of routerOptions.entries()) {
+       const attempt = index + 1;
+       const {
+         channelId,
+         id: resolvedApiType,
+         remark,
+         runtime,
+       } = this.createRuntimeFromOption(matchedRouter, optionItem);
+
+       try {
+         const result = await requestHandler(runtime);
+
+         if (totalOptions > 1 && attempt > 1) {
+           log(
+             'fallback success for model=%s attempt=%d/%d apiType=%s channelId=%s remark=%s',
+             model,
+             attempt,
+             totalOptions,
+             resolvedApiType,
+             channelId ?? '',
+             remark ?? '',
+           );
+         }
+
+         return result;
+       } catch (error) {
+         lastError = error;
+
+         const message = formatErrorMessage(error);
+         if (attempt < totalOptions) {
+           log(
+             'attempt failed, fallback to next: model=%s attempt=%d/%d apiType=%s channelId=%s remark=%s error=%s',
+             model,
+             attempt,
+             totalOptions,
+             resolvedApiType,
+             channelId ?? '',
+             remark ?? '',
+             message,
+           );
+         } else {
+           log(
+             'attempt failed, no more fallbacks: model=%s attempt=%d/%d apiType=%s channelId=%s remark=%s error=%s',
+             model,
+             attempt,
+             totalOptions,
+             resolvedApiType,
+             channelId ?? '',
+             remark ?? '',
+             message,
+           );
+         }
+       }
+     }
+
+     throw lastError ?? new Error('empty provider options');
+   }
+
+   async models() {
      const resolvedRouters = await this.resolveRouters();
-     return resolvedRouters.map((router) => {
-       const providerAI = router.runtime ?? baseRuntimeMap[router.apiType] ?? LobeOpenAI;
-       const finalOptions = { ...this._params, ...this._options, ...router.options };
-       const runtime: LobeRuntimeAI = new providerAI({ ...finalOptions, id: this._id });
+     const runtimes = resolvedRouters.map((router) => {
+       const routerOptions = this.normalizeRouterOptions(router);
+       const { id: resolvedApiType, runtime } = this.createRuntimeFromOption(
+         router,
+         routerOptions[0],
+       );

        return {
-         id: router.apiType,
+         id: resolvedApiType,
          models: router.models,
          runtime,
        };
      });
-   }

-   // Check if it can match a specific model, otherwise default to using the last runtime
-   async getRuntimeByModel(model: string) {
-     const runtimeItem = await this.createRuntimeForInference(model);
-     return runtimeItem.runtime;
+     if (modelsOption && typeof modelsOption === 'function') {
+       // If it's a functional configuration, use the last runtime's client to call the function
+       const lastRuntime = runtimes.at(-1)?.runtime;
+       if (lastRuntime && 'client' in lastRuntime) {
+         const modelList = await modelsOption({ client: (lastRuntime as any).client });
+         return await postProcessModelList(modelList);
+       }
+     }
+
+     return runtimes.at(-1)?.runtime.models?.();
    }

+   /**
+    * Try router options in order for chat requests.
+    * When options is an array, fall back to the next item on failure.
+    */
    async chat(payload: ChatStreamPayload, options?: ChatMethodOptions) {
      try {
-       const runtime = await this.getRuntimeByModel(payload.model);
-       return await runtime.chat!(payload, options);
+       return await this.runWithFallback(payload.model, (runtime) =>
+         runtime.chat!(payload, options),
+       );
      } catch (e) {
        if (params.chatCompletion?.handleError) {
          const error = params.chatCompletion.handleError(e, this._options);
@@ -231,44 +390,26 @@ export const createRouterRuntime = ({
        }
      }

-   async generateObject(payload: GenerateObjectPayload, options?: GenerateObjectOptions) {
-     const runtime = await this.getRuntimeByModel(payload.model);
-     return runtime.generateObject!(payload, options);
-   }
-
    async createImage(payload: CreateImagePayload) {
-     const runtime = await this.getRuntimeByModel(payload.model);
-     return runtime.createImage!(payload);
+     return this.runWithFallback(payload.model, (runtime) => runtime.createImage!(payload));
    }

-   async textToImage(payload: TextToImagePayload) {
-     const runtime = await this.getRuntimeByModel(payload.model);
-     return runtime.textToImage!(payload);
-   }
-
-   async models() {
-     if (modelsOption && typeof modelsOption === 'function') {
-       const runtimes = await this.createRuntimes();
-       // If it's a functional configuration, use the last runtime's client to call the function
-       const lastRuntime = runtimes.at(-1)?.runtime;
-       if (lastRuntime && 'client' in lastRuntime) {
-         const modelList = await modelsOption({ client: (lastRuntime as any).client });
-         return await postProcessModelList(modelList);
-       }
-     }
-
-     const runtimes = await this.createRuntimes();
-     return runtimes.at(-1)?.runtime.models?.();
+   async generateObject(payload: GenerateObjectPayload, options?: GenerateObjectOptions) {
+     return this.runWithFallback(payload.model, (runtime) =>
+       runtime.generateObject!(payload, options),
+     );
    }

    async embeddings(payload: EmbeddingsPayload, options?: EmbeddingsOptions) {
-     const runtime = await this.getRuntimeByModel(payload.model);
-     return runtime.embeddings!(payload, options);
+     return this.runWithFallback(payload.model, (runtime) =>
+       runtime.embeddings!(payload, options),
+     );
    }

    async textToSpeech(payload: TextToSpeechPayload, options?: EmbeddingsOptions) {
-     const runtime = await this.getRuntimeByModel(payload.model);
-     return runtime.textToSpeech!(payload, options);
+     return this.runWithFallback(payload.model, (runtime) =>
+       runtime.textToSpeech!(payload, options),
+     );
    }
  };
  };
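For reference, a minimal standalone sketch of the try-in-order loop that `runWithFallback` implements above, stripped of logging and runtime construction; the `request` callback stands in for the per-method handler passed by `chat`, `embeddings`, and the other methods:

```ts
// Minimal sketch, not the shipped implementation: try each option in
// order, remember the last error, and rethrow it once every option fails.
async function tryInOrder<Option, Result>(
  options: Option[],
  request: (option: Option) => Promise<Result>,
): Promise<Result> {
  let lastError: unknown;
  for (const option of options) {
    try {
      return await request(option); // first success wins
    } catch (error) {
      lastError = error; // fall through to the next option
    }
  }
  throw lastError ?? new Error('empty provider options');
}
```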
package/packages/model-runtime/src/core/openaiCompatibleFactory/index.ts
@@ -19,7 +19,6 @@ import {
    EmbeddingsPayload,
    GenerateObjectOptions,
    GenerateObjectPayload,
-   TextToImagePayload,
    TextToSpeechOptions,
    TextToSpeechPayload,
  } from '../../types';
@@ -736,19 +735,6 @@ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = any>
      }
    }

-   async textToImage(payload: TextToImagePayload) {
-     const log = debug(`${this.logPrefix}:textToImage`);
-     log('textToImage called with prompt length: %d', payload.prompt?.length || 0);
-
-     try {
-       const res = await this.client.images.generate(payload);
-       log('generated %d images', res.data?.length || 0);
-       return (res.data || []).map((o) => o.url) as string[];
-     } catch (error) {
-       throw this.handleError(error);
-     }
-   }
-
    async textToSpeech(payload: TextToSpeechPayload, options?: TextToSpeechOptions) {
      const log = debug(`${this.logPrefix}:textToSpeech`);
      log(
package/packages/model-runtime/src/providers/aihubmix/index.test.ts
@@ -1,5 +1,4 @@
  // @vitest-environment node
- import { ModelProvider } from 'model-bank';
  import { beforeEach, describe, expect, it, vi } from 'vitest';

  import { LobeAiHubMixAI } from './index';
@@ -25,9 +24,7 @@ describe('LobeAiHubMixAI', () => {

    describe('chat', () => {
      it('should support chat method', async () => {
-       vi.spyOn(instance as any, 'getRuntimeByModel').mockResolvedValue({
-         chat: vi.fn().mockResolvedValue(new Response()),
-       });
+       vi.spyOn(instance as any, 'runWithFallback').mockResolvedValue(new Response());

        const payload = {
          messages: [{ content: 'Hello', role: 'user' as const }],
@@ -49,24 +46,21 @@
        },
      };

-     vi.spyOn(instance as any, 'getRuntimeByModel').mockResolvedValue({
-       models: async () => {
-         try {
-           await mockClient.models.list();
-         } catch (error) {
-           console.warn(
-             'Failed to fetch AiHubMix models. Please ensure your AiHubMix API key is valid:',
-             error,
-           );
-           return [];
-         }
-       },
-     });
+     class MockRuntime {
+       client = mockClient;
+     }

      // The models method should return empty array on error
-     const models = await (instance as any)
-       .getRuntimeByModel('test-model')
-       .then((r: any) => r.models());
+     vi.spyOn(instance as any, 'resolveRouters').mockResolvedValue([
+       {
+         apiType: 'openai',
+         models: [],
+         options: {},
+         runtime: MockRuntime,
+       },
+     ]);
+
+     const models = await instance.models();
      expect(models).toEqual([]);
    });
  });
package/packages/model-runtime/src/types/index.ts
@@ -4,7 +4,6 @@ export * from './error';
  export * from './image';
  export * from './model';
  export * from './structureOutput';
- export * from './textToImage';
  export * from './toolsCalling';
  export * from './tts';
  export * from './type';
package/packages/model-runtime/src/utils/createError.test.ts
@@ -72,24 +72,4 @@ describe('AgentRuntimeError', () => {
      expect(result).toEqual(errorPayload);
    });
  });
-
- describe('textToImage', () => {
-   it('should return the same error object', () => {
-     const error = { message: 'Text to image failed', code: 'GENERATION_ERROR' };
-     const result = AgentRuntimeError.textToImage(error);
-
-     expect(result).toBe(error);
-     expect(result).toEqual(error);
-   });
-
-   it('should handle any type of error', () => {
-     const stringError = 'String error';
-     const numberError = 404;
-     const arrayError = ['error1', 'error2'];
-
-     expect(AgentRuntimeError.textToImage(stringError)).toBe(stringError);
-     expect(AgentRuntimeError.textToImage(numberError)).toBe(numberError);
-     expect(AgentRuntimeError.textToImage(arrayError)).toBe(arrayError);
-   });
- });
  });
package/packages/model-runtime/src/utils/createError.ts
@@ -12,5 +12,4 @@ export const AgentRuntimeError = {
    error?: any,
  ): AgentInitErrorPayload => ({ error, errorType }),
  createImage: (error: CreateImageErrorPayload): CreateImageErrorPayload => error,
- textToImage: (error: any): any => error,
  };
package/src/app/(backend)/market/agent/[[...segments]]/route.ts
@@ -1,7 +1,6 @@
- import { MarketSDK } from '@lobehub/market-sdk';
  import { type NextRequest, NextResponse } from 'next/server';

- import { getTrustedClientTokenForSession } from '@/libs/trusted-client';
+ import { MarketService } from '@/server/services/market';

  type RouteContext = {
    params: Promise<{
@@ -9,18 +8,6 @@ type RouteContext = {
    }>;
  };

- const MARKET_BASE_URL = process.env.NEXT_PUBLIC_MARKET_BASE_URL || 'https://market.lobehub.com';
-
- const extractAccessToken = (req: NextRequest) => {
-   const authorization = req.headers.get('authorization');
-   if (!authorization) return undefined;
-
-   const [scheme, token] = authorization.split(' ');
-   if (scheme?.toLowerCase() !== 'bearer' || !token) return undefined;
-
-   return token;
- };
-
  const methodNotAllowed = (methods: string[]) =>
    NextResponse.json(
      {
@@ -55,14 +42,8 @@ const notFound = (reason: string) =>
    );

  const handleAgent = async (req: NextRequest, segments: string[]) => {
-   const accessToken = extractAccessToken(req);
-   const trustedClientToken = await getTrustedClientTokenForSession();
-
-   const market = new MarketSDK({
-     accessToken,
-     baseURL: MARKET_BASE_URL,
-     trustedClientToken,
-   });
+   const marketService = await MarketService.createFromRequest(req);
+   const market = marketService.market;

    if (segments.length === 0) {
      return notFound('Missing agent action.');
@@ -94,17 +75,6 @@ const handleAgent = async (req: NextRequest, segments: string[]) => {
    if (action === 'own') {
      if (req.method !== 'GET') return methodNotAllowed(['GET']);

-     if (!accessToken) {
-       return NextResponse.json(
-         {
-           error: 'unauthorized',
-           message: 'Authentication required to get own agents',
-           status: 'error',
-         },
-         { status: 401 },
-       );
-     }
-
      try {
        // Parse query parameters from the request URL
        const url = new URL(req.url);
package/src/app/(backend)/market/oidc/[[...segments]]/route.ts
@@ -1,15 +1,15 @@
- import { MarketSDK } from '@lobehub/market-sdk';
  import { type NextRequest, NextResponse } from 'next/server';

  import { getTrustedClientTokenForSession } from '@/libs/trusted-client';
+ import { MarketService } from '@/server/services/market';
+
+ const MARKET_BASE_URL = process.env.NEXT_PUBLIC_MARKET_BASE_URL || 'https://market.lobehub.com';

  type RouteContext = {
    params: Promise<{
      segments?: string[];
    }>;
  };
-
- const MARKET_BASE_URL = process.env.NEXT_PUBLIC_MARKET_BASE_URL || 'https://market.lobehub.com';
  const ALLOWED_ENDPOINTS = new Set(['handoff', 'token', 'userinfo']);

  const ensureEndpoint = (segments?: string[]) => {
@@ -44,9 +44,8 @@ const methodNotAllowed = (allowed: string[]) =>
    );

  const handleProxy = async (req: NextRequest, context: RouteContext) => {
-   const market = new MarketSDK({
-     baseURL: MARKET_BASE_URL,
-   });
+   const marketService = new MarketService();
+   const market = marketService.market;

    const { segments } = await context.params;
    const endpointResult = ensureEndpoint(segments);
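The market route changes above all converge on the new `MarketService` added in `package/src/server/services/market/index.ts` (+485 lines, not shown in this diff). Judging only from the call sites visible here, the shared pattern looks roughly like the sketch below; anything beyond the `market` property and the static `createFromRequest(req)` is an assumption:

```ts
import { type NextRequest, NextResponse } from 'next/server';

import { MarketService } from '@/server/services/market';

// Sketch of the two construction styles the market routes above now use.
const handle = async (req: NextRequest) => {
  // Authenticated, per-request SDK (agent/social/user routes):
  const authed = await MarketService.createFromRequest(req);
  // Anonymous SDK for public endpoints (the oidc proxy):
  const anonymous = new MarketService();

  // Both expose the underlying MarketSDK instance via `.market`.
  void authed.market;
  void anonymous.market;

  return NextResponse.json({ status: 'ok' });
};
```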