@lobehub/lobehub 2.0.0-next.101 → 2.0.0-next.102

Files changed (32)
  1. package/CHANGELOG.md +25 -0
  2. package/changelog/v1.json +9 -0
  3. package/package.json +1 -1
  4. package/packages/model-bank/package.json +1 -0
  5. package/packages/model-bank/src/aiModels/aihubmix.ts +27 -0
  6. package/packages/model-bank/src/aiModels/google.ts +69 -10
  7. package/packages/model-bank/src/aiModels/index.ts +3 -0
  8. package/packages/model-bank/src/aiModels/infiniai.ts +5 -22
  9. package/packages/model-bank/src/aiModels/ollamacloud.ts +12 -0
  10. package/packages/model-bank/src/aiModels/siliconcloud.ts +0 -61
  11. package/packages/model-bank/src/aiModels/vertexai.ts +88 -1
  12. package/packages/model-bank/src/aiModels/zenmux.ts +1423 -0
  13. package/packages/model-bank/src/const/modelProvider.ts +1 -0
  14. package/packages/model-bank/src/standard-parameters/index.ts +9 -0
  15. package/packages/model-runtime/src/core/openaiCompatibleFactory/index.test.ts +2 -2
  16. package/packages/model-runtime/src/core/streams/google/index.ts +7 -2
  17. package/packages/model-runtime/src/core/streams/openai/__snapshots__/responsesStream.test.ts.snap +166 -166
  18. package/packages/model-runtime/src/index.ts +1 -1
  19. package/packages/model-runtime/src/providers/google/createImage.ts +1 -0
  20. package/packages/model-runtime/src/providers/google/index.ts +11 -1
  21. package/packages/model-runtime/src/providers/zenmux/index.test.ts +320 -0
  22. package/packages/model-runtime/src/providers/zenmux/index.ts +84 -0
  23. package/packages/model-runtime/src/runtimeMap.ts +2 -0
  24. package/packages/types/src/user/settings/keyVaults.ts +1 -0
  25. package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/components/ResolutionSelect.tsx +88 -0
  26. package/src/app/[variants]/(main)/image/@menu/features/ConfigPanel/index.tsx +9 -0
  27. package/src/config/modelProviders/index.ts +3 -0
  28. package/src/config/modelProviders/zenmux.ts +21 -0
  29. package/src/envs/llm.ts +6 -0
  30. package/src/locales/default/image.ts +8 -0
  31. package/src/store/chat/slices/aiChat/actions/__tests__/conversationLifecycle.test.ts +3 -0
  32. package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts +11 -0
package/packages/model-bank/src/const/modelProvider.ts
@@ -64,6 +64,7 @@ export enum ModelProvider {
   Wenxin = 'wenxin',
   XAI = 'xai',
   Xinference = 'xinference',
+  ZenMux = 'zenmux',
   ZeroOne = 'zeroone',
   ZhiPu = 'zhipu',
 }
package/packages/model-bank/src/standard-parameters/index.ts
@@ -149,6 +149,15 @@ export const ModelParamsMetaSchema = z.object({
     })
     .optional(),
 
+  resolution: z
+    .object({
+      default: z.string(),
+      description: z.string().optional(),
+      enum: z.array(z.string()),
+      type: z.literal('string').optional(),
+    })
+    .optional(),
+
   cfg: z
     .object({
       default: z.number(),
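The new `resolution` entry extends the standard image-parameter metadata with an enumerated string option. Below is a minimal sketch of how a parameter definition might validate against that shape, using a standalone zod schema that mirrors the diff; the example resolution values are illustrative and not taken from the package.

import { z } from 'zod';

// Standalone schema mirroring the `resolution` shape added to ModelParamsMetaSchema.
const ResolutionMetaSchema = z
  .object({
    default: z.string(),
    description: z.string().optional(),
    enum: z.array(z.string()),
    type: z.literal('string').optional(),
  })
  .optional();

// Hypothetical metadata for an image model exposing fixed output resolutions.
const resolutionMeta = {
  default: '1024x1024',
  enum: ['1024x1024', '2048x2048', '4096x4096'],
};

// Parses successfully: the object matches the new optional `resolution` entry.
console.log(ResolutionMetaSchema.parse(resolutionMeta));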
package/packages/model-runtime/src/core/openaiCompatibleFactory/index.test.ts
@@ -426,7 +426,7 @@ describe('LobeOpenAICompatibleFactory', () => {
       'data: "Hello"\n\n',
       'id: a\n',
       'event: usage\n',
-      'data: {"inputTextTokens":5,"outputTextTokens":5,"totalInputTokens":5,"totalOutputTokens":5,"totalTokens":10}\n\n',
+      'data: {"inputTextTokens":5,"outputTextTokens":5,"totalInputTokens":5,"totalOutputTokens":5,"totalTokens":10,"cost":0.000005}\n\n',
       'id: output_speed\n',
       'event: speed\n',
       expect.stringMatching(/^data: {.*"tps":.*,"ttft":.*}\n\n$/), // tps ttft should be calculated with elapsed time
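The usage fixture above now carries a `cost` field next to the token counts. Here is a small sketch of reading that field from a usage SSE event, assuming only the payload shape shown in the test fixture; the interface name and parsing step are hypothetical.

// Shape taken from the test fixture above; `cost` is the newly added field.
interface UsagePayload {
  inputTextTokens: number;
  outputTextTokens: number;
  totalInputTokens: number;
  totalOutputTokens: number;
  totalTokens: number;
  cost?: number;
}

const usageLine =
  'data: {"inputTextTokens":5,"outputTextTokens":5,"totalInputTokens":5,"totalOutputTokens":5,"totalTokens":10,"cost":0.000005}';

// Strip the SSE `data: ` prefix and parse the JSON payload.
const usage: UsagePayload = JSON.parse(usageLine.replace(/^data: /, ''));
console.log(usage.totalTokens, usage.cost); // 10 0.000005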
@@ -601,7 +601,7 @@ describe('LobeOpenAICompatibleFactory', () => {
         signal: controller.signal,
       }),
     );
-  });
+  }, 10000);
   });
 
   describe('Error', () => {
package/packages/model-runtime/src/core/streams/google/index.ts
@@ -148,7 +148,8 @@ const transformGoogleGenerativeAIStream = (
 
   // Check for image data before handling finishReason
   if (Array.isArray(candidate.content?.parts) && candidate.content.parts.length > 0) {
-    const part = candidate.content.parts[0];
+    // Filter out reasoning content and get first non-reasoning part
+    const part = candidate.content.parts.find((p: any) => !p.thought);
 
     if (part && part.inlineData && part.inlineData.data && part.inlineData.mimeType) {
       const imageChunk = {
@@ -182,7 +183,11 @@ const transformGoogleGenerativeAIStream = (
       ...usageChunks,
     ].filter(Boolean) as StreamProtocolChunk[];
   }
-  return { data: candidate.finishReason, id: context?.id, type: 'stop' };
+  // When there is a finishReason but no text content, emit an empty text chunk to stop the loading animation
+  return [
+    { data: '', id: context?.id, type: 'text' },
+    { data: candidate.finishReason, id: context?.id, type: 'stop' },
+  ];
 }
 
 if (!!text?.trim()) return { data: text, id: context?.id, type: 'text' };
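Taken together, the two hunks above change how the Google stream transform handles reasoning parts and bare finish reasons. The following is a simplified, self-contained sketch of that behavior, not the real transformGoogleGenerativeAIStream; the candidate and chunk types are reduced to the fields the diff touches.

// Simplified sketch of the behavior introduced above: skip `thought` parts and,
// when only a finishReason remains, emit an empty text chunk before the stop chunk.
type Chunk = { data: string; id?: string; type: 'text' | 'stop' };

const toChunks = (
  candidate: {
    content?: { parts?: Array<{ text?: string; thought?: boolean }> };
    finishReason?: string;
  },
  id?: string,
): Chunk[] => {
  // Filter out reasoning content and take the first non-reasoning part.
  const part = candidate.content?.parts?.find((p) => !p.thought);

  if (part?.text?.trim()) return [{ data: part.text, id, type: 'text' }];

  if (candidate.finishReason) {
    // The empty text chunk stops the loading animation before the stop signal.
    return [
      { data: '', id, type: 'text' },
      { data: candidate.finishReason, id, type: 'stop' },
    ];
  }

  return [];
};

// A chunk carrying only reasoning plus a finishReason now yields text('') + stop.
console.log(
  toChunks({ content: { parts: [{ text: 'thinking…', thought: true }] }, finishReason: 'STOP' }, 'msg_1'),
);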