@lobehub/chat 1.142.6 → 1.142.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128)
  1. package/CHANGELOG.md +35 -0
  2. package/apps/desktop/src/main/core/browser/Browser.ts +36 -0
  3. package/apps/desktop/src/main/core/infrastructure/StaticFileServerManager.ts +57 -7
  4. package/changelog/v1.json +9 -0
  5. package/locales/ar/models.json +26 -5
  6. package/locales/bg-BG/models.json +26 -5
  7. package/locales/de-DE/models.json +26 -5
  8. package/locales/en-US/models.json +26 -5
  9. package/locales/es-ES/models.json +26 -5
  10. package/locales/fa-IR/models.json +26 -5
  11. package/locales/fr-FR/models.json +26 -5
  12. package/locales/it-IT/models.json +25 -4
  13. package/locales/ja-JP/models.json +26 -5
  14. package/locales/ko-KR/models.json +26 -5
  15. package/locales/nl-NL/models.json +26 -5
  16. package/locales/pl-PL/models.json +26 -5
  17. package/locales/pt-BR/models.json +25 -4
  18. package/locales/ru-RU/models.json +26 -5
  19. package/locales/tr-TR/models.json +26 -5
  20. package/locales/vi-VN/models.json +26 -5
  21. package/locales/zh-CN/models.json +25 -4
  22. package/locales/zh-TW/models.json +26 -5
  23. package/package.json +1 -1
  24. package/packages/electron-client-ipc/src/events/windows.ts +1 -1
  25. package/packages/model-bank/src/aiModels/aihubmix.ts +5 -4
  26. package/packages/model-bank/src/aiModels/novita.ts +2 -2
  27. package/packages/model-bank/src/aiModels/siliconcloud.ts +1 -1
  28. package/packages/model-runtime/src/providers/minimax/index.ts +5 -5
  29. package/packages/model-runtime/src/providers/qiniu/index.ts +1 -0
  30. package/src/app/[variants]/(main)/_layout/Desktop/SideBar/TopActions.tsx +5 -2
  31. package/src/app/[variants]/(main)/discover/(detail)/assistant/AssistantDetailPage.tsx +2 -2
  32. package/src/app/[variants]/(main)/discover/(detail)/components/NotFound.tsx +1 -7
  33. package/src/app/[variants]/(main)/discover/(detail)/mcp/McpDetailPage.tsx +2 -2
  34. package/src/app/[variants]/(main)/discover/(detail)/model/ModelDetailPage.tsx +2 -2
  35. package/src/app/[variants]/(main)/discover/(detail)/provider/ProviderDetailPage.tsx +2 -2
  36. package/src/app/[variants]/(main)/discover/(list)/assistant/AssistantLayout.tsx +1 -1
  37. package/src/app/[variants]/(main)/discover/(list)/assistant/features/Category/index.tsx +1 -1
  38. package/src/app/[variants]/(main)/discover/(list)/mcp/McpLayout.tsx +1 -1
  39. package/src/app/[variants]/(main)/discover/(list)/mcp/features/Category/index.tsx +1 -1
  40. package/src/app/[variants]/(main)/discover/(list)/model/ModelLayout.tsx +1 -1
  41. package/src/app/[variants]/(main)/discover/(list)/model/features/Category/index.tsx +1 -1
  42. package/src/app/[variants]/(main)/discover/components/Title.tsx +1 -4
  43. package/src/app/[variants]/(main)/knowledge/KnowledgeRouter.tsx +73 -0
  44. package/src/app/[variants]/(main)/knowledge/[[...path]]/page.tsx +12 -0
  45. package/src/app/[variants]/(main)/{files/features → knowledge/components}/FileDetail.tsx +15 -3
  46. package/src/app/[variants]/(main)/knowledge/components/GoBack/index.tsx +60 -0
  47. package/src/app/[variants]/(main)/{files/(content)/@menu/features/KnowledgeBase/Item → knowledge/components/KnowledgeBaseItem}/Content.tsx +3 -1
  48. package/src/app/[variants]/(main)/{files/(content)/@menu/features/KnowledgeBase/Item → knowledge/components/KnowledgeBaseItem}/index.tsx +22 -23
  49. package/src/app/[variants]/(main)/{files/(content)/@menu/features/KnowledgeBase/KnowledgeBaseList.tsx → knowledge/components/KnowledgeBaseList/index.tsx} +4 -2
  50. package/src/app/[variants]/(main)/{files/(content)/@modal/(.)[id] → knowledge/components/modal}/FileDetail.tsx +1 -1
  51. package/src/app/[variants]/(main)/{files/(content)/@modal/(.)[id] → knowledge/components/modal}/FullscreenModal.tsx +10 -9
  52. package/src/app/[variants]/(main)/knowledge/components/modal/ModalPageClient.tsx +32 -0
  53. package/src/app/[variants]/(main)/knowledge/components/modal/page.tsx +13 -0
  54. package/src/app/[variants]/(main)/knowledge/components/modal/useFilesQueryParam.ts +65 -0
  55. package/src/app/[variants]/(main)/knowledge/hooks/useFileCategory.ts +33 -0
  56. package/src/app/[variants]/(main)/knowledge/routes/KnowledgeBaseDetail/index.tsx +47 -0
  57. package/src/app/[variants]/(main)/{repos/[id]/features/Menu/Head/index.tsx → knowledge/routes/KnowledgeBaseDetail/menu/Head.tsx} +6 -2
  58. package/src/app/[variants]/(main)/{repos/[id]/features/Menu/index.tsx → knowledge/routes/KnowledgeBaseDetail/menu/Menu.tsx} +7 -5
  59. package/src/app/[variants]/(main)/knowledge/routes/KnowledgeBaseDetail/menu/MenuItems.tsx +35 -0
  60. package/src/app/[variants]/(main)/knowledge/routes/KnowledgeBaseSettings/index.tsx +37 -0
  61. package/src/app/[variants]/(main)/knowledge/routes/KnowledgeBasesList/index.tsx +41 -0
  62. package/src/app/[variants]/(main)/knowledge/routes/KnowledgeHome/index.tsx +131 -0
  63. package/src/app/[variants]/(main)/{files/(content)/_layout/Desktop → knowledge/routes/KnowledgeHome/layout}/Container.tsx +2 -0
  64. package/src/app/[variants]/(main)/{files/(content)/@menu/features/FileMenu/index.tsx → knowledge/routes/KnowledgeHome/menu/FileMenu.tsx} +10 -32
  65. package/src/app/[variants]/(main)/{files/(content)/@menu/features/KnowledgeBase/index.tsx → knowledge/routes/KnowledgeHome/menu/KnowledgeBase.tsx} +17 -2
  66. package/src/app/[variants]/(main)/knowledge/shared/FileModalQueryRoute.tsx +99 -0
  67. package/src/app/[variants]/(main)/knowledge/shared/useFileQueryParam.ts +66 -0
  68. package/src/app/[variants]/(main)/settings/llm/components/Checker.tsx +1 -1
  69. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/Checker.tsx +1 -1
  70. package/src/app/robots.tsx +1 -1
  71. package/src/features/FileManager/FileList/FileListItem/index.tsx +10 -3
  72. package/src/features/FileManager/FileList/MasonryFileItem/MasonryItemWrapper.tsx +2 -0
  73. package/src/features/FileManager/FileList/MasonryFileItem/index.tsx +3 -3
  74. package/src/features/FileManager/FileList/index.tsx +4 -2
  75. package/src/features/FileManager/index.tsx +3 -2
  76. package/src/features/KnowledgeBaseModal/AddFilesToKnowledgeBase/SelectForm.tsx +1 -1
  77. package/src/features/KnowledgeBaseModal/AssignKnowledgeBase/Item/Action.tsx +2 -2
  78. package/src/features/KnowledgeBaseModal/CreateNew/CreateForm.tsx +8 -4
  79. package/src/features/KnowledgeBaseModal/CreateNew/index.tsx +20 -16
  80. package/src/middleware.ts +4 -4
  81. package/src/store/chat/slices/translate/action.ts +1 -1
  82. package/src/tools/web-browsing/Render/Search/index.tsx +1 -1
  83. package/src/app/[variants]/(main)/files/(content)/@menu/default.tsx +0 -27
  84. package/src/app/[variants]/(main)/files/(content)/@modal/(.)[id]/page.tsx +0 -19
  85. package/src/app/[variants]/(main)/files/(content)/@modal/default.tsx +0 -3
  86. package/src/app/[variants]/(main)/files/(content)/NotSupportClient.tsx +0 -161
  87. package/src/app/[variants]/(main)/files/(content)/_layout/Desktop/index.tsx +0 -29
  88. package/src/app/[variants]/(main)/files/(content)/_layout/Mobile.tsx +0 -47
  89. package/src/app/[variants]/(main)/files/(content)/_layout/type.ts +0 -7
  90. package/src/app/[variants]/(main)/files/(content)/layout.tsx +0 -18
  91. package/src/app/[variants]/(main)/files/(content)/page.tsx +0 -14
  92. package/src/app/[variants]/(main)/files/[id]/Header.tsx +0 -63
  93. package/src/app/[variants]/(main)/files/[id]/page.tsx +0 -41
  94. package/src/app/[variants]/(main)/files/hooks/useFileCategory.ts +0 -6
  95. package/src/app/[variants]/(main)/files/loading.tsx +0 -3
  96. package/src/app/[variants]/(main)/repos/[id]/Client.tsx +0 -29
  97. package/src/app/[variants]/(main)/repos/[id]/_layout/Desktop/index.tsx +0 -20
  98. package/src/app/[variants]/(main)/repos/[id]/_layout/Mobile.tsx +0 -37
  99. package/src/app/[variants]/(main)/repos/[id]/_layout/type.ts +0 -5
  100. package/src/app/[variants]/(main)/repos/[id]/evals/components/Container.tsx +0 -25
  101. package/src/app/[variants]/(main)/repos/[id]/evals/components/Tabs.tsx +0 -35
  102. package/src/app/[variants]/(main)/repos/[id]/evals/dataset/CreateDataset/CreateForm.tsx +0 -59
  103. package/src/app/[variants]/(main)/repos/[id]/evals/dataset/CreateDataset/index.tsx +0 -37
  104. package/src/app/[variants]/(main)/repos/[id]/evals/dataset/DatasetDetail/index.tsx +0 -126
  105. package/src/app/[variants]/(main)/repos/[id]/evals/dataset/DatasetList/Item.tsx +0 -57
  106. package/src/app/[variants]/(main)/repos/[id]/evals/dataset/DatasetList/index.tsx +0 -31
  107. package/src/app/[variants]/(main)/repos/[id]/evals/dataset/EmptyGuide/index.tsx +0 -33
  108. package/src/app/[variants]/(main)/repos/[id]/evals/dataset/page.tsx +0 -52
  109. package/src/app/[variants]/(main)/repos/[id]/evals/evaluation/CreateEvaluation/CreateForm.tsx +0 -89
  110. package/src/app/[variants]/(main)/repos/[id]/evals/evaluation/CreateEvaluation/index.tsx +0 -28
  111. package/src/app/[variants]/(main)/repos/[id]/evals/evaluation/CreateEvaluation/useModal.tsx +0 -39
  112. package/src/app/[variants]/(main)/repos/[id]/evals/evaluation/EmptyGuide/index.tsx +0 -25
  113. package/src/app/[variants]/(main)/repos/[id]/evals/evaluation/EvaluationList/index.tsx +0 -210
  114. package/src/app/[variants]/(main)/repos/[id]/evals/evaluation/page.tsx +0 -37
  115. package/src/app/[variants]/(main)/repos/[id]/evals/layout.tsx +0 -29
  116. package/src/app/[variants]/(main)/repos/[id]/evals/page.tsx +0 -9
  117. package/src/app/[variants]/(main)/repos/[id]/features/Menu/Menu/index.tsx +0 -68
  118. package/src/app/[variants]/(main)/repos/[id]/layout.tsx +0 -13
  119. package/src/app/[variants]/(main)/repos/[id]/not-found.tsx +0 -1
  120. package/src/app/[variants]/(main)/repos/[id]/page.tsx +0 -9
  121. package/src/app/[variants]/(main)/repos/layout.tsx +0 -13
  122. package/src/app/[variants]/(main)/repos/loading.tsx +0 -3
  123. package/src/app/[variants]/(main)/{files/(content)/@menu/features/KnowledgeBase → knowledge/components/KnowledgeBaseList}/EmptyStatus.tsx +0 -0
  124. package/src/app/[variants]/(main)/{files/(content)/@menu/features/KnowledgeBase → knowledge/components/KnowledgeBaseList}/SkeletonList.tsx +0 -0
  125. package/src/app/[variants]/(main)/{files/(content)/@modal/(.)[id] → knowledge/components/modal}/FilePreview.tsx +0 -0
  126. package/src/app/[variants]/(main)/{repos/[id] → knowledge}/hooks/useKnowledgeItem.ts +0 -0
  127. package/src/app/[variants]/(main)/{files → knowledge}/layout.tsx +0 -0
  128. package/src/app/[variants]/(main)/{files/(content)/_layout/Desktop → knowledge/routes/KnowledgeHome/layout}/RegisterHotkeys.tsx +0 -0
package/CHANGELOG.md CHANGED
@@ -2,6 +2,41 @@
 
 # Changelog
 
+### [Version 1.142.7](https://github.com/lobehub/lobe-chat/compare/v1.142.6...v1.142.7)
+
+<sup>Released on **2025-10-28**</sup>
+
+#### ♻ Code Refactoring
+
+- **misc**: Change files page from RSC to SPA mode to improve performance.
+
+#### 💄 Styles
+
+- **aihubmix**: Update extendParams to include urlContext.
+- **misc**: Update i18n.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Code refactoring
+
+- **misc**: Change files page from RSC to SPA mode to improve performance, closes [#9846](https://github.com/lobehub/lobe-chat/issues/9846) ([f46cc50](https://github.com/lobehub/lobe-chat/commit/f46cc50))
+
+#### Styles
+
+- **aihubmix**: Update extendParams to include urlContext, closes [#9914](https://github.com/lobehub/lobe-chat/issues/9914) ([5a8fd85](https://github.com/lobehub/lobe-chat/commit/5a8fd85))
+- **misc**: Update i18n, closes [#9907](https://github.com/lobehub/lobe-chat/issues/9907) ([d149c4d](https://github.com/lobehub/lobe-chat/commit/d149c4d))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ### [Version 1.142.6](https://github.com/lobehub/lobe-chat/compare/v1.142.5...v1.142.6)
 
 <sup>Released on **2025-10-28**</sup>
package/apps/desktop/src/main/core/browser/Browser.ts CHANGED
@@ -358,6 +358,9 @@ export default class Browser {
       session: browserWindow.webContents.session,
     });
 
+    // Setup CORS bypass for local file server
+    this.setupCORSBypass(browserWindow);
+
     logger.debug(`[${this.identifier}] Initiating placeholder and URL loading sequence.`);
     this.loadPlaceholder().then(() => {
       this.loadUrl(path).catch((e) => {
@@ -491,4 +494,37 @@ export default class Browser {
     logger.debug(`[${this.identifier}] Manually reapplying visual effects via Browser.`);
     this.applyVisualEffects();
   }
+
+  /**
+   * Setup CORS bypass for local file server (127.0.0.1:*)
+   * This is needed for Electron to access files from the local static file server
+   */
+  private setupCORSBypass(browserWindow: BrowserWindow): void {
+    logger.debug(`[${this.identifier}] Setting up CORS bypass for local file server`);
+
+    const session = browserWindow.webContents.session;
+
+    // Intercept response headers to add CORS headers
+    session.webRequest.onHeadersReceived((details, callback) => {
+      const url = details.url;
+
+      // Only modify headers for local file server requests (127.0.0.1)
+      if (url.includes('127.0.0.1') || url.includes('lobe-desktop-file')) {
+        const responseHeaders = details.responseHeaders || {};
+
+        // Add CORS headers
+        responseHeaders['Access-Control-Allow-Origin'] = ['*'];
+        responseHeaders['Access-Control-Allow-Methods'] = ['GET, POST, PUT, DELETE, OPTIONS'];
+        responseHeaders['Access-Control-Allow-Headers'] = ['*'];
+
+        callback({
+          responseHeaders,
+        });
+      } else {
+        callback({ responseHeaders: details.responseHeaders });
+      }
+    });
+
+    logger.debug(`[${this.identifier}] CORS bypass setup completed`);
+  }
 }
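The hunk above registers a session-wide `onHeadersReceived` interceptor that stamps permissive CORS headers onto responses from the local static file server. As a quick sanity check, a renderer-side probe along these lines should observe the rewritten headers — a minimal sketch, assuming a hypothetical server port (33220) and image path that do not appear in this diff:

```ts
// Hedged sketch: verify from the renderer that the interceptor rewrites headers.
// The port (33220) and the image path are illustrative placeholders.
const probeLocalFileServer = async (): Promise<void> => {
  const res = await fetch('http://127.0.0.1:33220/desktop-file/images/example.png');
  // With the bypass active, the response should carry the wildcard CORS header.
  console.log(res.status, res.headers.get('access-control-allow-origin')); // e.g. 200 '*'
};

probeLocalFileServer().catch(console.error);
```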
package/apps/desktop/src/main/core/infrastructure/StaticFileServerManager.ts CHANGED
@@ -9,6 +9,21 @@ import type { App } from '../App';
 
 const logger = createLogger('core:StaticFileServerManager');
 
+const getAllowedOrigin = (rawOrigin?: string) => {
+  if (!rawOrigin) return '*';
+
+  try {
+    const url = new URL(rawOrigin);
+    const normalizedOrigin = `${url.protocol}//${url.host}`;
+    return url.hostname === 'localhost' || url.hostname === '127.0.0.1' ? normalizedOrigin : '*';
+  } catch {
+    const normalizedOrigin = rawOrigin.replace(/\/$/, '');
+    return normalizedOrigin.includes('localhost') || normalizedOrigin.includes('127.0.0.1')
+      ? normalizedOrigin
+      : '*';
+  }
+};
+
 export class StaticFileServerManager {
   private app: App;
   private fileService: FileService;
@@ -126,16 +141,38 @@
       return;
     }
 
+    // Get the request Origin and configure CORS
+    const origin = req.headers.origin || req.headers.referer;
+    const allowedOrigin = getAllowedOrigin(origin);
+
+    // Handle CORS preflight requests
+    if (req.method === 'OPTIONS') {
+      res.writeHead(204, {
+        'Access-Control-Allow-Headers': 'Content-Type',
+        'Access-Control-Allow-Methods': 'GET, OPTIONS',
+        'Access-Control-Allow-Origin': allowedOrigin,
+        'Access-Control-Max-Age': '86400',
+      });
+      res.end();
+      return;
+    }
+
     const url = new URL(req.url, `http://127.0.0.1:${this.serverPort}`);
     logger.debug(`Processing HTTP file request: ${req.url}`);
+    logger.debug(`Request method: ${req.method}`);
+    logger.debug(`Request headers: ${JSON.stringify(req.headers)}`);
 
     // Extract the file path: take the relative path from /desktop-file/path/to/file.png
     let filePath = decodeURIComponent(url.pathname.slice(1)); // remove the leading /
+    logger.debug(`Initial file path after decode: ${filePath}`);
 
     // If the path starts with desktop-file/, strip that prefix
     const prefixWithoutSlash = LOCAL_STORAGE_URL_PREFIX.slice(1) + '/'; // remove the leading / and append a trailing /
+    logger.debug(`Prefix to remove: ${prefixWithoutSlash}`);
+
     if (filePath.startsWith(prefixWithoutSlash)) {
       filePath = filePath.slice(prefixWithoutSlash.length);
+      logger.debug(`File path after removing prefix: ${filePath}`);
     }
 
     if (!filePath) {
@@ -148,7 +185,12 @@
     }
 
     // Fetch the file via FileService
-    const fileResult = await this.fileService.getFile(`desktop://${filePath}`);
+    const desktopPath = `desktop://${filePath}`;
+    logger.debug(`Attempting to get file: ${desktopPath}`);
+    const fileResult = await this.fileService.getFile(desktopPath);
+    logger.debug(
+      `File retrieved successfully, mime type: ${fileResult.mimeType}, size: ${fileResult.content.byteLength} bytes`,
+    );
 
     // Check the response state again
     if (res.destroyed || res.headersSent) {
@@ -158,11 +200,8 @@
 
     // Set the response headers
     res.writeHead(200, {
-      // Cache for one year
-      'Access-Control-Allow-Origin': 'http://localhost:*',
-
+      'Access-Control-Allow-Origin': allowedOrigin,
       'Cache-Control': 'public, max-age=31536000',
-      // Allow any localhost port
       'Content-Length': Buffer.byteLength(fileResult.content),
       'Content-Type': fileResult.mimeType,
     });
@@ -173,16 +212,27 @@
     logger.debug(`HTTP file served successfully: desktop://${filePath}`);
   } catch (error) {
     logger.error(`Error serving HTTP file: ${error}`);
+    logger.error(`Error stack: ${error.stack}`);
 
     // Check whether the response is still writable
     if (!res.destroyed && !res.headersSent) {
       try {
+        // Get the request Origin and set CORS (error responses need it too!)
+        const origin = req.headers.origin || req.headers.referer;
+        const allowedOrigin = getAllowedOrigin(origin);
+
         // Determine whether this is a file-not-found error
         if (error.name === 'FileNotFoundError') {
-          res.writeHead(404, { 'Content-Type': 'text/plain' });
+          res.writeHead(404, {
+            'Access-Control-Allow-Origin': allowedOrigin,
+            'Content-Type': 'text/plain',
+          });
           res.end('File Not Found');
         } else {
-          res.writeHead(500, { 'Content-Type': 'text/plain' });
+          res.writeHead(500, {
+            'Access-Control-Allow-Origin': allowedOrigin,
+            'Content-Type': 'text/plain',
+          });
          res.end('Internal Server Error');
        }
      } catch (writeError) {
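The `getAllowedOrigin` helper added at the top of this file echoes localhost-family origins back verbatim and falls back to `*` for everything else. A standalone sketch of that behavior with illustrative inputs — none of these sample values come from the diff itself:

```ts
// Same logic as the helper in the hunk above, reproduced for illustration.
const getAllowedOrigin = (rawOrigin?: string) => {
  if (!rawOrigin) return '*';

  try {
    const url = new URL(rawOrigin);
    const normalizedOrigin = `${url.protocol}//${url.host}`;
    return url.hostname === 'localhost' || url.hostname === '127.0.0.1' ? normalizedOrigin : '*';
  } catch {
    // Non-URL strings (e.g. a bare host:port referer) take the substring fallback.
    const normalizedOrigin = rawOrigin.replace(/\/$/, '');
    return normalizedOrigin.includes('localhost') || normalizedOrigin.includes('127.0.0.1')
      ? normalizedOrigin
      : '*';
  }
};

console.log(getAllowedOrigin('http://localhost:3010'));       // 'http://localhost:3010'
console.log(getAllowedOrigin('http://127.0.0.1:33220/a/b/')); // 'http://127.0.0.1:33220'
console.log(getAllowedOrigin('127.0.0.1:9000'));              // '127.0.0.1:9000' (fallback branch)
console.log(getAllowedOrigin('https://example.com'));         // '*'
console.log(getAllowedOrigin(undefined));                     // '*'
```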
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
 [
+  {
+    "children": {
+      "improvements": [
+        "Update i18n."
+      ]
+    },
+    "date": "2025-10-28",
+    "version": "1.142.7"
+  },
   {
     "children": {},
     "date": "2025-10-28",
package/locales/ar/models.json CHANGED
@@ -222,7 +222,10 @@
     "description": "Llama 4 Maverick: نموذج واسع النطاق قائم على مزيج من الخبراء، يوفر استراتيجية تفعيل خبراء فعالة لتحقيق أداء متميز في الاستدلال."
   },
   "MiniMax-M1": {
-    "description": "نموذج استدلال جديد مطور ذاتيًا. رائد عالميًا: 80 ألف سلسلة تفكير × 1 مليون إدخال، أداء يضاهي أفضل النماذج العالمية."
+    "description": "نموذج استدلال جديد مطوّر ذاتيًا. رائد عالميًا: سلسلة تفكير 80K × إدخال 1M، بأداء يضاهي أفضل النماذج العالمية."
+  },
+  "MiniMax-M2": {
+    "description": "مصمم خصيصًا للترميز الفعّال وتدفقات عمل الوكلاء."
   },
   "MiniMax-Text-01": {
     "description": "في سلسلة نماذج MiniMax-01، قمنا بإجراء ابتكارات جريئة: تم تنفيذ آلية الانتباه الخطي على نطاق واسع لأول مرة، لم يعد هيكل Transformer التقليدي هو الخيار الوحيد. يصل عدد معلمات هذا النموذج إلى 456 مليار، مع تنشيط واحد يصل إلى 45.9 مليار. الأداء الشامل للنموذج يتساوى مع النماذج الرائدة في الخارج، بينما يمكنه معالجة سياقات تصل إلى 4 ملايين توكن، وهو 32 مرة من GPT-4o و20 مرة من Claude-3.5-Sonnet."
@@ -398,6 +401,15 @@
   "Qwen/Qwen3-Next-80B-A3B-Thinking": {
     "description": "Qwen3-Next-80B-A3B-Thinking هو نموذج أساسي من الجيل التالي أصدره فريق Tongyi Qianwen في علي بابا، مصمم خصيصًا لمهام الاستدلال المعقدة. يعتمد على بنية Qwen3-Next المبتكرة التي تدمج آلية انتباه هجينة (Gated DeltaNet و Gated Attention) وهيكل خبراء مختلط عالي التشتت (MoE)، بهدف تحقيق أقصى كفاءة في التدريب والاستدلال. كنموذج متناثر يحتوي على 80 مليار معلمة إجمالية، فإنه ينشط حوالي 3 مليارات معلمة فقط أثناء الاستدلال، مما يقلل بشكل كبير من تكلفة الحوسبة، وعند معالجة مهام سياق طويل تتجاوز 32 ألف رمز، فإن معدل الاستدلال يتفوق على نموذج Qwen3-32B بأكثر من 10 أضعاف. نسخة \"Thinking\" هذه مخصصة لتنفيذ مهام متعددة الخطوات عالية الصعوبة مثل الإثباتات الرياضية، توليف الشيفرة، التحليل المنطقي والتخطيط، وتخرج عملية الاستدلال بشكل افتراضي في شكل \"سلسلة تفكير\" منظمة. من حيث الأداء، يتفوق هذا النموذج ليس فقط على نماذج ذات تكلفة أعلى مثل Qwen3-32B-Thinking، بل يتفوق أيضًا في عدة اختبارات معيارية على Gemini-2.5-Flash-Thinking."
   },
+  "Qwen/Qwen3-Omni-30B-A3B-Captioner": {
+    "description": "Qwen3-Omni-30B-A3B-Captioner هو نموذج لغة بصرية (VLM) من سلسلة Qwen3 التي طورتها شركة علي بابا وفريق Tongyi Qianwen. تم تصميمه خصيصًا لإنشاء أوصاف صور عالية الجودة، دقيقة ومفصلة. يعتمد النموذج على بنية خبراء هجينة (MoE) بإجمالي 30 مليار معلمة، مما يتيح له فهمًا عميقًا لمحتوى الصور وتحويله إلى أوصاف نصية طبيعية وسلسة. يتميز بأداء ممتاز في التقاط تفاصيل الصور، وفهم المشاهد، والتعرف على الكائنات، والاستدلال على العلاقات، مما يجعله مثاليًا للتطبيقات التي تتطلب فهمًا دقيقًا للصور وتوليد أوصاف لها."
+  },
+  "Qwen/Qwen3-Omni-30B-A3B-Instruct": {
+    "description": "Qwen3-Omni-30B-A3B-Instruct هو أحد نماذج سلسلة Qwen3 الأحدث من تطوير فريق Tongyi Qianwen في علي بابا. يتميز ببنية خبراء هجينة (MoE) بإجمالي 30 مليار معلمة و3 مليارات معلمة مفعّلة، مما يحقق أداءً قويًا مع تقليل تكلفة الاستدلال. تم تدريبه على بيانات عالية الجودة ومتعددة المصادر واللغات، ويتميز بقدرات عامة قوية، ويدعم معالجة مدخلات متعددة الوسائط تشمل النصوص، الصور، الصوت والفيديو، مع إمكانية فهم وتوليد محتوى متعدد الوسائط."
+  },
+  "Qwen/Qwen3-Omni-30B-A3B-Thinking": {
+    "description": "Qwen3-Omni-30B-A3B-Thinking هو المكون الأساسي \"المفكر\" (Thinker) في نموذج Qwen3-Omni متعدد الوسائط. يتولى معالجة مدخلات متعددة الوسائط تشمل النصوص، الصوت، الصور والفيديو، ويقوم بتنفيذ سلاسل استدلال معقدة. يعمل كنواة الاستدلال، حيث يوحّد جميع المدخلات ضمن فضاء تمثيلي عام، مما يتيح فهماً عميقاً واستدلالاً معقداً عبر الوسائط. يعتمد على بنية خبراء هجينة (MoE) بإجمالي 30 مليار معلمة و3 مليارات معلمة مفعّلة، مما يوازن بين قوة الاستدلال وكفاءة الحوسبة."
+  },
   "Qwen/Qwen3-VL-235B-A22B-Instruct": {
     "description": "Qwen3-VL-235B-A22B-Instruct هو نموذج كبير من سلسلة Qwen3-VL تم تدريبه على التعليمات، ويعتمد على بنية الخبراء المختلطة (MoE)، ويتميز بقدرات فائقة في الفهم والتوليد متعدد الوسائط. يدعم السياق الأصلي حتى 256 ألف رمز، مما يجعله مناسبًا لخدمات الإنتاج متعددة الوسائط عالية التوازي."
   },
@@ -410,6 +422,12 @@
   "Qwen/Qwen3-VL-30B-A3B-Thinking": {
     "description": "Qwen3-VL-30B-A3B-Thinking هو إصدار معزز بالاستدلال من Qwen3-VL (Thinking)، تم تحسينه لمهام الاستدلال متعدد الوسائط، وتحويل الصور إلى كود، وفهم الرؤية المعقدة. يدعم سياقًا يصل إلى 256 ألف رمز ويتميز بقدرات تفكير متسلسلة أقوى."
   },
+  "Qwen/Qwen3-VL-32B-Instruct": {
+    "description": "Qwen3-VL-32B-Instruct هو نموذج لغة بصرية من تطوير فريق Tongyi Qianwen في علي بابا، وقد حقق أداءً رائدًا (SOTA) في العديد من اختبارات اللغة البصرية. يدعم إدخال صور عالية الدقة بمستوى ملايين البكسلات، ويتميز بقدرات قوية في الفهم البصري العام، والتعرف البصري متعدد اللغات (OCR)، وتحديد المواقع البصرية الدقيقة، والحوار البصري. كجزء من سلسلة Qwen3، يمكنه التعامل مع مهام متعددة الوسائط مع دعم وظائف متقدمة مثل استدعاء الأدوات واستكمال السياق."
+  },
+  "Qwen/Qwen3-VL-32B-Thinking": {
+    "description": "Qwen3-VL-32B-Thinking هو إصدار محسن من نموذج اللغة البصرية الذي طوره فريق Tongyi Qianwen في علي بابا، ومخصص لمهام الاستدلال البصري المعقدة. يتميز بوضع \"التفكير\" المدمج، الذي يتيح له توليد خطوات استدلال وسيطة مفصلة قبل الإجابة، مما يعزز أداءه في المهام التي تتطلب منطقًا متعدد الخطوات، وتخطيطًا واستدلالًا معقدًا. يدعم إدخال صور عالية الدقة بمستوى ملايين البكسلات، ويتميز بقدرات قوية في الفهم البصري العام، والتعرف البصري متعدد اللغات (OCR)، وتحديد المواقع البصرية الدقيقة، والحوار البصري، بالإضافة إلى دعم استدعاء الأدوات واستكمال السياق."
+  },
   "Qwen/Qwen3-VL-8B-Instruct": {
     "description": "Qwen3-VL-8B-Instruct هو نموذج لغة بصرية من سلسلة Qwen3، تم تطويره استنادًا إلى Qwen3-8B-Instruct وتدريبه على كمية كبيرة من بيانات الصور والنصوص. يتميز بقدرته على فهم الرؤية العامة، وإجراء حوارات تتمحور حول المحتوى البصري، والتعرف على النصوص متعددة اللغات داخل الصور. وهو مناسب لتطبيقات مثل الأسئلة والأجوبة البصرية، ووصف الصور، واتباع التعليمات متعددة الوسائط، واستدعاء الأدوات."
   },
@@ -959,6 +977,9 @@
   "databricks/dbrx-instruct": {
     "description": "DBRX Instruct يوفر قدرة معالجة تعليمات موثوقة، يدعم تطبيقات متعددة الصناعات."
   },
+  "deepseek-ai/DeepSeek-OCR": {
+    "description": "DeepSeek-OCR هو نموذج لغة بصرية طورته DeepSeek AI، يركز على التعرف البصري على الحروف (OCR) و\"الضغط البصري السياقي\". يهدف هذا النموذج إلى استكشاف حدود ضغط المعلومات السياقية من الصور، ويستطيع معالجة المستندات بكفاءة وتحويلها إلى تنسيقات نصية منظمة مثل Markdown. يتمتع بقدرة دقيقة على التعرف على النصوص داخل الصور، مما يجعله مثاليًا لتطبيقات رقمنة المستندات، واستخراج النصوص، والمعالجة المنظمة."
+  },
   "deepseek-ai/DeepSeek-R1": {
     "description": "DeepSeek-R1 هو نموذج استدلال مدفوع بالتعلم المعزز (RL) يعالج مشكلات التكرار وقابلية القراءة في النموذج. قبل استخدام RL، قدم DeepSeek-R1 بيانات بدء باردة، مما أدى إلى تحسين أداء الاستدلال. إنه يقدم أداءً مماثلاً لـ OpenAI-o1 في المهام الرياضية والبرمجية والاستدلال، وقد حسّن النتائج العامة من خلال طرق تدريب مصممة بعناية."
   },
@@ -1670,9 +1691,6 @@
   "google/gemma-3-12b-it": {
     "description": "Gemma 3 12B هو نموذج لغة مفتوح المصدر من جوجل، وضع معايير جديدة في الكفاءة والأداء."
   },
-  "google/gemma-3-1b-it": {
-    "description": "Gemma 3 1B هو نموذج لغة مفتوح المصدر من جوجل، وضع معايير جديدة في الكفاءة والأداء."
-  },
   "google/gemma-3-27b-it": {
     "description": "جيمّا 3 27B هو نموذج لغوي مفتوح المصدر من جوجل، وقد وضع معايير جديدة من حيث الكفاءة والأداء."
   },
@@ -3149,6 +3167,9 @@
   "tencent/Hunyuan-A13B-Instruct": {
     "description": "Hunyuan-A13B-Instruct يحتوي على 80 مليار معلمة، ويمكن تفعيل 13 مليار معلمة فقط لمنافسة النماذج الأكبر، ويدعم الاستدلال المختلط بين \"التفكير السريع/التفكير البطيء\"؛ فهم مستقر للنصوص الطويلة؛ تم التحقق من قدرات الوكيل عبر BFCL-v3 وτ-Bench، مع أداء متقدم؛ يجمع بين GQA وتنسيقات التكميم المتعددة لتحقيق استدلال فعال."
   },
+  "tencent/Hunyuan-MT-7B": {
+    "description": "نموذج الترجمة Hunyuan يتكون من نموذج Hunyuan-MT-7B ونموذج مدمج Hunyuan-MT-Chimera. Hunyuan-MT-7B هو نموذج ترجمة خفيف الوزن يحتوي على 7 مليارات معلمة، ويُستخدم لترجمة النصوص من اللغة المصدر إلى اللغة الهدف. يدعم النموذج الترجمة بين 33 لغة بالإضافة إلى 5 لغات من الأقليات الصينية. في مسابقة الترجمة الآلية الدولية WMT25، حصل Hunyuan-MT-7B على المركز الأول في 30 من أصل 31 فئة لغوية شارك فيها، مما يبرز قدراته المتميزة في الترجمة. ولتلبية احتياجات الترجمة، طورت Tencent Hunyuan منهجية تدريب شاملة تبدأ من ما قبل التدريب، ثم الضبط الخاضع للإشراف، ثم التعزيز المخصص للترجمة، وأخيرًا التعزيز المدمج، مما مكنه من تحقيق أداء رائد بين النماذج ذات الحجم المماثل. يتميز النموذج بكفاءة حسابية عالية وسهولة في النشر، مما يجعله مناسبًا لمجموعة واسعة من التطبيقات."
+  },
   "text-embedding-3-large": {
     "description": "أقوى نموذج لتضمين النصوص، مناسب للمهام الإنجليزية وغير الإنجليزية."
   },
@@ -3314,4 +3335,4 @@
   "zai/glm-4.5v": {
     "description": "GLM-4.5V مبني على نموذج GLM-4.5-Air الأساسي، يرث التقنيات المثبتة من GLM-4.1V-Thinking، ويوسعها بفعالية من خلال بنية MoE القوية التي تضم 106 مليار معلمة."
   }
-}
+}
package/locales/bg-BG/models.json CHANGED
@@ -222,7 +222,10 @@
     "description": "Llama 4 Maverick: Голям модел, базиран на Mixture-of-Experts, предлагащ ефективна стратегия за активиране на експерти за отлични резултати при разсъждение."
   },
   "MiniMax-M1": {
-    "description": "Изцяло ново самостоятелно разработено модел за разсъждение. Световен лидер: 80K вериги на мислене x 1M вход, с резултати, сравними с водещите модели в чужбина."
+    "description": "Изцяло нова самостоятелно разработена инференсна система. Световен лидер: 80K вериги на мислене x 1M входни данни, с ефективност, съпоставима с водещите международни модели."
+  },
+  "MiniMax-M2": {
+    "description": "Създаден специално за ефективно програмиране и работни потоци с агенти."
   },
   "MiniMax-Text-01": {
     "description": "В серията модели MiniMax-01 направихме смели иновации: за първи път реализирахме мащабно линейно внимание, традиционната архитектура на Transformer вече не е единственият избор. Параметрите на този модел достигат 4560 милиарда, с единична активация от 45.9 милиарда. Общата производителност на модела е на нивото на водещите модели в чужбина, като същевременно ефективно обработва глобалния контекст от 4 милиона токена, което е 32 пъти повече от GPT-4o и 20 пъти повече от Claude-3.5-Sonnet."
@@ -398,6 +401,15 @@
   "Qwen/Qwen3-Next-80B-A3B-Thinking": {
     "description": "Qwen3-Next-80B-A3B-Thinking е следващото поколение основен модел, публикуван от екипа на Alibaba Tongyi Qianwen, специално проектиран за сложни задачи за разсъждение. Той е базиран на иновативната архитектура Qwen3-Next, която комбинира хибриден механизъм за внимание (Gated DeltaNet и Gated Attention) и структура с висока степен на разреждане на смесени експерти (MoE), с цел постигане на изключителна ефективност при обучение и извод. Като разреден модел с общо 80 милиарда параметри, при извод активира само около 3 милиарда параметри, което значително намалява изчислителните разходи. При обработка на задачи с дълъг контекст над 32K токена, пропускателната способност при извод е над 10 пъти по-висока в сравнение с модела Qwen3-32B. Тази „Thinking“ версия е оптимизирана за изпълнение на сложни многостъпкови задачи като математически доказателства, синтез на код, логически анализ и планиране, като по подразбиране изходът на разсъжденията е във формата на структурирана „мисловна верига“. По отношение на производителността, тя не само превъзхожда модели с по-високи разходи като Qwen3-32B-Thinking, но и превъзхожда Gemini-2.5-Flash-Thinking в множество бенчмаркове."
   },
+  "Qwen/Qwen3-Omni-30B-A3B-Captioner": {
+    "description": "Qwen3-Omni-30B-A3B-Captioner е визуално-езиков модел (VLM) от серията Qwen3 на екипа Tongyi Qianwen на Alibaba. Той е специално проектиран за генериране на висококачествени, подробни и точни описания на изображения. Моделът използва архитектура с хибридни експерти (MoE) с общо 30 милиарда параметъра, което му позволява дълбоко разбиране на съдържанието на изображенията и превръщането им в естествен и плавен текст. Отличава се в улавянето на детайли, разбиране на сцени, разпознаване на обекти и логическо извеждане на връзки, което го прави особено подходящ за приложения, изискващи прецизно визуално разбиране и генериране на описания."
+  },
+  "Qwen/Qwen3-Omni-30B-A3B-Instruct": {
+    "description": "Qwen3-Omni-30B-A3B-Instruct е част от най-новата серия Qwen3 на екипа Tongyi Qianwen на Alibaba. Това е модел с хибридни експерти (MoE), съдържащ 30 милиарда общи параметъра и 3 милиарда активни параметъра, който съчетава висока производителност с ниски разходи за инференция. Обучен е върху висококачествени, многоизточникови и многоезични данни, притежава силни универсални способности и поддържа обработка на вход от всички модалности, включително текст, изображения, аудио и видео, като може да разбира и генерира мултимодално съдържание."
+  },
+  "Qwen/Qwen3-Omni-30B-A3B-Thinking": {
+    "description": "Qwen3-Omni-30B-A3B-Thinking е основният компонент \"мислител\" (Thinker) в мултимодалния модел Qwen3-Omni. Той е специално проектиран за обработка на мултимодални входове, включително текст, аудио, изображения и видео, и изпълнение на сложни вериги на мислене. Като интелектуален център на инференцията, моделът обединява всички входове в общо представително пространство, което позволява дълбоко междумодално разбиране и сложни логически изводи. Изграден е върху архитектура с хибридни експерти (MoE), с 30 милиарда общи параметъра и 3 милиарда активни параметъра, осигуряващи мощни възможности за разсъждение при оптимизирана изчислителна ефективност."
+  },
   "Qwen/Qwen3-VL-235B-A22B-Instruct": {
     "description": "Qwen3-VL-235B-A22B-Instruct е голям модел от серията Qwen3-VL, фино настроен с инструкции, базиран на архитектура с множество експерти (MoE). Той притежава изключителни способности за мултимодално разбиране и генериране, с вградена поддръжка на 256K контекст, подходящ за високонадеждни производствени мултимодални услуги."
   },
@@ -410,6 +422,12 @@
   "Qwen/Qwen3-VL-30B-A3B-Thinking": {
     "description": "Qwen3-VL-30B-A3B-Thinking е подобрена версия за разсъждение (Thinking) от серията Qwen3-VL, оптимизирана за мултимодално разсъждение, преобразуване на изображения в код и сложни задачи за визуално разбиране. Поддържа 256K контекст и притежава по-силни способности за верижно мислене."
   },
+  "Qwen/Qwen3-VL-32B-Instruct": {
+    "description": "Qwen3-VL-32B-Instruct е визуално-езиков модел, разработен от екипа Tongyi Qianwen на Alibaba, който постига водещи SOTA резултати в множество визуално-езикови бенчмаркове. Моделът поддържа вход на изображения с висока резолюция от милиони пиксели и притежава силни способности за общо визуално разбиране, многоезичен OCR, прецизна визуална локализация и визуален диалог. Като част от серията Qwen3, той може да изпълнява сложни мултимодални задачи и поддържа разширени функции като извикване на инструменти и продължаване на префикси."
+  },
+  "Qwen/Qwen3-VL-32B-Thinking": {
+    "description": "Qwen3-VL-32B-Thinking е специално оптимизирана версия на визуално-езиков модел от екипа Tongyi Qianwen на Alibaba, предназначена за сложни визуални логически задачи. Моделът включва \"режим на мислене\", който му позволява да генерира подробни междинни стъпки на разсъждение преди да отговори на въпрос, значително подобрявайки представянето му при задачи, изискващи многoетапна логика, планиране и сложни изводи. Поддържа изображения с висока резолюция от милиони пиксели, притежава силни способности за общо визуално разбиране, многоезичен OCR, прецизна визуална локализация и визуален диалог, както и функции като извикване на инструменти и продължаване на префикси."
+  },
   "Qwen/Qwen3-VL-8B-Instruct": {
     "description": "Qwen3-VL-8B-Instruct е визуално-езиков модел от серията Qwen3, базиран на Qwen3-8B-Instruct и обучен върху голям обем от данни с изображения и текст. Той е особено добър в общо визуално разбиране, визуално-центрирани диалози и разпознаване на многоезичен текст в изображения. Подходящ е за визуални въпроси и отговори, описание на изображения, мултимодални инструкции и използване на инструменти."
   },
@@ -959,6 +977,9 @@
   "databricks/dbrx-instruct": {
     "description": "DBRX Instruct предлага висока надеждност в обработката на инструкции, поддържаща приложения в множество индустрии."
   },
+  "deepseek-ai/DeepSeek-OCR": {
+    "description": "DeepSeek-OCR е визуално-езиков модел, разработен от DeepSeek AI, фокусиран върху оптично разпознаване на символи (OCR) и \"контекстуална оптична компресия\". Моделът изследва границите на компресиране на контекстуална информация от изображения и може ефективно да обработва документи, преобразувайки ги в структурирани текстови формати като Markdown. Той точно разпознава текстово съдържание в изображения, което го прави особено подходящ за дигитализация на документи, извличане на текст и структурирана обработка."
+  },
   "deepseek-ai/DeepSeek-R1": {
     "description": "DeepSeek-R1 е модел за извеждане, управляван от подсилено обучение (RL), който решава проблемите с повторяемостта и четимостта в модела. Преди RL, DeepSeek-R1 въвежда данни за студен старт, за да оптимизира допълнително производителността на извеждане. Той показва сравнима производителност с OpenAI-o1 в математически, кодови и извеждащи задачи и подобрява общите резултати чрез внимателно проектирани методи на обучение."
   },
@@ -1670,9 +1691,6 @@
   "google/gemma-3-12b-it": {
     "description": "Gemma 3 12B е отворен езиков модел на Google, който поставя нови стандарти за ефективност и производителност."
   },
-  "google/gemma-3-1b-it": {
-    "description": "Gemma 3 1B е отворен езиков модел на Google, който поставя нови стандарти за ефективност и производителност."
-  },
   "google/gemma-3-27b-it": {
     "description": "Gemma 3 27B е отворен езиков модел на Google, който поставя нови стандарти за ефективност и производителност."
   },
@@ -3149,6 +3167,9 @@
   "tencent/Hunyuan-A13B-Instruct": {
     "description": "Hunyuan-A13B-Instruct има 80 милиарда параметри, като активиране на 13 милиарда параметри е достатъчно за съпоставяне с по-големи модели, поддържа хибридно разсъждение „бързо мислене/бавно мислене“; стабилно разбиране на дълги текстове; потвърдено с BFCL-v3 и τ-Bench, с водещи възможности на агент; комбинира GQA и множество формати за квантоване за ефективно разсъждение."
   },
+  "tencent/Hunyuan-MT-7B": {
+    "description": "Моделът за превод Hunyuan (Hunyuan Translation Model) се състои от преводния модел Hunyuan-MT-7B и интегрирания модел Hunyuan-MT-Chimera. Hunyuan-MT-7B е лек модел с 7 милиарда параметъра, предназначен за превод на изходен текст към целеви език. Поддържа превод между 33 езика и 5 езика на китайски малцинства. В международното състезание по машинен превод WMT25, Hunyuan-MT-7B спечели първо място в 30 от 31 езикови категории, в които участва, демонстрирайки изключителни преводачески способности. За нуждите на превода, Tencent Hunyuan предлага цялостна тренировъчна парадигма — от предварително обучение до контролирано фино настройване, последвано от усилване чрез превод и интеграция, което му позволява да постигне водеща производителност сред модели със същия мащаб. Моделът е с висока изчислителна ефективност и лесен за внедряване, подходящ за различни приложения."
+  },
   "text-embedding-3-large": {
     "description": "Най-мощният модел за векторизация, подходящ за английски и неанглийски задачи."
   },
@@ -3314,4 +3335,4 @@
   "zai/glm-4.5v": {
     "description": "GLM-4.5V е изграден върху основния модел GLM-4.5-Air, наследявайки проверените технологии на GLM-4.1V-Thinking и постига ефективно мащабиране чрез мощната MoE архитектура с 106 милиарда параметри."
   }
-}
+}
package/locales/de-DE/models.json CHANGED
@@ -222,7 +222,10 @@
     "description": "Llama 4 Maverick: Ein groß angelegtes Modell basierend auf Mixture-of-Experts, das eine effiziente Expertenaktivierungsstrategie bietet, um bei der Inferenz herausragende Leistungen zu erzielen."
   },
   "MiniMax-M1": {
-    "description": "Ein völlig neu entwickeltes Inferenzmodell. Weltweit führend: 80K Denkketten x 1M Eingaben, Leistung auf Augenhöhe mit den besten Modellen im Ausland."
+    "description": "Ein neu entwickeltes Inferenzmodell. Weltweit führend: 80K Denkketten x 1M Eingaben, vergleichbare Leistung mit den besten internationalen Modellen."
+  },
+  "MiniMax-M2": {
+    "description": "Speziell entwickelt für effizientes Programmieren und Agent-Workflows."
   },
   "MiniMax-Text-01": {
     "description": "In der MiniMax-01-Serie haben wir mutige Innovationen vorgenommen: Erstmals wurde die lineare Aufmerksamkeitsmechanismus in großem Maßstab implementiert, sodass die traditionelle Transformer-Architektur nicht mehr die einzige Wahl ist. Dieses Modell hat eine Parameteranzahl von bis zu 456 Milliarden, wobei eine Aktivierung 45,9 Milliarden beträgt. Die Gesamtleistung des Modells kann mit den besten Modellen im Ausland mithalten und kann gleichzeitig effizient den weltweit längsten Kontext von 4 Millionen Tokens verarbeiten, was 32-mal so viel wie GPT-4o und 20-mal so viel wie Claude-3.5-Sonnet ist."
@@ -398,6 +401,15 @@
   "Qwen/Qwen3-Next-80B-A3B-Thinking": {
     "description": "Qwen3-Next-80B-A3B-Thinking ist ein von Alibaba Tongyi Qianwen Team veröffentlichtes nächstes Generation Basis-Modell, das speziell für komplexe Inferenzaufgaben entwickelt wurde. Es basiert auf der innovativen Qwen3-Next-Architektur, die hybride Aufmerksamkeitsmechanismen (Gated DeltaNet und Gated Attention) mit einer hochgradig spärlichen Mixture-of-Experts (MoE)-Struktur kombiniert, um höchste Trainings- und Inferenz-Effizienz zu gewährleisten. Als spärliches Modell mit insgesamt 80 Milliarden Parametern werden bei der Inferenz nur etwa 3 Milliarden Parameter aktiviert, was die Rechenkosten stark reduziert. Bei der Verarbeitung von Langkontextaufgaben mit über 32K Tokens übertrifft der Durchsatz das Qwen3-32B-Modell um das Zehnfache. Diese „Thinking“-Version ist für anspruchsvolle mehrstufige Aufgaben wie mathematische Beweise, Code-Synthese, logische Analyse und Planung optimiert und gibt den Inferenzprozess standardmäßig in strukturierter „Denkketten“-Form aus. In der Leistung übertrifft es nicht nur kostenintensivere Modelle wie Qwen3-32B-Thinking, sondern auch in mehreren Benchmarks das Gemini-2.5-Flash-Thinking."
   },
+  "Qwen/Qwen3-Omni-30B-A3B-Captioner": {
+    "description": "Qwen3-Omni-30B-A3B-Captioner ist ein visuelles Sprachmodell (VLM) aus der Qwen3-Serie des Alibaba Tongyi Qianwen-Teams. Es ist speziell darauf ausgelegt, hochwertige, detaillierte und präzise Bildbeschreibungen zu generieren. Das Modell basiert auf einer Mixture-of-Experts (MoE)-Architektur mit insgesamt 30 Milliarden Parametern und ist in der Lage, Bildinhalte tiefgreifend zu verstehen und in natürlich fließende Textbeschreibungen umzuwandeln. Es überzeugt durch exzellente Leistungen in Bereichen wie Detailerkennung, Szenenverständnis, Objekterkennung und Beziehungslogik und eignet sich besonders für Anwendungen, die präzises Bildverständnis und Beschreibungsgenerierung erfordern."
+  },
+  "Qwen/Qwen3-Omni-30B-A3B-Instruct": {
+    "description": "Qwen3-Omni-30B-A3B-Instruct ist ein Modell aus der neuesten Qwen3-Serie des Alibaba Tongyi Qianwen-Teams. Es handelt sich um ein Mixture-of-Experts (MoE)-Modell mit insgesamt 30 Milliarden Parametern und 3 Milliarden aktiven Parametern, das starke Leistung bei gleichzeitig reduzierten Inferenzkosten bietet. Das Modell wurde mit hochwertigen, vielfältigen und mehrsprachigen Daten trainiert und verfügt über umfassende Fähigkeiten zur Verarbeitung multimodaler Eingaben, darunter Text, Bild, Audio und Video. Es kann Inhalte über verschiedene Modalitäten hinweg verstehen und generieren."
+  },
+  "Qwen/Qwen3-Omni-30B-A3B-Thinking": {
+    "description": "Qwen3-Omni-30B-A3B-Thinking ist die zentrale \"Denkkomponente\" (Thinker) innerhalb des multimodalen Qwen3-Omni-Modells. Sie ist speziell dafür konzipiert, komplexe Denkketten und Schlussfolgerungen über multimodale Eingaben wie Text, Audio, Bilder und Videos hinweg zu verarbeiten. Als das \"Gehirn\" der Inferenz vereinheitlicht dieses Modell alle Eingaben in einem gemeinsamen Repräsentationsraum und ermöglicht so tiefes Verständnis und komplexe Schlussfolgerungen über Modalitäten hinweg. Es basiert auf einer Mixture-of-Experts (MoE)-Architektur mit 30 Milliarden Gesamtparametern und 3 Milliarden aktiven Parametern und bietet starke Inferenzfähigkeiten bei optimierter Rechenleistung."
+  },
   "Qwen/Qwen3-VL-235B-A22B-Instruct": {
     "description": "Qwen3-VL-235B-A22B-Instruct ist ein groß angelegtes, instruktional feinabgestimmtes Modell der Qwen3-VL-Serie. Es basiert auf einer Mixture-of-Experts (MoE)-Architektur und bietet herausragende multimodale Verständnis- und Generierungsfähigkeiten. Mit nativer Unterstützung für 256K Kontextlänge eignet es sich ideal für hochgradig parallele, produktionsreife multimodale Dienste."
   },
@@ -410,6 +422,12 @@
   "Qwen/Qwen3-VL-30B-A3B-Thinking": {
     "description": "Qwen3-VL-30B-A3B-Thinking ist die reasoning-optimierte Version (Thinking) der Qwen3-VL-Serie. Sie wurde für multimodale Schlussfolgerungen, Bild-zu-Code-Generierung und komplexe visuelle Verständnisaufgaben optimiert. Mit Unterstützung für 256K Kontext bietet sie eine verbesserte Fähigkeit zum kettenbasierten Denken."
   },
+  "Qwen/Qwen3-VL-32B-Instruct": {
+    "description": "Qwen3-VL-32B-Instruct ist ein visuelles Sprachmodell des Alibaba Tongyi Qianwen-Teams, das in mehreren Benchmarks für visuelle Sprachverarbeitung führende SOTA-Ergebnisse erzielt hat. Das Modell unterstützt hochauflösende Bildeingaben im Megapixelbereich und bietet starke Fähigkeiten in allgemeinem visuellen Verständnis, mehrsprachiger Texterkennung (OCR), feinkörniger visueller Lokalisierung und visueller Dialogführung. Als Teil der Qwen3-Serie ist es in der Lage, komplexe multimodale Aufgaben zu bewältigen und unterstützt fortgeschrittene Funktionen wie Tool-Aufrufe und Präfix-Fortsetzungen."
+  },
+  "Qwen/Qwen3-VL-32B-Thinking": {
+    "description": "Qwen3-VL-32B-Thinking ist eine speziell für komplexe visuelle Schlussfolgerungsaufgaben optimierte Version des visuellen Sprachmodells vom Alibaba Tongyi Qianwen-Team. Das Modell verfügt über einen integrierten \"Denkmodus\", der es ihm ermöglicht, vor der Beantwortung von Fragen detaillierte Zwischenschritte der Argumentation zu generieren. Dadurch wird seine Leistung bei Aufgaben mit mehrstufiger Logik, Planung und komplexem Denken erheblich verbessert. Es unterstützt hochauflösende Bildeingaben im Megapixelbereich und bietet starke Fähigkeiten in allgemeinem visuellen Verständnis, mehrsprachiger OCR, feinkörniger visueller Lokalisierung und visueller Dialogführung sowie Funktionen wie Tool-Aufrufe und Präfix-Fortsetzungen."
+  },
   "Qwen/Qwen3-VL-8B-Instruct": {
     "description": "Qwen3-VL-8B-Instruct ist ein visuelles Sprachmodell der Qwen3-Serie, basierend auf Qwen3-8B-Instruct und auf umfangreichen Bild-Text-Daten trainiert. Es ist spezialisiert auf allgemeines visuelles Verständnis, visuell zentrierte Dialoge und mehrsprachige Texterkennung in Bildern. Es eignet sich für Szenarien wie visuelle Frage-Antwort-Systeme, Bildbeschreibungen, multimodale Befehlsausführung und Tool-Integration."
   },
@@ -959,6 +977,9 @@
   "databricks/dbrx-instruct": {
     "description": "DBRX Instruct bietet zuverlässige Anweisungsverarbeitungsfähigkeiten und unterstützt Anwendungen in verschiedenen Branchen."
   },
+  "deepseek-ai/DeepSeek-OCR": {
+    "description": "DeepSeek-OCR ist ein visuelles Sprachmodell von DeepSeek AI, das sich auf optische Zeichenerkennung (OCR) und \"kontextuelle optische Kompression\" spezialisiert hat. Das Modell zielt darauf ab, die Grenzen der Kontextkompression aus Bildern auszuloten und kann Dokumente effizient verarbeiten und in strukturierte Textformate wie Markdown umwandeln. Es erkennt Textinhalte in Bildern präzise und eignet sich besonders für Anwendungen wie Dokumentendigitalisierung, Textextraktion und strukturierte Verarbeitung."
+  },
   "deepseek-ai/DeepSeek-R1": {
     "description": "DeepSeek-R1 ist ein durch verstärkendes Lernen (RL) gesteuertes Inferenzmodell, das die Probleme der Wiederholbarkeit und Lesbarkeit im Modell löst. Vor dem RL führte DeepSeek-R1 Kaltstartdaten ein, um die Inferenzleistung weiter zu optimieren. Es zeigt in mathematischen, programmierbezogenen und Inferenzaufgaben eine vergleichbare Leistung zu OpenAI-o1 und verbessert durch sorgfältig gestaltete Trainingsmethoden die Gesamteffizienz."
   },
@@ -1670,9 +1691,6 @@
   "google/gemma-3-12b-it": {
     "description": "Gemma 3 12B ist ein Open-Source-Sprachmodell von Google, das neue Maßstäbe in Effizienz und Leistung setzt."
   },
-  "google/gemma-3-1b-it": {
-    "description": "Gemma 3 1B ist ein Open-Source-Sprachmodell von Google, das neue Maßstäbe in Effizienz und Leistung setzt."
-  },
   "google/gemma-3-27b-it": {
     "description": "Gemma 3 27B ist ein Open-Source-Sprachmodell von Google, das neue Maßstäbe in Bezug auf Effizienz und Leistung setzt."
   },
@@ -3149,6 +3167,9 @@
   "tencent/Hunyuan-A13B-Instruct": {
     "description": "Hunyuan-A13B-Instruct verfügt über 80 Milliarden Parameter, von denen 13 Milliarden aktiviert werden können, um mit größeren Modellen zu konkurrieren. Es unterstützt eine hybride Denkweise aus „schnellem Denken/langsamem Denken“; die Verarbeitung langer Texte ist stabil; durch BFCL-v3 und τ-Bench validiert, übertrifft die Agentenfähigkeit andere Modelle; in Kombination mit GQA und mehreren Quantisierungsformaten ermöglicht es effiziente Inferenz."
   },
+  "tencent/Hunyuan-MT-7B": {
+    "description": "Das Hunyuan-Übersetzungsmodell besteht aus dem Übersetzungsmodell Hunyuan-MT-7B und dem integrierten Modell Hunyuan-MT-Chimera. Hunyuan-MT-7B ist ein leichtgewichtiges Übersetzungsmodell mit 7 Milliarden Parametern, das Quelltexte in Zielsprache übersetzt. Es unterstützt Übersetzungen zwischen 33 Sprachen sowie 5 chinesischen Minderheitensprachen. Beim internationalen WMT25-Maschinenübersetzungswettbewerb belegte Hunyuan-MT-7B in 30 von 31 teilnehmenden Sprachpaaren den ersten Platz und demonstrierte damit seine herausragende Übersetzungsleistung. Für Übersetzungsszenarien hat Tencent Hunyuan ein vollständiges Trainingsparadigma entwickelt – von Pretraining über überwachtes Fine-Tuning bis hin zu Übersetzungsverstärkung und integrierter Optimierung – und damit branchenführende Leistung bei vergleichbarer Modellgröße erreicht. Das Modell ist recheneffizient, leicht zu implementieren und für vielfältige Anwendungsszenarien geeignet."
+  },
   "text-embedding-3-large": {
     "description": "Das leistungsstärkste Vektormodell, geeignet für englische und nicht-englische Aufgaben."
   },
@@ -3314,4 +3335,4 @@
   "zai/glm-4.5v": {
     "description": "GLM-4.5V basiert auf dem GLM-4.5-Air Basismodell, übernimmt bewährte Techniken von GLM-4.1V-Thinking und skaliert effektiv mit einer leistungsstarken MoE-Architektur mit 106 Milliarden Parametern."
   }
-}
+}
package/locales/en-US/models.json CHANGED
@@ -222,7 +222,10 @@
     "description": "Llama 4 Maverick: A large-scale model based on Mixture-of-Experts, offering an efficient expert activation strategy for superior inference performance."
   },
   "MiniMax-M1": {
-    "description": "A brand-new self-developed inference model. Globally leading: 80K reasoning chains x 1M input, performance comparable to top overseas models."
+    "description": "A newly developed inference model. World-leading: 80K chain-of-thought x 1M input, delivering performance on par with top-tier international models."
+  },
+  "MiniMax-M2": {
+    "description": "Purpose-built for efficient coding and agent workflows."
   },
   "MiniMax-Text-01": {
     "description": "In the MiniMax-01 series of models, we have made bold innovations: for the first time, we have implemented a linear attention mechanism on a large scale, making the traditional Transformer architecture no longer the only option. This model has a parameter count of up to 456 billion, with a single activation of 45.9 billion. Its overall performance rivals that of top overseas models while efficiently handling the world's longest context of 4 million tokens, which is 32 times that of GPT-4o and 20 times that of Claude-3.5-Sonnet."
@@ -398,6 +401,15 @@
   "Qwen/Qwen3-Next-80B-A3B-Thinking": {
     "description": "Qwen3-Next-80B-A3B-Thinking is the next-generation foundational model released by Alibaba's Tongyi Qianwen team, specifically designed for complex reasoning tasks. It is based on the innovative Qwen3-Next architecture, which integrates a hybrid attention mechanism (Gated DeltaNet and Gated Attention) and a highly sparse mixture-of-experts (MoE) structure, aiming for ultimate training and inference efficiency. As a sparse model with a total of 80 billion parameters, it activates only about 3 billion parameters during inference, greatly reducing computational costs. When processing long-context tasks exceeding 32K tokens, its throughput is more than 10 times higher than the Qwen3-32B model. This \"Thinking\" version is optimized for executing challenging multi-step tasks such as mathematical proofs, code synthesis, logical analysis, and planning, and by default outputs the reasoning process in a structured \"chain-of-thought\" format. In terms of performance, it not only surpasses higher-cost models like Qwen3-32B-Thinking but also outperforms Gemini-2.5-Flash-Thinking on multiple benchmarks."
   },
+  "Qwen/Qwen3-Omni-30B-A3B-Captioner": {
+    "description": "Qwen3-Omni-30B-A3B-Captioner is a vision-language model (VLM) from Alibaba's Qwen3 series, developed by the Tongyi Qianwen team. It is specifically designed to generate high-quality, detailed, and accurate image captions. Built on a 30-billion-parameter Mixture of Experts (MoE) architecture, the model excels at understanding image content and converting it into natural, fluent textual descriptions. It demonstrates outstanding performance in capturing image details, scene understanding, object recognition, and relational reasoning, making it ideal for applications requiring precise image comprehension and caption generation."
+  },
+  "Qwen/Qwen3-Omni-30B-A3B-Instruct": {
+    "description": "Qwen3-Omni-30B-A3B-Instruct is part of the latest Qwen3 series from Alibaba's Tongyi Qianwen team. This Mixture of Experts (MoE) model features 30 billion total parameters and 3 billion active parameters, offering powerful performance while reducing inference costs. Trained on high-quality, diverse, and multilingual data, it boasts strong general capabilities and supports full-modality input processing—including text, images, audio, and video—enabling it to understand and generate cross-modal content."
+  },
+  "Qwen/Qwen3-Omni-30B-A3B-Thinking": {
+    "description": "Qwen3-Omni-30B-A3B-Thinking is the core 'Thinker' component of the Qwen3-Omni multimodal model. It is designed to handle complex chain-of-thought reasoning across multiple modalities, including text, audio, images, and video. Acting as the reasoning engine, it unifies all inputs into a shared representation space, enabling deep cross-modal understanding and sophisticated reasoning. Built on a Mixture of Experts (MoE) architecture with 30 billion total parameters and 3 billion active parameters, it balances powerful reasoning capabilities with computational efficiency."
+  },
   "Qwen/Qwen3-VL-235B-A22B-Instruct": {
     "description": "Qwen3-VL-235B-A22B-Instruct is a large instruction-tuned model in the Qwen3-VL series. Based on a Mixture of Experts (MoE) architecture, it offers exceptional multimodal understanding and generation capabilities. With native support for 256K context length, it is well-suited for high-concurrency, production-grade multimodal services."
   },
@@ -410,6 +422,12 @@
   "Qwen/Qwen3-VL-30B-A3B-Thinking": {
     "description": "Qwen3-VL-30B-A3B-Thinking is the reasoning-enhanced version of Qwen3-VL. It is optimized for multimodal reasoning, image-to-code tasks, and complex visual understanding. Supporting 256K context length, it offers stronger chain-of-thought capabilities."
   },
+  "Qwen/Qwen3-VL-32B-Instruct": {
+    "description": "Qwen3-VL-32B-Instruct is a vision-language model developed by Alibaba's Tongyi Qianwen team, achieving state-of-the-art (SOTA) performance across multiple vision-language benchmarks. It supports high-resolution image inputs at the megapixel level and offers robust general visual understanding, multilingual OCR, fine-grained visual localization, and visual dialogue capabilities. As part of the Qwen3 series, it is equipped to handle complex multimodal tasks and supports advanced features such as tool invocation and prefix continuation."
+  },
+  "Qwen/Qwen3-VL-32B-Thinking": {
+    "description": "Qwen3-VL-32B-Thinking is a specialized version of Alibaba's Qwen3 vision-language model, optimized for complex visual reasoning tasks. It features a built-in 'thinking mode' that enables the model to generate detailed intermediate reasoning steps before answering, significantly enhancing its performance on tasks requiring multi-step logic, planning, and complex inference. The model supports high-resolution image inputs at the megapixel level and offers strong general visual understanding, multilingual OCR, fine-grained visual localization, and visual dialogue capabilities, along with support for tool invocation and prefix continuation."
+  },
   "Qwen/Qwen3-VL-8B-Instruct": {
     "description": "Qwen3-VL-8B-Instruct is a vision-language model from the Qwen3 series, built on Qwen3-8B-Instruct and trained on a large corpus of image-text data. It excels at general visual understanding, vision-centric dialogue, and multilingual text recognition within images. It is well-suited for tasks such as visual question answering, image captioning, multimodal instruction following, and tool invocation."
   },
@@ -959,6 +977,9 @@
   "databricks/dbrx-instruct": {
     "description": "DBRX Instruct provides highly reliable instruction processing capabilities, supporting applications across multiple industries."
   },
+  "deepseek-ai/DeepSeek-OCR": {
+    "description": "DeepSeek-OCR is a vision-language model developed by DeepSeek AI, focused on Optical Character Recognition (OCR) and 'contextual optical compression.' The model explores the limits of compressing contextual information from images and efficiently processes documents into structured text formats such as Markdown. It accurately recognizes textual content within images, making it particularly suitable for document digitization, text extraction, and structured data processing applications."
+  },
   "deepseek-ai/DeepSeek-R1": {
     "description": "DeepSeek-R1 is a reinforcement learning (RL) driven inference model that addresses issues of repetitiveness and readability within the model. Prior to RL, DeepSeek-R1 introduced cold start data to further optimize inference performance. It performs comparably to OpenAI-o1 in mathematical, coding, and reasoning tasks, and enhances overall effectiveness through meticulously designed training methods."
   },
@@ -1670,9 +1691,6 @@
   "google/gemma-3-12b-it": {
     "description": "Gemma 3 12B is an open-source language model from Google that sets new standards in efficiency and performance."
   },
-  "google/gemma-3-1b-it": {
-    "description": "Gemma 3 1B is an open-source language model from Google that sets new standards in efficiency and performance."
-  },
   "google/gemma-3-27b-it": {
     "description": "Gemma 3 27B is an open-source language model from Google that sets new standards in efficiency and performance."
   },
@@ -3149,6 +3167,9 @@
   "tencent/Hunyuan-A13B-Instruct": {
     "description": "Hunyuan-A13B-Instruct has 80 billion parameters, with 13 billion activated parameters matching the performance of larger models. It supports hybrid reasoning with 'fast thinking/slow thinking'; offers stable long-text comprehension; validated by BFCL-v3 and τ-Bench, demonstrating leading agent capabilities; integrates GQA and multiple quantization formats for efficient inference."
   },
+  "tencent/Hunyuan-MT-7B": {
+    "description": "The Hunyuan Translation Model consists of the Hunyuan-MT-7B translation model and the integrated Hunyuan-MT-Chimera model. Hunyuan-MT-7B is a lightweight translation model with 7 billion parameters, designed to translate source text into target languages. It supports translation across 33 languages and 5 Chinese minority languages. In the WMT25 international machine translation competition, Hunyuan-MT-7B ranked first in 30 out of 31 language categories it participated in, showcasing its exceptional translation capabilities. Tencent's Hunyuan team has developed a comprehensive training paradigm for translation, encompassing pretraining, supervised fine-tuning, translation reinforcement, and integrated enhancement, achieving industry-leading performance among models of similar scale. The model is highly efficient and easy to deploy, making it suitable for a wide range of applications."
+  },
   "text-embedding-3-large": {
     "description": "The most powerful vectorization model, suitable for both English and non-English tasks."
   },
@@ -3314,4 +3335,4 @@
   "zai/glm-4.5v": {
     "description": "GLM-4.5V is built on the GLM-4.5-Air foundational model, inheriting the proven techniques of GLM-4.1V-Thinking while achieving efficient scaling through a powerful 106 billion parameter MoE architecture."
   }
-}
+}