@lobehub/lobehub 2.0.0-next.172 → 2.0.0-next.173

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
 
  # Changelog
 
+ ## [Version 2.0.0-next.173](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.172...v2.0.0-next.173)
+
+ <sup>Released on **2025-12-16**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Requests to the GPT-5 series should not include `top_p` or `temperature` when reasoning effort is not none.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Requests to the GPT-5 series should not include `top_p` or `temperature` when reasoning effort is not none, closes [#10800](https://github.com/lobehub/lobe-chat/issues/10800) ([b4ad470](https://github.com/lobehub/lobe-chat/commit/b4ad470))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 2.0.0-next.172](https://github.com/lobehub/lobe-chat/compare/v2.0.0-next.171...v2.0.0-next.172)
 
  <sup>Released on **2025-12-15**</sup>
package/README.md CHANGED
@@ -352,7 +352,7 @@ In addition, these plugins are not limited to news aggregation, but can also ext
  | [Git OSS Stats](https://lobechat.com/discover/plugin/gitUserRepoStats)<br/><sup>By **yunwei37** on **2025-12-13**</sup> | Dynamically generate and analyze stats and history for OSS repos and developers.<br/>`github` `oss` |
  | [Questmate Forms](https://lobechat.com/discover/plugin/questmate)<br/><sup>By **questmate** on **2025-12-13**</sup> | Create forms, checklists and workflows (we call 'em Quests!) that you can assign, schedule or make public.<br/>`forms` `checklists` `productivity` |
 
- > 📊 Total plugins: [<kbd>**41**</kbd>](https://lobechat.com/discover/plugins)
+ > 📊 Total plugins: [<kbd>**39**</kbd>](https://lobechat.com/discover/plugins)
 
  <!-- PLUGIN LIST -->
 
package/README.zh-CN.md CHANGED
@@ -345,7 +345,7 @@ LobeChat 的插件生态系统是其核心功能的重要扩展,它极大地
  | [Git OSS Stats](https://lobechat.com/discover/plugin/gitUserRepoStats)<br/><sup>By **yunwei37** on **2025-12-13**</sup> | 动态生成和分析开源软件仓库和开发者的统计数据和历史记录。<br/>`github` `oss` |
  | [Questmate Forms](https://lobechat.com/discover/plugin/questmate)<br/><sup>By **questmate** on **2025-12-13**</sup> | 创建表单、清单和工作流程(我们称之为任务!),您可以分配、安排或公开。<br/>`表单` `清单` `生产力` |
 
- > 📊 Total plugins: [<kbd>**41**</kbd>](https://lobechat.com/discover/plugins)
+ > 📊 Total plugins: [<kbd>**39**</kbd>](https://lobechat.com/discover/plugins)
 
  <!-- PLUGIN LIST -->
 
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
  [
+ {
+ "children": {
+ "fixes": [
+ "Requests to the GPT-5 series should not include top_p or temperature when reasoning effort is not none."
+ ]
+ },
+ "date": "2025-12-16",
+ "version": "2.0.0-next.173"
+ },
  {
  "children": {
  "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/lobehub",
- "version": "2.0.0-next.172",
+ "version": "2.0.0-next.173",
  "description": "LobeHub - an open-source,comprehensive AI Agent framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -156,8 +156,14 @@ export const pruneReasoningPayload = (payload: ChatStreamPayload) => {
  stream: shouldStream,
  // Only include stream_options when stream is enabled
  ...(shouldStream && stream_options && { stream_options }),
- temperature: isEffortNone ? payload.temperature : 1,
- top_p: isEffortNone ? payload.top_p : 1,
+
+ /**
+  * Per the OpenAI docs (https://platform.openai.com/docs/guides/latest-model#gpt-5-2-parameter-compatibility),
+  * fields like `top_p`, `temperature` and `logprobs` are only supported for the
+  * GPT-5 series (e.g. gpt-5-mini, gpt-5-nano) when reasoning effort is none.
+  */
+ temperature: isEffortNone ? payload.temperature : undefined,
+ top_p: isEffortNone ? payload.top_p : undefined,
  };
  };
 
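The effect of the change above is that `temperature` and `top_p` become `undefined`, and therefore drop out of the serialized request body, whenever reasoning effort is anything other than none. Below is a minimal, self-contained sketch of that pruning pattern; the payload shape and the `pruneForReasoning` helper are simplified assumptions for illustration, not the library's actual types.

```ts
// Minimal sketch of the pruning behaviour, using a simplified payload shape.
interface SketchPayload {
  model: string;
  reasoning_effort?: 'none' | 'low' | 'medium' | 'high';
  temperature?: number;
  top_p?: number;
}

const pruneForReasoning = (payload: SketchPayload) => {
  const isEffortNone = payload.reasoning_effort === 'none';
  return {
    ...payload,
    // When effort is not none, the values become undefined and are omitted
    // by JSON.stringify, so the request body carries neither field.
    temperature: isEffortNone ? payload.temperature : undefined,
    top_p: isEffortNone ? payload.top_p : undefined,
  };
};

// Example: with medium reasoning effort, both sampling params disappear.
const body = JSON.stringify(
  pruneForReasoning({ model: 'gpt-5-mini', reasoning_effort: 'medium', temperature: 0.7, top_p: 0.9 }),
);
// body === '{"model":"gpt-5-mini","reasoning_effort":"medium"}'
```

Serializing with `JSON.stringify` drops `undefined` values, which is what makes setting the fields to `undefined` equivalent to omitting them; an alternative would be conditional spreading (`...(isEffortNone && { temperature: payload.temperature })`), which keeps the keys out of the object altogether.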
@@ -86,8 +86,8 @@ describe('LobeGithubAI - custom features', () => {
 
  expect(result.model).toBe('o1-preview');
  expect(result.stream).toBe(false);
- expect(result.temperature).toBe(1);
- expect(result.top_p).toBe(1);
+ expect(result.temperature).toBe(undefined);
+ expect(result.top_p).toBe(undefined);
  expect(result.frequency_penalty).toBe(0);
  expect(result.presence_penalty).toBe(0);
  });
@@ -117,8 +117,8 @@ describe('LobeGithubAI - custom features', () => {
 
  expect(result.model).toBe('o3-preview');
  expect(result.stream).toBe(false);
- expect(result.temperature).toBe(1);
- expect(result.top_p).toBe(1);
+ expect(result.temperature).toBe(undefined);
+ expect(result.top_p).toBe(undefined);
  });
 
  it('should handle o3-mini models', () => {
@@ -19,9 +19,12 @@ import ViewSwitcher, { ViewMode } from './ViewSwitcher';
  export const List = memo(() => {
  const { t } = useTranslation('file');
 
- const useFetchFilesAndKnowledgeBases = useAgentStore((s) => s.useFetchFilesAndKnowledgeBases);
+ const [useFetchFilesAndKnowledgeBases, activeAgentId] = useAgentStore((s) => [
+ s.useFetchFilesAndKnowledgeBases,
+ s.activeAgentId,
+ ]);
 
- const { isLoading, error, data } = useFetchFilesAndKnowledgeBases();
+ const { isLoading, error, data } = useFetchFilesAndKnowledgeBases(activeAgentId);
 
  const [columnCount, setColumnCount] = useState(2);
  const [isTransitioning, setIsTransitioning] = useState(false);
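The hunk above selects two values as a tuple from the zustand store. Selecting a tuple generally needs a shallow equality check so the component does not re-render on unrelated store updates; whether that is required here depends on how `useAgentStore` is created, which this diff does not show. The sketch below only illustrates the tuple-selector pattern with a hypothetical minimal store.

```ts
import { create } from 'zustand';
import { useShallow } from 'zustand/react/shallow';

// Hypothetical minimal store shape, for illustration only.
interface AgentSketchState {
  activeAgentId?: string;
  setActiveAgentId: (id?: string) => void;
}

const useAgentSketchStore = create<AgentSketchState>((set) => ({
  activeAgentId: undefined,
  setActiveAgentId: (id) => set({ activeAgentId: id }),
}));

export const useActiveAgentTuple = () =>
  // useShallow compares the returned tuple element-by-element, so the
  // component only re-renders when one of the selected values changes.
  useAgentSketchStore(useShallow((s) => [s.activeAgentId, s.setActiveAgentId] as const));
```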
@@ -49,7 +49,7 @@ export interface AgentChatAction {
  updateAgentChatConfig: (config: Partial<LobeAgentChatConfig>) => Promise<void>;
  updateAgentConfig: (config: PartialDeep<LobeAgentConfig>) => Promise<void>;
  useFetchAgentConfig: (isLogin: boolean | undefined, id: string) => SWRResponse<LobeAgentConfig>;
- useFetchFilesAndKnowledgeBases: () => SWRResponse<KnowledgeItem[]>;
+ useFetchFilesAndKnowledgeBases: (agentId?: string) => SWRResponse<KnowledgeItem[]>;
  useInitInboxAgentStore: (
  isLogin: boolean | undefined,
  defaultAgentConfig?: PartialDeep<LobeAgentConfig>,
@@ -180,9 +180,9 @@ export const createChatSlice: StateCreator<
  },
  },
  ),
- useFetchFilesAndKnowledgeBases: () => {
+ useFetchFilesAndKnowledgeBases: (agentId) => {
  return useClientDataSWR<KnowledgeItem[]>(
- [FETCH_AGENT_KNOWLEDGE_KEY, get().activeAgentId],
+ agentId ? [FETCH_AGENT_KNOWLEDGE_KEY, agentId] : null,
  ([, id]: string[]) => agentService.getFilesAndKnowledgeBases(id),
  {
  fallbackData: [],
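The key change in this last hunk is that the SWR key becomes conditional: when no `agentId` is supplied, the key is `null` and SWR skips the request entirely, instead of the hook always reading `activeAgentId` from the store. Here is a minimal sketch of that conditional-fetching pattern with plain `useSWR`; the `fetchAgentKnowledge` fetcher and the endpoint are illustrative assumptions (the project actually wraps SWR in `useClientDataSWR` and calls `agentService.getFilesAndKnowledgeBases`).

```ts
import useSWR from 'swr';

// Illustrative names; only FETCH_AGENT_KNOWLEDGE_KEY appears in the diff above.
const FETCH_AGENT_KNOWLEDGE_KEY = 'FETCH_AGENT_KNOWLEDGE';

const fetchAgentKnowledge = async (id: string): Promise<string[]> => {
  const res = await fetch(`/api/agents/${id}/knowledge`); // hypothetical endpoint
  return res.json();
};

export const useAgentKnowledge = (agentId?: string) =>
  useSWR(
    // A null key disables the request, so nothing is fetched until an agent id exists.
    agentId ? [FETCH_AGENT_KNOWLEDGE_KEY, agentId] : null,
    ([, id]: [string, string]) => fetchAgentKnowledge(id),
    { fallbackData: [] },
  );
```

This mirrors the component change earlier in the diff: callers such as the `List` component now decide which agent's knowledge to fetch, and no request is made until an active agent id is available.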