@lobehub/chat 1.97.10 → 1.97.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -182,6 +182,13 @@
    "when": 1749309388370,
    "tag": "0025_add_provider_config",
    "breakpoints": true
+ },
+ {
+   "idx": 26,
+   "version": "7",
+   "when": 1752212281564,
+   "tag": "0026_add_autovacuum_tuning",
+   "breakpoints": true
  }
  ],
  "version": "6"
@@ -3,7 +3,6 @@ import {
    InvokeModelCommand,
    InvokeModelWithResponseStreamCommand,
  } from '@aws-sdk/client-bedrock-runtime';
- import { experimental_buildLlama2Prompt } from 'ai/prompts';

  import { LobeRuntimeAI } from '../BaseAI';
  import { AgentRuntimeErrorType } from '../error';
@@ -25,6 +24,38 @@ import {
    createBedrockStream,
  } from '../utils/streams';

+ /**
+  * A prompt constructor for HuggingFace LLama 2 chat models.
+  * Does not support `function` messages.
+  * @see https://huggingface.co/meta-llama/Llama-2-70b-chat-hf and https://huggingface.co/blog/llama2#how-to-prompt-llama-2
+  */
+ export function experimental_buildLlama2Prompt(messages: { content: string; role: string }[]) {
+   const startPrompt = `<s>[INST] `;
+   const endPrompt = ` [/INST]`;
+   const conversation = messages.map(({ content, role }, index) => {
+     switch (role) {
+       case 'user': {
+         return content.trim();
+       }
+       case 'assistant': {
+         return ` [/INST] ${content}</s><s>[INST] `;
+       }
+       case 'function': {
+         throw new Error('Llama 2 does not support function calls.');
+       }
+       default: {
+         if (role === 'system' && index === 0) {
+           return `<<SYS>>\n${content}\n<</SYS>>\n\n`;
+         } else {
+           throw new Error(`Invalid message role: ${role}`);
+         }
+       }
+     }
+   });
+
+   return startPrompt + conversation.join('') + endPrompt;
+ }
+
  export interface LobeBedrockAIParams {
    accessKeyId?: string;
    accessKeySecret?: string;
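The block added above is a verbatim copy of the `experimental_buildLlama2Prompt` helper the file previously imported from `ai/prompts`; vendoring it lets the Bedrock runtime drop that import. A minimal sketch of what the helper produces, using a hypothetical message array:

```ts
// Illustrative only — the messages below are made up for the example.
const prompt = experimental_buildLlama2Prompt([
  { content: 'You are a concise assistant.', role: 'system' },
  { content: 'What is the capital of France?', role: 'user' },
]);

// prompt is a single Llama 2 chat string:
// <s>[INST] <<SYS>>
// You are a concise assistant.
// <</SYS>>
//
// What is the capital of France? [/INST]
```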
@@ -2,7 +2,8 @@ import {
    InvokeModelWithResponseStreamResponse,
    ResponseStream,
  } from '@aws-sdk/client-bedrock-runtime';
- import { readableFromAsyncIterable } from 'ai';
+
+ import { readableFromAsyncIterable } from '../protocol';

  const chatStreamable = async function* (stream: AsyncIterable<ResponseStream>) {
    for await (const response of stream) {
@@ -108,6 +108,22 @@ const chatStreamable = async function* <T>(stream: AsyncIterable<T>) {
  };

  const ERROR_CHUNK_PREFIX = '%FIRST_CHUNK_ERROR%: ';
+
+ export function readableFromAsyncIterable<T>(iterable: AsyncIterable<T>) {
+   let it = iterable[Symbol.asyncIterator]();
+   return new ReadableStream<T>({
+     async cancel(reason) {
+       await it.return?.(reason);
+     },
+
+     async pull(controller) {
+       const { done, value } = await it.next();
+       if (done) controller.close();
+       else controller.enqueue(value);
+     },
+   });
+ }
+
  // make the response to the streamable format
  export const convertIterableToStream = <T>(stream: AsyncIterable<T>) => {
    const iterable = chatStreamable(stream);
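This is the same `readableFromAsyncIterable` that the Bedrock hunk above now imports from `../protocol` instead of the `ai` package: it wraps any async iterable in a `ReadableStream`, pulling one value per `pull()` call and propagating cancellation to the iterator. A minimal usage sketch with a hypothetical generator:

```ts
// Illustrative only — a made-up async generator drained through the stream.
async function* numbers() {
  yield 1;
  yield 2;
  yield 3;
}

const reader = readableFromAsyncIterable(numbers()).getReader();
const chunks: number[] = [];

let result = await reader.read();
while (!result.done) {
  chunks.push(result.value);
  result = await reader.read();
}
// chunks → [1, 2, 3]
```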
@@ -1,6 +1,8 @@
- import { ChatCompletionContentPartText } from 'ai/prompts';
  import OpenAI from 'openai';
- import { ChatCompletionContentPart } from 'openai/resources/index.mjs';
+ import {
+   ChatCompletionContentPart,
+   ChatCompletionContentPartText,
+ } from 'openai/resources/index.mjs';
  import type { Stream } from 'openai/streaming';

  import { ChatStreamCallbacks } from '../../types';
@@ -9,9 +9,12 @@ export interface XAIModelCard {

  export const GrokReasoningModels = new Set([
    'grok-3-mini',
-   'grok-4-0709',
+   'grok-4',
  ]);

+ export const isGrokReasoningModel = (model: string) =>
+   Array.from(GrokReasoningModels).some((id) => model.includes(id));
+
  export const LobeXAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.x.ai/v1',
    chatCompletion: {
@@ -20,9 +23,9 @@ export const LobeXAI = createOpenAICompatibleRuntime({

      return {
        ...rest,
-       frequency_penalty: GrokReasoningModels.has(model) ? undefined : frequency_penalty,
+       frequency_penalty: isGrokReasoningModel(model) ? undefined : frequency_penalty,
        model,
-       presence_penalty: GrokReasoningModels.has(model) ? undefined : presence_penalty,
+       presence_penalty: isGrokReasoningModel(model) ? undefined : presence_penalty,
        stream: true,
        ...(enabledSearch && {
          search_parameters: {
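Replacing the exact `GrokReasoningModels.has(model)` lookup with `isGrokReasoningModel(model)` switches to substring matching, so dated snapshot ids (such as the removed `grok-4-0709`) still count as reasoning models and keep the unsupported penalty parameters stripped. A quick sketch of the resulting behaviour (example ids only):

```ts
isGrokReasoningModel('grok-4');       // true — listed in GrokReasoningModels
isGrokReasoningModel('grok-4-0709');  // true — contains 'grok-4'
isGrokReasoningModel('grok-3-mini');  // true
isGrokReasoningModel('grok-2-1212');  // false — penalties are passed through unchanged
```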
@@ -201,6 +201,11 @@ const isSendButtonDisabledByMessage = (s: ChatStoreState) =>
    // 4. when the message is in RAG flow
    isInRAGFlow(s);

+ const inboxActiveTopicMessages = (state: ChatStoreState) => {
+   const activeTopicId = state.activeTopicId;
+   return state.messagesMap[messageMapKey(INBOX_SESSION_ID, activeTopicId)] || [];
+ };
+
  export const chatSelectors = {
    activeBaseChats,
    activeBaseChatsWithoutTool,
@@ -213,6 +218,7 @@ export const chatSelectors = {
    getMessageById,
    getMessageByToolCallId,
    getTraceIdByMessageId,
+   inboxActiveTopicMessages,
    isAIGenerating,
    isCreatingMessage,
    isCurrentChatLoaded,
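The new `inboxActiveTopicMessages` selector reads the inbox session's message list for the currently active topic, falling back to an empty array when nothing is loaded. A minimal usage sketch, assuming the usual zustand store hook; the import paths shown are illustrative:

```ts
import { useChatStore } from '@/store/chat';
import { chatSelectors } from '@/store/chat/selectors';

// Inside a React component: re-renders when the inbox's active-topic messages change.
const inboxMessages = useChatStore(chatSelectors.inboxActiveTopicMessages);
console.log(inboxMessages.length);
```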