@lobehub/chat 1.15.9 → 1.15.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,40 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.15.11](https://github.com/lobehub/lobe-chat/compare/v1.15.10...v1.15.11)
6
+
7
+ <sup>Released on **2024-09-04**</sup>
8
+
9
+ <br/>
10
+
11
+ <details>
12
+ <summary><kbd>Improvements and Fixes</kbd></summary>
13
+
14
+ </details>
15
+
16
+ <div align="right">
17
+
18
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
19
+
20
+ </div>
21
+
22
+ ### [Version 1.15.10](https://github.com/lobehub/lobe-chat/compare/v1.15.9...v1.15.10)
23
+
24
+ <sup>Released on **2024-09-03**</sup>
25
+
26
+ <br/>
27
+
28
+ <details>
29
+ <summary><kbd>Improvements and Fixes</kbd></summary>
30
+
31
+ </details>
32
+
33
+ <div align="right">
34
+
35
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
36
+
37
+ </div>
38
+
5
39
  ### [Version 1.15.9](https://github.com/lobehub/lobe-chat/compare/v1.15.8...v1.15.9)
6
40
 
7
41
  <sup>Released on **2024-09-03**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.15.9",
3
+ "version": "1.15.11",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -56,6 +56,7 @@ export const useStyles = createStyles(
56
56
  border-radius: unset;
57
57
  `,
58
58
  }) as Partial<{
59
+ // eslint-disable-next-line unused-imports/no-unused-vars
59
60
  [k in keyof ElementsConfig]: any;
60
61
  }>,
61
62
  );
@@ -0,0 +1,32 @@
1
+ // @vitest-environment edge-runtime
2
+ import { describe, expect, it } from 'vitest';
3
+
4
+ import { POST } from './route';
5
+
6
+ describe('tokenizer Route', () => {
7
+ it('count hello world', async () => {
8
+ const txt = 'Hello, world!';
9
+ const request = new Request('https://test.com', {
10
+ method: 'POST',
11
+ body: txt,
12
+ });
13
+
14
+ const response = await POST(request);
15
+
16
+ const data = await response.json();
17
+ expect(data.count).toEqual(4);
18
+ });
19
+
20
+ it('count Chinese', async () => {
21
+ const txt = '今天天气真好';
22
+ const request = new Request('https://test.com', {
23
+ method: 'POST',
24
+ body: txt,
25
+ });
26
+
27
+ const response = await POST(request);
28
+
29
+ const data = await response.json();
30
+ expect(data.count).toEqual(5);
31
+ });
32
+ });
@@ -0,0 +1,8 @@
1
+ import { encode } from 'gpt-tokenizer/encoding/o200k_base';
2
+ import { NextResponse } from 'next/server';
3
+
4
+ export const POST = async (req: Request) => {
5
+ const str = await req.text();
6
+
7
+ return NextResponse.json({ count: encode(str).length });
8
+ };
@@ -170,7 +170,7 @@ export class SessionModel {
170
170
 
171
171
  if (!result) return;
172
172
 
173
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
173
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars,unused-imports/no-unused-vars
174
174
  const { agent, clientId, ...session } = result;
175
175
  const sessionId = this.genId();
176
176
 
@@ -1,20 +1,32 @@
1
- import { startTransition, useEffect, useState } from 'react';
1
+ import { debounce } from 'lodash-es';
2
+ import { startTransition, useCallback, useEffect, useState } from 'react';
2
3
 
3
4
  import { encodeAsync } from '@/utils/tokenizer';
4
5
 
5
6
  export const useTokenCount = (input: string = '') => {
6
7
  const [value, setNum] = useState(0);
7
8
 
8
- useEffect(() => {
9
- startTransition(() => {
10
- encodeAsync(input || '')
9
+ const debouncedEncode = useCallback(
10
+ debounce((text: string) => {
11
+ encodeAsync(text)
11
12
  .then(setNum)
12
13
  .catch(() => {
13
- // 兜底采用字符数
14
- setNum(input.length);
14
+ setNum(text.length);
15
15
  });
16
+ }, 300),
17
+ [],
18
+ );
19
+
20
+ useEffect(() => {
21
+ startTransition(() => {
22
+ debouncedEncode(input || '');
16
23
  });
17
- }, [input]);
24
+
25
+ // 清理函数
26
+ return () => {
27
+ debouncedEncode.cancel();
28
+ };
29
+ }, [input, debouncedEncode]);
18
30
 
19
31
  return value;
20
32
  };
@@ -89,6 +89,7 @@ export const useStyles = createStyles(
89
89
  order: -1;
90
90
  `,
91
91
  }) as Partial<{
92
+ // eslint-disable-next-line unused-imports/no-unused-vars
92
93
  [k in keyof ElementsConfig]: any;
93
94
  }>,
94
95
  );
@@ -62,7 +62,9 @@ export const createContext = async (request: NextRequest): Promise<Context> => {
62
62
  userId = session.user.id;
63
63
  }
64
64
  return createContextInner({ authorizationHeader: authorization, nextAuth: auth, userId });
65
- } catch {}
65
+ } catch (e) {
66
+ console.error('next auth err', e);
67
+ }
66
68
  }
67
69
 
68
70
  return createContextInner({ authorizationHeader: authorization, userId });
@@ -1,5 +1,5 @@
1
1
  /**
2
- * This file contains the root router of Lobe Chat tRPC-backend
2
+ * This file contains the edge router of Lobe Chat tRPC-backend
3
3
  */
4
4
  import { publicProcedure, router } from '@/libs/trpc';
5
5
 
@@ -50,7 +50,7 @@ export class ClientService implements IUserService {
50
50
  await this.preferenceStorage.saveToLocalStorage(preference);
51
51
  }
52
52
 
53
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
53
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars,unused-imports/no-unused-vars
54
54
  async updateGuide(guide: Partial<UserGuide>) {
55
55
  throw new Error('Method not implemented.');
56
56
  }
@@ -0,0 +1,7 @@
1
+ declare module '*.worker.ts' {
2
+ class WebpackWorker extends Worker {
3
+ constructor();
4
+ }
5
+
6
+ export default WebpackWorker;
7
+ }
@@ -2,6 +2,7 @@ import { afterEach, describe, expect, it, vi } from 'vitest';
2
2
 
3
3
  import { MESSAGE_CANCEL_FLAT } from '@/const/message';
4
4
  import { ChatMessageError } from '@/types/message';
5
+ import { sleep } from '@/utils/sleep';
5
6
 
6
7
  import { FetchEventSourceInit } from '../fetchEventSource';
7
8
  import { fetchEventSource } from '../fetchEventSource';
@@ -127,9 +128,10 @@ describe('fetchSSE', () => {
127
128
  const mockOnFinish = vi.fn();
128
129
 
129
130
  (fetchEventSource as any).mockImplementationOnce(
130
- (url: string, options: FetchEventSourceInit) => {
131
+ async (url: string, options: FetchEventSourceInit) => {
131
132
  options.onopen!({ clone: () => ({ ok: true, headers: new Headers() }) } as any);
132
133
  options.onmessage!({ event: 'text', data: JSON.stringify('Hello') } as any);
134
+ await sleep(100);
133
135
  options.onmessage!({ event: 'text', data: JSON.stringify(' World') } as any);
134
136
  },
135
137
  );
@@ -139,8 +141,9 @@ describe('fetchSSE', () => {
139
141
  onFinish: mockOnFinish,
140
142
  });
141
143
 
142
- expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, { text: 'He', type: 'text' });
143
- expect(mockOnMessageHandle).toHaveBeenNthCalledWith(2, { text: 'llo World', type: 'text' });
144
+ expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, { text: 'Hell', type: 'text' });
145
+ expect(mockOnMessageHandle).toHaveBeenNthCalledWith(2, { text: 'o', type: 'text' });
146
+ expect(mockOnMessageHandle).toHaveBeenNthCalledWith(3, { text: ' World', type: 'text' });
144
147
  // more assertions for each character...
145
148
  expect(mockOnFinish).toHaveBeenCalledWith('Hello World', {
146
149
  observationId: null,
@@ -184,7 +187,7 @@ describe('fetchSSE', () => {
184
187
 
185
188
  // TODO: need to check whether the `aarg1` is correct
186
189
  expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, {
187
- isAnimationActives: [true],
190
+ isAnimationActives: [true, true],
188
191
  tool_calls: [
189
192
  { id: '1', type: 'function', function: { name: 'func1', arguments: 'aarg1' } },
190
193
  { function: { arguments: 'aarg2', name: 'func2' }, id: '2', type: 'function' },
@@ -218,9 +221,10 @@ describe('fetchSSE', () => {
218
221
  const abortController = new AbortController();
219
222
 
220
223
  (fetchEventSource as any).mockImplementationOnce(
221
- (url: string, options: FetchEventSourceInit) => {
224
+ async (url: string, options: FetchEventSourceInit) => {
222
225
  options.onopen!({ clone: () => ({ ok: true, headers: new Headers() }) } as any);
223
226
  options.onmessage!({ event: 'text', data: JSON.stringify('Hello') } as any);
227
+ await sleep(100);
224
228
  abortController.abort();
225
229
  options.onmessage!({ event: 'text', data: JSON.stringify(' World') } as any);
226
230
  },
@@ -232,8 +236,9 @@ describe('fetchSSE', () => {
232
236
  signal: abortController.signal,
233
237
  });
234
238
 
235
- expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, { text: 'He', type: 'text' });
236
- expect(mockOnMessageHandle).toHaveBeenNthCalledWith(2, { text: 'llo World', type: 'text' });
239
+ expect(mockOnMessageHandle).toHaveBeenNthCalledWith(1, { text: 'Hell', type: 'text' });
240
+ expect(mockOnMessageHandle).toHaveBeenNthCalledWith(2, { text: 'o', type: 'text' });
241
+ expect(mockOnMessageHandle).toHaveBeenNthCalledWith(3, { text: ' World', type: 'text' });
237
242
 
238
243
  expect(mockOnFinish).toHaveBeenCalledWith('Hello World', {
239
244
  type: 'done',
@@ -44,27 +44,29 @@ export interface FetchSSEOptions {
44
44
  smoothing?: boolean;
45
45
  }
46
46
 
47
+ const START_ANIMATION_SPEED = 4;
48
+
49
+ const END_ANIMATION_SPEED = 15;
50
+
47
51
  const createSmoothMessage = (params: { onTextUpdate: (delta: string, text: string) => void }) => {
48
52
  let buffer = '';
49
53
  // why use queue: https://shareg.pt/GLBrjpK
50
54
  let outputQueue: string[] = [];
51
-
52
- // eslint-disable-next-line no-undef
53
- let animationTimeoutId: NodeJS.Timeout | null = null;
54
55
  let isAnimationActive = false;
56
+ let animationFrameId: number | null = null;
55
57
 
56
58
  // when you need to stop the animation, call this function
57
59
  const stopAnimation = () => {
58
60
  isAnimationActive = false;
59
- if (animationTimeoutId !== null) {
60
- clearTimeout(animationTimeoutId);
61
- animationTimeoutId = null;
61
+ if (animationFrameId !== null) {
62
+ cancelAnimationFrame(animationFrameId);
63
+ animationFrameId = null;
62
64
  }
63
65
  };
64
66
 
65
67
  // define startAnimation function to display the text in buffer smooth
66
68
  // when you need to start the animation, call this function
67
- const startAnimation = (speed = 2) =>
69
+ const startAnimation = (speed = START_ANIMATION_SPEED) =>
68
70
  new Promise<void>((resolve) => {
69
71
  if (isAnimationActive) {
70
72
  resolve();
@@ -76,32 +78,33 @@ const createSmoothMessage = (params: { onTextUpdate: (delta: string, text: strin
76
78
  const updateText = () => {
77
79
  // 如果动画已经不再激活,则停止更新文本
78
80
  if (!isAnimationActive) {
79
- clearTimeout(animationTimeoutId!);
80
- animationTimeoutId = null;
81
+ cancelAnimationFrame(animationFrameId!);
82
+ animationFrameId = null;
81
83
  resolve();
84
+ return;
82
85
  }
83
86
 
84
87
  // 如果还有文本没有显示
85
88
  // 检查队列中是否有字符待显示
86
89
  if (outputQueue.length > 0) {
87
- // 从队列中获取前两个字符(如果存在)
90
+ // 从队列中获取前 n 个字符(如果存在)
88
91
  const charsToAdd = outputQueue.splice(0, speed).join('');
89
92
  buffer += charsToAdd;
90
93
 
91
94
  // 更新消息内容,这里可能需要结合实际情况调整
92
95
  params.onTextUpdate(charsToAdd, buffer);
93
-
94
- // 设置下一个字符的延迟
95
- animationTimeoutId = setTimeout(updateText, 16); // 16 毫秒的延迟模拟打字机效果
96
96
  } else {
97
97
  // 当所有字符都显示完毕时,清除动画状态
98
98
  isAnimationActive = false;
99
- animationTimeoutId = null;
99
+ animationFrameId = null;
100
100
  resolve();
101
+ return;
101
102
  }
103
+
104
+ animationFrameId = requestAnimationFrame(updateText);
102
105
  };
103
106
 
104
- updateText();
107
+ animationFrameId = requestAnimationFrame(updateText);
105
108
  });
106
109
 
107
110
  const pushToQueue = (text: string) => {
@@ -124,20 +127,19 @@ const createSmoothToolCalls = (params: {
124
127
 
125
128
  // 为每个 tool_call 维护一个输出队列和动画控制器
126
129
 
127
- // eslint-disable-next-line no-undef
128
- const animationTimeoutIds: (NodeJS.Timeout | null)[] = [];
129
130
  const outputQueues: string[][] = [];
130
131
  const isAnimationActives: boolean[] = [];
132
+ const animationFrameIds: (number | null)[] = [];
131
133
 
132
134
  const stopAnimation = (index: number) => {
133
135
  isAnimationActives[index] = false;
134
- if (animationTimeoutIds[index] !== null) {
135
- clearTimeout(animationTimeoutIds[index]!);
136
- animationTimeoutIds[index] = null;
136
+ if (animationFrameIds[index] !== null) {
137
+ cancelAnimationFrame(animationFrameIds[index]!);
138
+ animationFrameIds[index] = null;
137
139
  }
138
140
  };
139
141
 
140
- const startAnimation = (index: number, speed = 2) =>
142
+ const startAnimation = (index: number, speed = START_ANIMATION_SPEED) =>
141
143
  new Promise<void>((resolve) => {
142
144
  if (isAnimationActives[index]) {
143
145
  resolve();
@@ -149,6 +151,7 @@ const createSmoothToolCalls = (params: {
149
151
  const updateToolCall = () => {
150
152
  if (!isAnimationActives[index]) {
151
153
  resolve();
154
+ return;
152
155
  }
153
156
 
154
157
  if (outputQueues[index].length > 0) {
@@ -163,15 +166,15 @@ const createSmoothToolCalls = (params: {
163
166
  params.onToolCallsUpdate(toolCallsBuffer, [...isAnimationActives]);
164
167
  }
165
168
 
166
- animationTimeoutIds[index] = setTimeout(updateToolCall, 16);
169
+ animationFrameIds[index] = requestAnimationFrame(() => updateToolCall());
167
170
  } else {
168
171
  isAnimationActives[index] = false;
169
- animationTimeoutIds[index] = null;
172
+ animationFrameIds[index] = null;
170
173
  resolve();
171
174
  }
172
175
  };
173
176
 
174
- updateToolCall();
177
+ animationFrameIds[index] = requestAnimationFrame(() => updateToolCall());
175
178
  });
176
179
 
177
180
  const pushToQueue = (toolCallChunks: MessageToolCallChunk[]) => {
@@ -184,14 +187,14 @@ const createSmoothToolCalls = (params: {
184
187
  if (!outputQueues[chunk.index]) {
185
188
  outputQueues[chunk.index] = [];
186
189
  isAnimationActives[chunk.index] = false;
187
- animationTimeoutIds[chunk.index] = null;
190
+ animationFrameIds[chunk.index] = null;
188
191
  }
189
192
 
190
193
  outputQueues[chunk.index].push(...(chunk.function?.arguments || '').split(''));
191
194
  });
192
195
  };
193
196
 
194
- const startAnimations = async (speed = 2) => {
197
+ const startAnimations = async (speed = START_ANIMATION_SPEED) => {
195
198
  const pools = toolCallsBuffer.map(async (_, index) => {
196
199
  if (outputQueues[index].length > 0 && !isAnimationActives[index]) {
197
200
  await startAnimation(index, speed);
@@ -365,11 +368,11 @@ export const fetchSSE = async (url: string, options: RequestInit & FetchSSEOptio
365
368
  const observationId = response.headers.get(LOBE_CHAT_OBSERVATION_ID);
366
369
 
367
370
  if (textController.isTokenRemain()) {
368
- await textController.startAnimation(15);
371
+ await textController.startAnimation(END_ANIMATION_SPEED);
369
372
  }
370
373
 
371
374
  if (toolCallsController.isTokenRemain()) {
372
- await toolCallsController.startAnimations(15);
375
+ await toolCallsController.startAnimations(END_ANIMATION_SPEED);
373
376
  }
374
377
 
375
378
  await options?.onFinish?.(output, { observationId, toolCalls, traceId, type: finishedType });
@@ -0,0 +1,35 @@
1
+ let worker: Worker | null = null;
2
+
3
+ const getWorker = () => {
4
+ if (!worker && typeof Worker !== 'undefined') {
5
+ worker = new Worker(new URL('tokenizer.worker.ts', import.meta.url));
6
+ }
7
+ return worker;
8
+ };
9
+
10
+ export const clientEncodeAsync = (str: string): Promise<number> =>
11
+ new Promise((resolve, reject) => {
12
+ const worker = getWorker();
13
+
14
+ if (!worker) {
15
+ // 如果 WebWorker 不可用,回退到字符串计算
16
+ resolve(str.length);
17
+ return;
18
+ }
19
+
20
+ const id = Date.now().toString();
21
+
22
+ const handleMessage = (event: MessageEvent) => {
23
+ if (event.data.id === id) {
24
+ worker.removeEventListener('message', handleMessage);
25
+ if (event.data.error) {
26
+ reject(new Error(event.data.error));
27
+ } else {
28
+ resolve(event.data.result);
29
+ }
30
+ }
31
+ };
32
+
33
+ worker.addEventListener('message', handleMessage);
34
+ worker.postMessage({ id, str });
35
+ });
@@ -0,0 +1,15 @@
1
+ export const encodeAsync = async (str: string): Promise<number> => {
2
+ if (str.length === 0) return 0;
3
+
4
+ // 50_000 is the limit of the client
5
+ // if the string is longer than 50_000, we will use the server
6
+ if (str.length <= 50_000) {
7
+ const { clientEncodeAsync } = await import('./client');
8
+
9
+ return await clientEncodeAsync(str);
10
+ } else {
11
+ const { serverEncodeAsync } = await import('./server');
12
+
13
+ return await serverEncodeAsync(str);
14
+ }
15
+ };
@@ -0,0 +1,11 @@
1
+ export const serverEncodeAsync = async (str: string): Promise<number> => {
2
+ try {
3
+ const res = await fetch('/webapi/tokenizer', { body: str, method: 'POST' });
4
+ const data = await res.json();
5
+
6
+ return data.count;
7
+ } catch (e) {
8
+ console.error('serverEncodeAsync:', e);
9
+ return str.length;
10
+ }
11
+ };
@@ -0,0 +1,14 @@
1
+ addEventListener('message', async (event) => {
2
+ const { id, str } = event.data;
3
+ try {
4
+ const { encode } = await import('gpt-tokenizer');
5
+
6
+ console.time('client tokenizer');
7
+ const tokenCount = encode(str).length;
8
+ console.timeEnd('client tokenizer');
9
+
10
+ postMessage({ id, result: tokenCount });
11
+ } catch (error) {
12
+ postMessage({ error: (error as Error).message, id });
13
+ }
14
+ });
@@ -1,5 +0,0 @@
1
- export const encodeAsync = async (str: string) => {
2
- const { encode } = await import('gpt-tokenizer');
3
-
4
- return encode(str).length;
5
- };