@lobehub/chat 1.6.3 → 1.6.5

This diff compares the contents of publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
 
  # Changelog
 
+ ### [Version 1.6.5](https://github.com/lobehub/lobe-chat/compare/v1.6.4...v1.6.5)
+
+ <sup>Released on **2024-07-22**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Content lost unexpectedly on Qwen provider when `finish_reason` is `stop`.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Content lost unexpectedly on Qwen provider when `finish_reason` is `stop`, closes [#3252](https://github.com/lobehub/lobe-chat/issues/3252) ([d35c5b0](https://github.com/lobehub/lobe-chat/commit/d35c5b0))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.6.4](https://github.com/lobehub/lobe-chat/compare/v1.6.3...v1.6.4)
+
+ <sup>Released on **2024-07-21**</sup>
+
+ #### ♻ Code Refactoring
+
+ - **misc**: Add trpc query client with react-query.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Code refactoring
+
+ - **misc**: Add trpc query client with react-query, closes [#3282](https://github.com/lobehub/lobe-chat/issues/3282) ([013ee54](https://github.com/lobehub/lobe-chat/commit/013ee54))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.6.3](https://github.com/lobehub/lobe-chat/compare/v1.6.2...v1.6.3)
 
  <sup>Released on **2024-07-21**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.6.3",
+   "version": "1.6.5",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -121,8 +121,10 @@
      "@next/third-parties": "^14.2.4",
      "@sentry/nextjs": "^7.118.0",
      "@t3-oss/env-nextjs": "^0.10.1",
+     "@tanstack/react-query": "^5.51.11",
      "@trpc/client": "next",
      "@trpc/next": "next",
+     "@trpc/react-query": "next",
      "@trpc/server": "next",
      "@vercel/analytics": "^1.3.1",
      "@vercel/speed-insights": "^1.0.12",
@@ -136,7 +138,7 @@
      "debug": "^4.3.5",
      "dexie": "^3.2.7",
      "diff": "^5.2.0",
-     "drizzle-orm": "^0.31.2",
+     "drizzle-orm": "^0.32.0",
      "drizzle-zod": "^0.5.1",
      "fast-deep-equal": "^3.1.3",
      "gpt-tokenizer": "^2.1.2",
@@ -239,7 +241,7 @@
      "consola": "^3.2.3",
      "dotenv": "^16.4.5",
      "dpdm": "^3.14.0",
-     "drizzle-kit": "^0.22.8",
+     "drizzle-kit": "^0.23.0",
      "eslint": "^8.57.0",
      "eslint-plugin-mdx": "^2.3.4",
      "fake-indexeddb": "^6.0.0",
@@ -0,0 +1,18 @@
+ 'use client';
+
+ import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
+ import React, { PropsWithChildren, useState } from 'react';
+
+ import { lambdaQuery, lambdaQueryClient } from '@/libs/trpc/client';
+
+ const QueryProvider = ({ children }: PropsWithChildren) => {
+   const [queryClient] = useState(() => new QueryClient());
+
+   return (
+     <lambdaQuery.Provider client={lambdaQueryClient} queryClient={queryClient}>
+       <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
+     </lambdaQuery.Provider>
+   );
+ };
+
+ export default QueryProvider;
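One detail worth calling out in the provider above: the QueryClient is created inside `useState` with a lazy initializer rather than at module scope. This is the pattern react-query recommends for Next.js apps, since a module-scope client would be shared by every request a server process renders. A minimal sketch of the contrast; `useIsolatedQueryClient` is a hypothetical helper name, not part of this package:

```ts
import { QueryClient } from '@tanstack/react-query';
import { useState } from 'react';

// Anti-pattern under SSR: a module-scope client is shared by every request
// this server process renders, so cached data could leak between users.
// const queryClient = new QueryClient();

// The pattern used in QueryProvider: a lazy initializer builds the client
// once per mount, and re-renders reuse the same instance.
export const useIsolatedQueryClient = () => {
  const [queryClient] = useState(() => new QueryClient());
  return queryClient;
};
```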
@@ -19,6 +19,7 @@ import { isMobileDevice } from '@/utils/responsive';
 
  import AppTheme from './AppTheme';
  import Locale from './Locale';
+ import QueryProvider from './Query';
  import StoreInitialization from './StoreInitialization';
  import StyleRegistry from './StyleRegistry';
 
@@ -87,7 +88,7 @@ const GlobalLayout = async ({ children }: PropsWithChildren) => {
        isMobile={isMobile}
        serverConfig={serverConfig}
      >
-       {children}
+       <QueryProvider>{children}</QueryProvider>
        <StoreInitialization />
      </ServerConfigStoreProvider>
      <DebugUI />
@@ -3,9 +3,7 @@ import OpenAI from 'openai';
  import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
  import Qwen from '@/config/modelProviders/qwen';
- import { LobeOpenAICompatibleRuntime } from '@/libs/agent-runtime';
- import { ModelProvider } from '@/libs/agent-runtime';
- import { AgentRuntimeErrorType } from '@/libs/agent-runtime';
+ import { AgentRuntimeErrorType, ModelProvider } from '@/libs/agent-runtime';
 
  import * as debugStreamModule from '../utils/debugStream';
  import { LobeQwenAI } from './index';
@@ -134,19 +132,23 @@ describe('LobeQwenAI', () => {
      });
 
      const decoder = new TextDecoder();
-
      const reader = result.body!.getReader();
-     expect(decoder.decode((await reader.read()).value)).toContain(
-       'id: chatcmpl-fc539f49-51a8-94be-8061\n',
-     );
-     expect(decoder.decode((await reader.read()).value)).toContain('event: text\n');
-     expect(decoder.decode((await reader.read()).value)).toContain('data: "Hello"\n\n');
+     const stream: string[] = [];
 
-     expect(decoder.decode((await reader.read()).value)).toContain(
+     while (true) {
+       const { value, done } = await reader.read();
+       if (done) break;
+       stream.push(decoder.decode(value));
+     }
+
+     expect(stream).toEqual([
        'id: chatcmpl-fc539f49-51a8-94be-8061\n',
-     );
-     expect(decoder.decode((await reader.read()).value)).toContain('event: stop\n');
-     expect(decoder.decode((await reader.read()).value)).toContain('');
+       'event: text\n',
+       'data: "Hello"\n\n',
+       'id: chatcmpl-fc539f49-51a8-94be-8061\n',
+       'event: stop\n',
+       'data: "stop"\n\n',
+     ]);
 
      expect((await reader.read()).done).toBe(true);
    });
@@ -123,6 +123,6 @@ export class LobeQwenAI extends LobeOpenAICompatibleRuntime implements LobeRunti
            'result_format',
            'top_p',
          )
-       : params;
+       : omit(params, 'frequency_penalty');
    }
  }
@@ -373,15 +373,23 @@ describe('LobeOpenAICompatibleFactory', () => {
      });
 
      const decoder = new TextDecoder();
-
      const reader = result.body!.getReader();
-     expect(decoder.decode((await reader.read()).value)).toContain('id: a\n');
-     expect(decoder.decode((await reader.read()).value)).toContain('event: text\n');
-     expect(decoder.decode((await reader.read()).value)).toContain('data: "Hello"\n\n');
+     const stream: string[] = [];
+
+     while (true) {
+       const { value, done } = await reader.read();
+       if (done) break;
+       stream.push(decoder.decode(value));
+     }
 
-     expect(decoder.decode((await reader.read()).value)).toContain('id: a\n');
-     expect(decoder.decode((await reader.read()).value)).toContain('event: text\n');
-     expect(decoder.decode((await reader.read()).value)).toContain('');
+     expect(stream).toEqual([
+       'id: a\n',
+       'event: text\n',
+       'data: "Hello"\n\n',
+       'id: a\n',
+       'event: stop\n',
+       'data: "stop"\n\n',
+     ]);
 
      expect((await reader.read()).done).toBe(true);
    });
@@ -89,7 +89,7 @@ export function transformResponseToStream(data: OpenAI.ChatCompletion) {
      controller.enqueue({
        choices: data.choices.map((choice: OpenAI.ChatCompletion.Choice) => ({
          delta: {
-           content: choice.message.content,
+           content: null,
            role: choice.message.role,
          },
          finish_reason: choice.finish_reason,
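This `content: null` change works in tandem with the `transformQwenStream` reordering below: once the text check takes priority, the synthesized finish chunk must be content-less, or it would be classified as a 'text' event and no 'stop' event would fire. Presumably the message content itself is enqueued in an earlier chunk outside this hunk. A sketch of the assumed chunk sequence:

```ts
// Assumed shape, for illustration only: the content travels in its own
// chunk, and the synthesized finish chunk carries `content: null`.
type SketchChunk = { delta: { content: string | null }; finish_reason: string | null };

const sequence: SketchChunk[] = [
  { delta: { content: 'full answer' }, finish_reason: null }, // -> 'text' event
  { delta: { content: null }, finish_reason: 'stop' },        // -> 'stop' event
];
```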
@@ -62,14 +62,14 @@ export const transformQwenStream = (chunk: OpenAI.ChatCompletionChunk): StreamPr
      } as StreamProtocolToolCallChunk;
    }
 
-   if (item.finish_reason) {
-     return { data: item.finish_reason, id: chunk.id, type: 'stop' };
-   }
-
    if (typeof item.delta?.content === 'string') {
      return { data: item.delta.content, id: chunk.id, type: 'text' };
    }
 
+   if (item.finish_reason) {
+     return { data: item.finish_reason, id: chunk.id, type: 'stop' };
+   }
+
    if (item.delta?.content === null) {
      return { data: item.delta, id: chunk.id, type: 'data' };
    }
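This reordering is the substance of the fix for issue #3252: Qwen can deliver trailing text and `finish_reason: 'stop'` in the same chunk, and the old order classified any such chunk as a stop event, discarding its text. A minimal sketch of the dispatch under that assumption (the chunk literal is hypothetical):

```ts
// Hypothetical final chunk: text and the stop flag arrive together.
const item = { delta: { content: ' world' }, finish_reason: 'stop' as const };

// Old order: the finish_reason branch ran first, emitting 'stop' and
// silently dropping ' world'. New order: the string check wins.
if (typeof item.delta?.content === 'string') {
  // -> { data: ' world', type: 'text' }, so the text survives
} else if (item.finish_reason) {
  // only content-less chunks are classified as 'stop'
}
```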
@@ -0,0 +1,20 @@
+ import { createTRPCClient, httpBatchLink } from '@trpc/client';
+ import superjson from 'superjson';
+
+ import type { EdgeRouter } from '@/server/routers/edge';
+ import { withBasePath } from '@/utils/basePath';
+
+ export const edgeClient = createTRPCClient<EdgeRouter>({
+   links: [
+     httpBatchLink({
+       headers: async () => {
+         // dynamic import to avoid circular dependency
+         const { createHeaderWithAuth } = await import('@/services/_auth');
+
+         return createHeaderWithAuth();
+       },
+       transformer: superjson,
+       url: withBasePath('/trpc/edge'),
+     }),
+   ],
+ });
@@ -0,0 +1,2 @@
+ export { edgeClient } from './edge';
+ export * from './lambda';
@@ -0,0 +1,38 @@
+ import { createTRPCClient, httpBatchLink } from '@trpc/client';
+ import { createTRPCReact } from '@trpc/react-query';
+ import superjson from 'superjson';
+
+ import { fetchErrorNotification } from '@/components/FetchErrorNotification';
+ import type { LambdaRouter } from '@/server/routers/lambda';
+
+ import { ErrorResponse } from './types';
+
+ const links = [
+   httpBatchLink({
+     fetch: async (input, init) => {
+       const response = await fetch(input, init);
+       if (response.ok) return response;
+
+       const errorRes: ErrorResponse = await response.clone().json();
+
+       errorRes.forEach((item) => {
+         const errorData = item.error.json;
+
+         const status = errorData.data.httpStatus;
+         fetchErrorNotification.error({ errorMessage: errorData.message, status });
+       });
+
+       return response;
+     },
+     transformer: superjson,
+     url: '/trpc/lambda',
+   }),
+ ];
+
+ export const lambdaClient = createTRPCClient<LambdaRouter>({
+   links,
+ });
+
+ export const lambdaQuery = createTRPCReact<LambdaRouter>();
+
+ export const lambdaQueryClient = lambdaQuery.createClient({ links });
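Together with the QueryProvider wired into GlobalLayout above, `lambdaQuery` exposes the lambda router as react-query hooks, while `lambdaClient` stays available for imperative calls outside the React tree. A sketch of both styles; `healthcheck` is a hypothetical procedure name used for illustration, not necessarily part of LambdaRouter:

```tsx
'use client';

import { lambdaClient, lambdaQuery } from '@/libs/trpc/client';

// Hook style: cached and deduplicated by react-query; only works in
// components rendered under <QueryProvider>.
const Status = () => {
  const { data, isLoading } = lambdaQuery.healthcheck.useQuery();
  return <span>{isLoading ? 'checking...' : String(data)}</span>;
};

// Imperative style: same router and links, no react-query cache involved.
export const checkOnce = () => lambdaClient.healthcheck.query();

export default Status;
```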
@@ -0,0 +1,18 @@
+ export type ErrorResponse = ErrorItem[];
+
+ export interface ErrorItem {
+   error: {
+     json: {
+       code: number;
+       data: Data;
+       message: string;
+     };
+   };
+ }
+
+ export interface Data {
+   code: string;
+   httpStatus: number;
+   path: string;
+   stack: string;
+ }
@@ -1,65 +0,0 @@
- import { createTRPCClient, httpBatchLink } from '@trpc/client';
- import superjson from 'superjson';
-
- import { fetchErrorNotification } from '@/components/FetchErrorNotification';
- import type { EdgeRouter } from '@/server/routers/edge';
- import type { LambdaRouter } from '@/server/routers/lambda';
- import { withBasePath } from '@/utils/basePath';
-
- export const edgeClient = createTRPCClient<EdgeRouter>({
-   links: [
-     httpBatchLink({
-       headers: async () => {
-         // dynamic import to avoid circular dependency
-         const { createHeaderWithAuth } = await import('@/services/_auth');
-
-         return createHeaderWithAuth();
-       },
-       transformer: superjson,
-       url: withBasePath('/trpc/edge'),
-     }),
-   ],
- });
-
- export type ErrorResponse = ErrorItem[];
-
- export interface ErrorItem {
-   error: {
-     json: {
-       code: number;
-       data: Data;
-       message: string;
-     };
-   };
- }
-
- export interface Data {
-   code: string;
-   httpStatus: number;
-   path: string;
-   stack: string;
- }
-
- export const lambdaClient = createTRPCClient<LambdaRouter>({
-   links: [
-     httpBatchLink({
-       fetch: async (input, init) => {
-         const response = await fetch(input, init);
-         if (response.ok) return response;
-
-         const errorRes: ErrorResponse = await response.clone().json();
-
-         errorRes.forEach((item) => {
-           const errorData = item.error.json;
-
-           const status = errorData.data.httpStatus;
-           fetchErrorNotification.error({ errorMessage: errorData.message, status });
-         });
-
-         return response;
-       },
-       transformer: superjson,
-       url: '/trpc/lambda',
-     }),
-   ],
- });