@lobehub/chat 1.49.5 → 1.49.6

This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@

  # Changelog

+ ### [Version 1.49.6](https://github.com/lobehub/lobe-chat/compare/v1.49.5...v1.49.6)
+
+ <sup>Released on **2025-01-30**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Support litellm reasoning streaming.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Support litellm reasoning streaming, closes [#5632](https://github.com/lobehub/lobe-chat/issues/5632) ([9942fb3](https://github.com/lobehub/lobe-chat/commit/9942fb3))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.49.5](https://github.com/lobehub/lobe-chat/compare/v1.49.4...v1.49.5)

  <sup>Released on **2025-01-28**</sup>
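For context on the fix: per the code comments in the patch below, the official DeepSeek API marks thinking deltas with `content: null` next to `reasoning_content`, while litellm emits `reasoning_content` without setting `content` at all, so litellm's reasoning text never reached the old null-content branch. A minimal sketch of the two delta shapes, mirroring the test fixtures added below (field values illustrative):

```ts
// Thinking delta as streamed by the official DeepSeek API:
// `content` is explicitly null, so a `content === null` branch catches it.
const deepseekDelta = { content: null, reasoning_content: '您好' };

// The same thinking delta as relayed by litellm: there is no `content`
// field at all, so the null-content branch was never entered and the
// reasoning text was dropped before this fix.
const litellmDelta = { role: 'assistant', reasoning_content: '您好' };
```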
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
  [
+   {
+     "children": {
+       "fixes": [
+         "Support litellm reasoning streaming."
+       ]
+     },
+     "date": "2025-01-30",
+     "version": "1.49.6"
+   },
    {
      "children": {
        "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.49.5",
+   "version": "1.49.6",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -554,7 +554,7 @@ describe('OpenAIStream', () => {
    });

    describe('Reasoning', () => {
-     it('should handle reasoning event', async () => {
+     it('should handle reasoning event in official DeepSeek api', async () => {
        const data = [
          {
            id: '1',
@@ -722,6 +722,206 @@ describe('OpenAIStream', () => {
          chunks.push(decoder.decode(chunk, { stream: true }));
        }

+       expect(chunks).toEqual(
+         [
+           'id: 1',
+           'event: reasoning',
+           `data: ""\n`,
+           'id: 1',
+           'event: reasoning',
+           `data: "您好"\n`,
+           'id: 1',
+           'event: reasoning',
+           `data: "!"\n`,
+           'id: 1',
+           'event: text',
+           `data: "你好"\n`,
+           'id: 1',
+           'event: text',
+           `data: "很高兴"\n`,
+           'id: 1',
+           'event: text',
+           `data: "为您"\n`,
+           'id: 1',
+           'event: text',
+           `data: "提供"\n`,
+           'id: 1',
+           'event: text',
+           `data: "帮助。"\n`,
+           'id: 1',
+           'event: stop',
+           `data: "stop"\n`,
+         ].map((i) => `${i}\n`),
+       );
+     });
+     it('should handle reasoning in litellm', async () => {
+       const data = [
+         {
+           id: '1',
+           object: 'chat.completion.chunk',
+           created: 1737563070,
+           model: 'deepseek-reasoner',
+           system_fingerprint: 'fp_1c5d8833bc',
+           choices: [
+             {
+               index: 0,
+               delta: { role: 'assistant', reasoning_content: '' },
+               logprobs: null,
+               finish_reason: null,
+             },
+           ],
+         },
+         {
+           id: '1',
+           object: 'chat.completion.chunk',
+           created: 1737563070,
+           model: 'deepseek-reasoner',
+           system_fingerprint: 'fp_1c5d8833bc',
+           choices: [
+             {
+               index: 0,
+               delta: { reasoning_content: '您好' },
+               logprobs: null,
+               finish_reason: null,
+             },
+           ],
+         },
+         {
+           id: '1',
+           object: 'chat.completion.chunk',
+           created: 1737563070,
+           model: 'deepseek-reasoner',
+           system_fingerprint: 'fp_1c5d8833bc',
+           choices: [
+             {
+               index: 0,
+               delta: { reasoning_content: '!' },
+               logprobs: null,
+               finish_reason: null,
+             },
+           ],
+         },
+         {
+           id: '1',
+           object: 'chat.completion.chunk',
+           created: 1737563070,
+           model: 'deepseek-reasoner',
+           system_fingerprint: 'fp_1c5d8833bc',
+           choices: [
+             {
+               index: 0,
+               delta: { content: '你好', reasoning_content: null },
+               logprobs: null,
+               finish_reason: null,
+             },
+           ],
+         },
+         {
+           id: '1',
+           object: 'chat.completion.chunk',
+           created: 1737563070,
+           model: 'deepseek-reasoner',
+           system_fingerprint: 'fp_1c5d8833bc',
+           choices: [
+             {
+               index: 0,
+               delta: { content: '很高兴', reasoning_content: null },
+               logprobs: null,
+               finish_reason: null,
+             },
+           ],
+         },
+         {
+           id: '1',
+           object: 'chat.completion.chunk',
+           created: 1737563070,
+           model: 'deepseek-reasoner',
+           system_fingerprint: 'fp_1c5d8833bc',
+           choices: [
+             {
+               index: 0,
+               delta: { content: '为您', reasoning_content: null },
+               logprobs: null,
+               finish_reason: null,
+             },
+           ],
+         },
+         {
+           id: '1',
+           object: 'chat.completion.chunk',
+           created: 1737563070,
+           model: 'deepseek-reasoner',
+           system_fingerprint: 'fp_1c5d8833bc',
+           choices: [
+             {
+               index: 0,
+               delta: { content: '提供', reasoning_content: null },
+               logprobs: null,
+               finish_reason: null,
+             },
+           ],
+         },
+         {
+           id: '1',
+           object: 'chat.completion.chunk',
+           created: 1737563070,
+           model: 'deepseek-reasoner',
+           system_fingerprint: 'fp_1c5d8833bc',
+           choices: [
+             {
+               index: 0,
+               delta: { content: '帮助。', reasoning_content: null },
+               logprobs: null,
+               finish_reason: null,
+             },
+           ],
+         },
+         {
+           id: '1',
+           object: 'chat.completion.chunk',
+           created: 1737563070,
+           model: 'deepseek-reasoner',
+           system_fingerprint: 'fp_1c5d8833bc',
+           choices: [
+             {
+               index: 0,
+               delta: { content: '', reasoning_content: null },
+               logprobs: null,
+               finish_reason: 'stop',
+             },
+           ],
+           usage: {
+             prompt_tokens: 6,
+             completion_tokens: 104,
+             total_tokens: 110,
+             prompt_tokens_details: { cached_tokens: 0 },
+             completion_tokens_details: { reasoning_tokens: 70 },
+             prompt_cache_hit_tokens: 0,
+             prompt_cache_miss_tokens: 6,
+           },
+         },
+       ];
+
+       const mockOpenAIStream = new ReadableStream({
+         start(controller) {
+           data.forEach((chunk) => {
+             controller.enqueue(chunk);
+           });
+
+           controller.close();
+         },
+       });
+
+       const protocolStream = OpenAIStream(mockOpenAIStream);
+
+       const decoder = new TextDecoder();
+       const chunks = [];
+
+       // @ts-ignore
+       for await (const chunk of protocolStream) {
+         chunks.push(decoder.decode(chunk, { stream: true }));
+       }
+
        expect(chunks).toEqual(
          [
            'id: 1',
package/src/libs/agent-runtime/utils/streams/openai.ts CHANGED
@@ -92,13 +92,18 @@ export const transformOpenAIStream = (
    return { data: item.delta.content, id: chunk.id, type: 'text' };
  }

+ // DeepSeek reasoner puts its thinking output in the reasoning_content field
+ // litellm does not set content = null when it handles reasoning content
+ if (
+   item.delta &&
+   'reasoning_content' in item.delta &&
+   typeof item.delta.reasoning_content === 'string'
+ ) {
+   return { data: item.delta.reasoning_content, id: chunk.id, type: 'reasoning' };
+ }
+
  // no-content case
  if (item.delta && item.delta.content === null) {
-   // deepseek reasoner puts its thinking output in the reasoning_content field
-   if ('reasoning_content' in item.delta && typeof item.delta.reasoning_content === 'string') {
-     return { data: item.delta.reasoning_content, id: chunk.id, type: 'reasoning' };
-   }
-
    return { data: item.delta, id: chunk.id, type: 'data' };
  }
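The net effect of the patch is an ordering change: the `reasoning_content` check now runs before the null-content branch instead of inside it. Below is a standalone sketch of the resulting dispatch order, using a hypothetical simplified `Delta` type and `classify` helper (illustration only; the real `transformOpenAIStream` also handles tool calls, stop events, usage, and more):

```ts
// Hypothetical, simplified delta shape for illustration only.
interface Delta {
  role?: string;
  content?: string | null;
  reasoning_content?: string | null;
}

// Mirrors the fixed ordering: text first, then reasoning regardless of
// whether the provider also sent `content: null`, then the fallback.
const classify = (delta: Delta): 'text' | 'reasoning' | 'data' => {
  if (typeof delta.content === 'string') return 'text';
  if (typeof delta.reasoning_content === 'string') return 'reasoning';
  return 'data';
};

classify({ content: null, reasoning_content: '您好' }); // 'reasoning' (official DeepSeek API)
classify({ role: 'assistant', reasoning_content: '您好' }); // 'reasoning' (litellm, fixed by this patch)
classify({ content: '你好', reasoning_content: null }); // 'text'
```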