@lobehub/chat 1.79.4 → 1.79.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/docs/self-hosting/advanced/online-search.zh-CN.mdx +59 -0
  4. package/package.json +3 -3
  5. package/src/app/(backend)/webapi/chat/[provider]/route.ts +1 -4
  6. package/src/app/(backend)/webapi/plugin/gateway/route.ts +1 -0
  7. package/src/app/(backend)/webapi/trace/route.ts +6 -1
  8. package/src/const/trace.ts +2 -4
  9. package/src/database/models/__tests__/_util.ts +4 -2
  10. package/src/libs/agent-runtime/AgentRuntime.test.ts +11 -17
  11. package/src/libs/agent-runtime/helpers/index.ts +1 -0
  12. package/src/{utils/fetch/__tests__ → libs/agent-runtime/helpers}/parseToolCalls.test.ts +1 -2
  13. package/src/libs/agent-runtime/index.ts +1 -0
  14. package/src/libs/agent-runtime/types/chat.ts +41 -9
  15. package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +4 -2
  16. package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +4 -7
  17. package/src/libs/agent-runtime/utils/streams/bedrock/llama.test.ts +6 -14
  18. package/src/libs/agent-runtime/utils/streams/google-ai.test.ts +3 -6
  19. package/src/libs/agent-runtime/utils/streams/ollama.test.ts +3 -9
  20. package/src/libs/agent-runtime/utils/streams/openai.test.ts +5 -8
  21. package/src/libs/agent-runtime/utils/streams/protocol.ts +55 -10
  22. package/src/libs/agent-runtime/utils/streams/qwen.test.ts +3 -6
  23. package/src/libs/agent-runtime/utils/streams/spark.test.ts +63 -60
  24. package/src/libs/agent-runtime/utils/streams/vertex-ai.test.ts +3 -7
  25. package/src/libs/agent-runtime/xai/index.ts +10 -0
  26. package/src/libs/agent-runtime/zhipu/index.test.ts +2 -2
  27. package/src/server/modules/AgentRuntime/index.ts +4 -75
  28. package/src/server/modules/AgentRuntime/trace.ts +107 -0
  29. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +6 -0
  30. package/src/utils/fetch/fetchSSE.ts +1 -1
  31. /package/src/{utils/fetch → libs/agent-runtime/helpers}/parseToolCalls.ts +0 -0
@@ -0,0 +1,107 @@
1
+ import { after } from 'next/server';
2
+
3
+ import { INBOX_SESSION_ID } from '@/const/session';
4
+ import {
5
+ LOBE_CHAT_OBSERVATION_ID,
6
+ LOBE_CHAT_TRACE_ID,
7
+ TracePayload,
8
+ TraceTagMap,
9
+ } from '@/const/trace';
10
+ import { ChatStreamCallbacks, ChatStreamPayload } from '@/libs/agent-runtime';
11
+ import { TraceClient } from '@/libs/traces';
12
+
13
/**
 * Options controlling how a chat completion is traced.
 */
export interface AgentChatOptions {
  // NOTE(review): not read inside createTraceOptions — presumably gates whether
  // the caller invokes tracing at all; confirm against the call site.
  enableTrace?: boolean;
  // Model provider id; recorded in trace/generation metadata and in the
  // generation name ("Chat Completion (<provider>)").
  provider: string;
  // Supplies traceId, traceName, sessionId/topicId, tags and userId for the trace.
  trace?: TracePayload;
}
18
+
19
+ export const createTraceOptions = (
20
+ payload: ChatStreamPayload,
21
+ { trace: tracePayload, provider }: AgentChatOptions,
22
+ ) => {
23
+ const { messages, model, tools, ...parameters } = payload;
24
+ // create a trace to monitor the completion
25
+ const traceClient = new TraceClient();
26
+ const messageLength = messages.length;
27
+ const systemRole = messages.find((message) => message.role === 'system')?.content;
28
+
29
+ const trace = traceClient.createTrace({
30
+ id: tracePayload?.traceId,
31
+ input: messages,
32
+ metadata: { messageLength, model, provider, systemRole, tools },
33
+ name: tracePayload?.traceName,
34
+ sessionId: tracePayload?.topicId
35
+ ? tracePayload.topicId
36
+ : `${tracePayload?.sessionId || INBOX_SESSION_ID}@default`,
37
+ tags: tracePayload?.tags,
38
+ userId: tracePayload?.userId,
39
+ });
40
+
41
+ const generation = trace?.generation({
42
+ input: messages,
43
+ metadata: { messageLength, model, provider },
44
+ model,
45
+ modelParameters: parameters as any,
46
+ name: `Chat Completion (${provider})`,
47
+ startTime: new Date(),
48
+ });
49
+
50
+ return {
51
+ callback: {
52
+ onCompletion: async ({ text, thinking, usage, grounding, toolsCalling }) => {
53
+ const output =
54
+ // if the toolsCalling is not empty, we need to return the toolsCalling
55
+ !!toolsCalling && toolsCalling.length > 0
56
+ ? !!text
57
+ ? // tools calling with thinking and text
58
+ { text, thinking, toolsCalling }
59
+ : toolsCalling
60
+ : !!thinking
61
+ ? { text, thinking }
62
+ : text;
63
+
64
+ generation?.update({
65
+ endTime: new Date(),
66
+ metadata: { grounding, thinking },
67
+ output,
68
+ usage: usage
69
+ ? {
70
+ completionTokens: usage.outputTextTokens,
71
+ input: usage.totalInputTokens,
72
+ output: usage.totalOutputTokens,
73
+ promptTokens: usage.inputTextTokens,
74
+ totalTokens: usage.totalTokens,
75
+ }
76
+ : undefined,
77
+ });
78
+
79
+ trace?.update({ output });
80
+ },
81
+
82
+ onFinal: () => {
83
+ after(async () => {
84
+ try {
85
+ await traceClient.shutdownAsync();
86
+ } catch (e) {
87
+ console.error('TraceClient shutdown error:', e);
88
+ }
89
+ });
90
+ },
91
+
92
+ onStart: () => {
93
+ generation?.update({ completionStartTime: new Date() });
94
+ },
95
+
96
+ onToolsCalling: async () => {
97
+ trace?.update({
98
+ tags: [...(tracePayload?.tags || []), TraceTagMap.ToolsCalling],
99
+ });
100
+ },
101
+ } as ChatStreamCallbacks,
102
+ headers: {
103
+ [LOBE_CHAT_OBSERVATION_ID]: generation?.id,
104
+ [LOBE_CHAT_TRACE_ID]: trace?.id,
105
+ },
106
+ };
107
+ };
@@ -388,6 +388,12 @@ export const generateAIChat: StateCreator<
388
388
  });
389
389
  }
390
390
  },
391
+ trace: {
392
+ traceId: params?.traceId,
393
+ sessionId: get().activeId,
394
+ topicId: get().activeTopicId,
395
+ traceName: TraceNameMap.SearchIntentRecognition,
396
+ },
391
397
  abortController,
392
398
  onMessageHandle: async (chunk) => {
393
399
  if (chunk.type === 'tool_calls') {
@@ -2,6 +2,7 @@ import { isObject } from 'lodash-es';
2
2
 
3
3
  import { MESSAGE_CANCEL_FLAT } from '@/const/message';
4
4
  import { LOBE_CHAT_OBSERVATION_ID, LOBE_CHAT_TRACE_ID } from '@/const/trace';
5
+ import { parseToolCalls } from '@/libs/agent-runtime';
5
6
  import { ChatErrorType } from '@/types/fetch';
6
7
  import { SmoothingParams } from '@/types/llm';
7
8
  import {
@@ -18,7 +19,6 @@ import { nanoid } from '@/utils/uuid';
18
19
 
19
20
  import { fetchEventSource } from './fetchEventSource';
20
21
  import { getMessageError } from './parseError';
21
- import { parseToolCalls } from './parseToolCalls';
22
22
 
23
23
  type SSEFinishType = 'done' | 'error' | 'abort';
24
24