@nocobase/plugin-ai 2.1.0-beta.24 → 2.1.0-beta.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94) hide show
  1. package/dist/ai/ai-employees/dara.js +1 -0
  2. package/dist/ai/tools/sub-agents/shared.js +3 -1
  3. package/dist/client/343.6f36d97dd122c5b6.js +10 -0
  4. package/dist/client/{559.133d286a0a0a1d93.js → 559.39872901b9053629.js} +1 -1
  5. package/dist/client/646.afa699c92cd556f3.js +10 -0
  6. package/dist/client/ai-employees/types.d.ts +2 -0
  7. package/dist/client/components/skill-settings.d.ts +2 -0
  8. package/dist/client/index.js +3 -3
  9. package/dist/client/llm-providers/mimo/ModelSettings.d.ts +10 -0
  10. package/dist/client/llm-providers/mimo/index.d.ts +10 -0
  11. package/dist/client/llm-providers/xai/ModelSettings.d.ts +10 -0
  12. package/dist/client/llm-providers/xai/index.d.ts +10 -0
  13. package/dist/collections/ai-employees.d.ts +7 -0
  14. package/dist/collections/ai-employees.js +13 -0
  15. package/dist/externalVersion.js +15 -15
  16. package/dist/locale/en-US.json +2 -0
  17. package/dist/locale/zh-CN.json +2 -0
  18. package/dist/node_modules/@langchain/xai/LICENSE +21 -0
  19. package/dist/node_modules/@langchain/xai/dist/_virtual/rolldown_runtime.cjs +25 -0
  20. package/dist/node_modules/@langchain/xai/dist/chat_models/completions.cjs +568 -0
  21. package/dist/node_modules/@langchain/xai/dist/chat_models/completions.d.cts +619 -0
  22. package/dist/node_modules/@langchain/xai/dist/chat_models/completions.d.ts +619 -0
  23. package/dist/node_modules/@langchain/xai/dist/chat_models/completions.js +566 -0
  24. package/dist/node_modules/@langchain/xai/dist/chat_models/index.cjs +2 -0
  25. package/dist/node_modules/@langchain/xai/dist/chat_models/index.d.ts +3 -0
  26. package/dist/node_modules/@langchain/xai/dist/chat_models/index.js +2 -0
  27. package/dist/node_modules/@langchain/xai/dist/chat_models/responses-types.d.cts +1178 -0
  28. package/dist/node_modules/@langchain/xai/dist/chat_models/responses-types.d.ts +1178 -0
  29. package/dist/node_modules/@langchain/xai/dist/chat_models/responses.cjs +233 -0
  30. package/dist/node_modules/@langchain/xai/dist/chat_models/responses.d.cts +70 -0
  31. package/dist/node_modules/@langchain/xai/dist/chat_models/responses.d.ts +70 -0
  32. package/dist/node_modules/@langchain/xai/dist/chat_models/responses.js +232 -0
  33. package/dist/node_modules/@langchain/xai/dist/converters/responses.cjs +168 -0
  34. package/dist/node_modules/@langchain/xai/dist/converters/responses.js +164 -0
  35. package/dist/node_modules/@langchain/xai/dist/index.cjs +7 -0
  36. package/dist/node_modules/@langchain/xai/dist/index.d.cts +5 -0
  37. package/dist/node_modules/@langchain/xai/dist/index.d.ts +6 -0
  38. package/dist/node_modules/@langchain/xai/dist/index.js +6 -0
  39. package/dist/node_modules/@langchain/xai/dist/live_search.cjs +54 -0
  40. package/dist/node_modules/@langchain/xai/dist/live_search.d.cts +145 -0
  41. package/dist/node_modules/@langchain/xai/dist/live_search.d.ts +145 -0
  42. package/dist/node_modules/@langchain/xai/dist/live_search.js +51 -0
  43. package/dist/node_modules/@langchain/xai/dist/profiles.cjs +289 -0
  44. package/dist/node_modules/@langchain/xai/dist/profiles.js +288 -0
  45. package/dist/node_modules/@langchain/xai/dist/tools/code_execution.cjs +52 -0
  46. package/dist/node_modules/@langchain/xai/dist/tools/code_execution.d.cts +64 -0
  47. package/dist/node_modules/@langchain/xai/dist/tools/code_execution.d.ts +64 -0
  48. package/dist/node_modules/@langchain/xai/dist/tools/code_execution.js +50 -0
  49. package/dist/node_modules/@langchain/xai/dist/tools/collections_search.cjs +60 -0
  50. package/dist/node_modules/@langchain/xai/dist/tools/collections_search.d.cts +90 -0
  51. package/dist/node_modules/@langchain/xai/dist/tools/collections_search.d.ts +90 -0
  52. package/dist/node_modules/@langchain/xai/dist/tools/collections_search.js +58 -0
  53. package/dist/node_modules/@langchain/xai/dist/tools/index.cjs +18 -0
  54. package/dist/node_modules/@langchain/xai/dist/tools/index.d.cts +18 -0
  55. package/dist/node_modules/@langchain/xai/dist/tools/index.d.ts +18 -0
  56. package/dist/node_modules/@langchain/xai/dist/tools/index.js +18 -0
  57. package/dist/node_modules/@langchain/xai/dist/tools/live_search.cjs +94 -0
  58. package/dist/node_modules/@langchain/xai/dist/tools/live_search.d.cts +149 -0
  59. package/dist/node_modules/@langchain/xai/dist/tools/live_search.d.ts +149 -0
  60. package/dist/node_modules/@langchain/xai/dist/tools/live_search.js +91 -0
  61. package/dist/node_modules/@langchain/xai/dist/tools/web_search.cjs +57 -0
  62. package/dist/node_modules/@langchain/xai/dist/tools/web_search.d.cts +104 -0
  63. package/dist/node_modules/@langchain/xai/dist/tools/web_search.d.ts +104 -0
  64. package/dist/node_modules/@langchain/xai/dist/tools/web_search.js +55 -0
  65. package/dist/node_modules/@langchain/xai/dist/tools/x_search.cjs +63 -0
  66. package/dist/node_modules/@langchain/xai/dist/tools/x_search.d.cts +145 -0
  67. package/dist/node_modules/@langchain/xai/dist/tools/x_search.d.ts +145 -0
  68. package/dist/node_modules/@langchain/xai/dist/tools/x_search.js +61 -0
  69. package/dist/node_modules/@langchain/xai/package.json +1 -0
  70. package/dist/node_modules/fast-glob/package.json +1 -1
  71. package/dist/node_modules/flexsearch/package.json +1 -1
  72. package/dist/node_modules/fs-extra/package.json +1 -1
  73. package/dist/node_modules/jsonrepair/package.json +1 -1
  74. package/dist/node_modules/nodejs-snowflake/package.json +1 -1
  75. package/dist/node_modules/openai/package.json +1 -1
  76. package/dist/node_modules/zod/package.json +1 -1
  77. package/dist/server/ai-employees/ai-employee.js +20 -10
  78. package/dist/server/llm-providers/common/reasoning.js +2 -4
  79. package/dist/server/llm-providers/mimo.d.ts +37 -0
  80. package/dist/server/llm-providers/mimo.js +156 -0
  81. package/dist/server/llm-providers/xai.d.ts +17 -0
  82. package/dist/server/llm-providers/xai.js +88 -0
  83. package/dist/server/migrations/20260428175558-update-ai-employee-category.d.ts +14 -0
  84. package/dist/server/migrations/20260428175558-update-ai-employee-category.js +55 -0
  85. package/dist/server/migrations/20260429175132-ai-employee-deprecated-orin.d.ts +14 -0
  86. package/dist/server/migrations/20260429175132-ai-employee-deprecated-orin.js +53 -0
  87. package/dist/server/plugin.js +5 -0
  88. package/dist/server/resource/aiEmployees.js +10 -1
  89. package/dist/server/workflow/nodes/employee/files.js +7 -4
  90. package/dist/server/workflow/nodes/employee/index.js +136 -132
  91. package/dist/server/workflow/nodes/employee/types.d.ts +1 -1
  92. package/package.json +3 -2
  93. package/dist/client/343.83f7d96664e4e038.js +0 -10
  94. package/dist/client/646.cba98d80e9e6ea74.js +0 -10
@@ -0,0 +1,168 @@
1
+ const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
2
+ const __langchain_core_messages = require_rolldown_runtime.__toESM(require("@langchain/core/messages"));
3
+ const __langchain_core_outputs = require_rolldown_runtime.__toESM(require("@langchain/core/outputs"));
4
+
5
+ //#region src/converters/responses.ts
6
+ /**
7
+ * Converts a single LangChain BaseMessage to xAI Responses API input format.
8
+ *
9
+ * @param message - The LangChain message to convert
10
+ * @returns The xAI Responses API input item
11
+ */
12
+ function convertMessageToResponsesInput(message) {
13
+ if (message.type === "human") {
14
+ const content = typeof message.content === "string" ? message.content : message.content.map((part) => {
15
+ if (typeof part === "string") return {
16
+ type: "input_text",
17
+ text: part
18
+ };
19
+ if (part.type === "text") return {
20
+ type: "input_text",
21
+ text: part.text
22
+ };
23
+ if (part.type === "image_url") {
24
+ const imageUrlPart = part;
25
+ const imageUrl = typeof imageUrlPart.image_url === "string" ? imageUrlPart.image_url : imageUrlPart.image_url.url;
26
+ return {
27
+ type: "input_image",
28
+ image_url: imageUrl,
29
+ detail: "auto"
30
+ };
31
+ }
32
+ return {
33
+ type: "input_text",
34
+ text: ""
35
+ };
36
+ });
37
+ return {
38
+ role: "user",
39
+ content
40
+ };
41
+ }
42
+ if (message.type === "system") return {
43
+ role: "system",
44
+ content: typeof message.content === "string" ? message.content : message.content.map((part) => typeof part === "string" ? part : part.text || "").join("")
45
+ };
46
+ if (message.type === "ai") {
47
+ const aiMessage = message;
48
+ return {
49
+ type: "message",
50
+ role: "assistant",
51
+ text: typeof aiMessage.content === "string" ? aiMessage.content : ""
52
+ };
53
+ }
54
+ return {
55
+ role: "user",
56
+ content: typeof message.content === "string" ? message.content : JSON.stringify(message.content)
57
+ };
58
+ }
59
/**
 * Converts an array of LangChain BaseMessages to xAI Responses API input format.
 *
 * @param messages - Array of LangChain messages to convert
 * @returns Array of xAI Responses API input items
 */
function convertMessagesToResponsesInput(messages) {
	const inputItems = [];
	for (const message of messages) {
		inputItems.push(convertMessageToResponsesInput(message));
	}
	return inputItems;
}
68
/**
 * Converts xAI usage statistics to LangChain UsageMetadata format.
 *
 * Missing counters default to 0; the cache/reasoning detail fields are only
 * included when the corresponding counter is present (non-nullish).
 *
 * @param usage - The xAI usage statistics
 * @returns LangChain UsageMetadata object
 */
function convertUsageToUsageMetadata(usage) {
	const inputDetails = {};
	const cachedTokens = usage?.input_tokens_details?.cached_tokens;
	if (cachedTokens != null) {
		inputDetails.cache_read = cachedTokens;
	}
	const outputDetails = {};
	const reasoningTokens = usage?.output_tokens_details?.reasoning_tokens;
	if (reasoningTokens != null) {
		outputDetails.reasoning = reasoningTokens;
	}
	return {
		input_tokens: usage?.input_tokens ?? 0,
		output_tokens: usage?.output_tokens ?? 0,
		total_tokens: usage?.total_tokens ?? 0,
		input_token_details: inputDetails,
		output_token_details: outputDetails
	};
}
83
/**
 * Extracts text content from xAI response output items.
 *
 * Only "message" items that carry a content array contribute; within them,
 * only "output_text" parts are collected.
 *
 * @param output - Array of xAI response output items
 * @returns Concatenated text content
 */
function extractTextFromOutput(output) {
	const pieces = [];
	for (const item of output) {
		if (item.type !== "message" || !("content" in item)) continue;
		for (const part of item.content) {
			if (part.type === "output_text") {
				pieces.push(part.text);
			}
		}
	}
	return pieces.join("");
}
96
/**
 * Converts an xAI Response to a LangChain AIMessage.
 *
 * @param response - The xAI API response
 * @returns LangChain AIMessage
 */
function convertResponseToAIMessage(response) {
	// Carry provider and response identity through to LangChain metadata.
	const responseMetadata = {
		model_provider: "xai",
		model: response.model,
		created_at: response.created_at,
		id: response.id,
		status: response.status,
		object: response.object
	};
	if (response.incomplete_details) {
		responseMetadata.incomplete_details = response.incomplete_details;
	}
	// Reasoning is only surfaced in additional_kwargs when present (truthy).
	const additionalKwargs = {};
	if (response.reasoning) {
		additionalKwargs.reasoning = response.reasoning;
	}
	return new __langchain_core_messages.AIMessage({
		content: extractTextFromOutput(response.output),
		usage_metadata: convertUsageToUsageMetadata(response.usage),
		response_metadata: responseMetadata,
		additional_kwargs: additionalKwargs
	});
}
120
/**
 * Converts an xAI streaming event to a LangChain ChatGenerationChunk.
 *
 * @param event - The xAI streaming event
 * @returns ChatGenerationChunk or null if the event doesn't produce a chunk
 */
function convertStreamEventToChunk(event) {
	const responseMetadata = { model_provider: "xai" };
	switch (event.type) {
		case "response.output_text.delta":
			// Incremental text: forward the delta as both chunk text and content.
			return new __langchain_core_outputs.ChatGenerationChunk({
				text: event.delta,
				message: new __langchain_core_messages.AIMessageChunk({
					content: event.delta,
					response_metadata: responseMetadata
				})
			});
		case "response.created":
			// Stream opened: emit an empty chunk carrying response id/model.
			responseMetadata.id = event.response.id;
			responseMetadata.model = event.response.model;
			return new __langchain_core_outputs.ChatGenerationChunk({
				text: "",
				message: new __langchain_core_messages.AIMessageChunk({
					content: "",
					response_metadata: responseMetadata
				})
			});
		case "response.completed": {
			// Stream finished: emit an empty chunk with final usage + metadata.
			const finalMessage = convertResponseToAIMessage(event.response);
			return new __langchain_core_outputs.ChatGenerationChunk({
				text: "",
				message: new __langchain_core_messages.AIMessageChunk({
					content: "",
					usage_metadata: finalMessage.usage_metadata,
					response_metadata: {
						...responseMetadata,
						...finalMessage.response_metadata
					}
				})
			});
		}
		default:
			// All other event kinds produce no chunk.
			return null;
	}
}
162
+
163
+ //#endregion
164
+ exports.convertMessagesToResponsesInput = convertMessagesToResponsesInput;
165
+ exports.convertResponseToAIMessage = convertResponseToAIMessage;
166
+ exports.convertStreamEventToChunk = convertStreamEventToChunk;
167
+ exports.extractTextFromOutput = extractTextFromOutput;
168
+ //# sourceMappingURL=responses.cjs.map
@@ -0,0 +1,164 @@
1
+ import { AIMessage, AIMessageChunk } from "@langchain/core/messages";
2
+ import { ChatGenerationChunk } from "@langchain/core/outputs";
3
+
4
+ //#region src/converters/responses.ts
5
/**
 * Converts a single LangChain BaseMessage to xAI Responses API input format.
 *
 * @param message - The LangChain message to convert
 * @returns The xAI Responses API input item
 */
function convertMessageToResponsesInput(message) {
	// Maps one multimodal content part of a human message onto a Responses
	// API input part; unrecognized part kinds degrade to empty input_text.
	const toInputPart = (part) => {
		if (typeof part === "string") {
			return {
				type: "input_text",
				text: part
			};
		}
		switch (part.type) {
			case "text":
				return {
					type: "input_text",
					text: part.text
				};
			case "image_url": {
				const rawUrl = part.image_url;
				return {
					type: "input_image",
					image_url: typeof rawUrl === "string" ? rawUrl : rawUrl.url,
					detail: "auto"
				};
			}
			default:
				return {
					type: "input_text",
					text: ""
				};
		}
	};
	switch (message.type) {
		case "human": {
			const userContent = typeof message.content === "string" ? message.content : message.content.map(toInputPart);
			return {
				role: "user",
				content: userContent
			};
		}
		case "system": {
			// System content is always flattened to a single string.
			const systemText = typeof message.content === "string" ? message.content : message.content.map((part) => typeof part === "string" ? part : part.text || "").join("");
			return {
				role: "system",
				content: systemText
			};
		}
		case "ai":
			// NOTE(review): assistant history items are emitted with a "text"
			// field (not "content") — mirrors the original bundle's shape.
			return {
				type: "message",
				role: "assistant",
				text: typeof message.content === "string" ? message.content : ""
			};
		default:
			// Anything else (tool/function/etc.) is serialized as a user turn.
			return {
				role: "user",
				content: typeof message.content === "string" ? message.content : JSON.stringify(message.content)
			};
	}
}
58
/**
 * Converts an array of LangChain BaseMessages to xAI Responses API input format.
 *
 * @param messages - Array of LangChain messages to convert
 * @returns Array of xAI Responses API input items
 */
function convertMessagesToResponsesInput(messages) {
	const inputItems = [];
	for (const message of messages) {
		inputItems.push(convertMessageToResponsesInput(message));
	}
	return inputItems;
}
67
/**
 * Converts xAI usage statistics to LangChain UsageMetadata format.
 *
 * Missing counters default to 0; the cache/reasoning detail fields are only
 * included when the corresponding counter is present (non-nullish).
 *
 * @param usage - The xAI usage statistics
 * @returns LangChain UsageMetadata object
 */
function convertUsageToUsageMetadata(usage) {
	const inputDetails = {};
	const cachedTokens = usage?.input_tokens_details?.cached_tokens;
	if (cachedTokens != null) {
		inputDetails.cache_read = cachedTokens;
	}
	const outputDetails = {};
	const reasoningTokens = usage?.output_tokens_details?.reasoning_tokens;
	if (reasoningTokens != null) {
		outputDetails.reasoning = reasoningTokens;
	}
	return {
		input_tokens: usage?.input_tokens ?? 0,
		output_tokens: usage?.output_tokens ?? 0,
		total_tokens: usage?.total_tokens ?? 0,
		input_token_details: inputDetails,
		output_token_details: outputDetails
	};
}
82
/**
 * Extracts text content from xAI response output items.
 *
 * Only "message" items that carry a content array contribute; within them,
 * only "output_text" parts are collected.
 *
 * @param output - Array of xAI response output items
 * @returns Concatenated text content
 */
function extractTextFromOutput(output) {
	const pieces = [];
	for (const item of output) {
		if (item.type !== "message" || !("content" in item)) continue;
		for (const part of item.content) {
			if (part.type === "output_text") {
				pieces.push(part.text);
			}
		}
	}
	return pieces.join("");
}
95
/**
 * Converts an xAI Response to a LangChain AIMessage.
 *
 * @param response - The xAI API response
 * @returns LangChain AIMessage
 */
function convertResponseToAIMessage(response) {
	// Carry provider and response identity through to LangChain metadata.
	const responseMetadata = {
		model_provider: "xai",
		model: response.model,
		created_at: response.created_at,
		id: response.id,
		status: response.status,
		object: response.object
	};
	if (response.incomplete_details) {
		responseMetadata.incomplete_details = response.incomplete_details;
	}
	// Reasoning is only surfaced in additional_kwargs when present (truthy).
	const additionalKwargs = {};
	if (response.reasoning) {
		additionalKwargs.reasoning = response.reasoning;
	}
	return new AIMessage({
		content: extractTextFromOutput(response.output),
		usage_metadata: convertUsageToUsageMetadata(response.usage),
		response_metadata: responseMetadata,
		additional_kwargs: additionalKwargs
	});
}
119
/**
 * Converts an xAI streaming event to a LangChain ChatGenerationChunk.
 *
 * @param event - The xAI streaming event
 * @returns ChatGenerationChunk or null if the event doesn't produce a chunk
 */
function convertStreamEventToChunk(event) {
	const responseMetadata = { model_provider: "xai" };
	switch (event.type) {
		case "response.output_text.delta":
			// Incremental text: forward the delta as both chunk text and content.
			return new ChatGenerationChunk({
				text: event.delta,
				message: new AIMessageChunk({
					content: event.delta,
					response_metadata: responseMetadata
				})
			});
		case "response.created":
			// Stream opened: emit an empty chunk carrying response id/model.
			responseMetadata.id = event.response.id;
			responseMetadata.model = event.response.model;
			return new ChatGenerationChunk({
				text: "",
				message: new AIMessageChunk({
					content: "",
					response_metadata: responseMetadata
				})
			});
		case "response.completed": {
			// Stream finished: emit an empty chunk with final usage + metadata.
			const finalMessage = convertResponseToAIMessage(event.response);
			return new ChatGenerationChunk({
				text: "",
				message: new AIMessageChunk({
					content: "",
					usage_metadata: finalMessage.usage_metadata,
					response_metadata: {
						...responseMetadata,
						...finalMessage.response_metadata
					}
				})
			});
		}
		default:
			// All other event kinds produce no chunk.
			return null;
	}
}
161
+
162
+ //#endregion
163
+ export { convertMessagesToResponsesInput, convertResponseToAIMessage, convertStreamEventToChunk, extractTextFromOutput };
164
+ //# sourceMappingURL=responses.js.map