@aj-archipelago/cortex 1.3.62 → 1.3.63

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (211)
  1. package/.github/workflows/cortex-file-handler-test.yml +61 -0
  2. package/README.md +31 -7
  3. package/config/default.example.json +15 -0
  4. package/config.js +133 -12
  5. package/helper-apps/cortex-autogen2/DigiCertGlobalRootCA.crt.pem +22 -0
  6. package/helper-apps/cortex-autogen2/Dockerfile +31 -0
  7. package/helper-apps/cortex-autogen2/Dockerfile.worker +41 -0
  8. package/helper-apps/cortex-autogen2/README.md +183 -0
  9. package/helper-apps/cortex-autogen2/__init__.py +1 -0
  10. package/helper-apps/cortex-autogen2/agents.py +131 -0
  11. package/helper-apps/cortex-autogen2/docker-compose.yml +20 -0
  12. package/helper-apps/cortex-autogen2/function_app.py +55 -0
  13. package/helper-apps/cortex-autogen2/host.json +15 -0
  14. package/helper-apps/cortex-autogen2/main.py +126 -0
  15. package/helper-apps/cortex-autogen2/poetry.lock +3652 -0
  16. package/helper-apps/cortex-autogen2/pyproject.toml +36 -0
  17. package/helper-apps/cortex-autogen2/requirements.txt +20 -0
  18. package/helper-apps/cortex-autogen2/send_task.py +105 -0
  19. package/helper-apps/cortex-autogen2/services/__init__.py +1 -0
  20. package/helper-apps/cortex-autogen2/services/azure_queue.py +85 -0
  21. package/helper-apps/cortex-autogen2/services/redis_publisher.py +153 -0
  22. package/helper-apps/cortex-autogen2/task_processor.py +488 -0
  23. package/helper-apps/cortex-autogen2/tools/__init__.py +24 -0
  24. package/helper-apps/cortex-autogen2/tools/azure_blob_tools.py +175 -0
  25. package/helper-apps/cortex-autogen2/tools/azure_foundry_agents.py +601 -0
  26. package/helper-apps/cortex-autogen2/tools/coding_tools.py +72 -0
  27. package/helper-apps/cortex-autogen2/tools/download_tools.py +48 -0
  28. package/helper-apps/cortex-autogen2/tools/file_tools.py +545 -0
  29. package/helper-apps/cortex-autogen2/tools/search_tools.py +646 -0
  30. package/helper-apps/cortex-azure-cleaner/README.md +36 -0
  31. package/helper-apps/cortex-file-converter/README.md +93 -0
  32. package/helper-apps/cortex-file-converter/key_to_pdf.py +104 -0
  33. package/helper-apps/cortex-file-converter/list_blob_extensions.py +89 -0
  34. package/helper-apps/cortex-file-converter/process_azure_keynotes.py +181 -0
  35. package/helper-apps/cortex-file-converter/requirements.txt +1 -0
  36. package/helper-apps/cortex-file-handler/.env.test.azure.ci +7 -0
  37. package/helper-apps/cortex-file-handler/.env.test.azure.sample +1 -1
  38. package/helper-apps/cortex-file-handler/.env.test.gcs.ci +10 -0
  39. package/helper-apps/cortex-file-handler/.env.test.gcs.sample +2 -2
  40. package/helper-apps/cortex-file-handler/INTERFACE.md +41 -0
  41. package/helper-apps/cortex-file-handler/package.json +1 -1
  42. package/helper-apps/cortex-file-handler/scripts/setup-azure-container.js +41 -17
  43. package/helper-apps/cortex-file-handler/scripts/setup-test-containers.js +30 -15
  44. package/helper-apps/cortex-file-handler/scripts/test-azure.sh +32 -6
  45. package/helper-apps/cortex-file-handler/scripts/test-gcs.sh +24 -2
  46. package/helper-apps/cortex-file-handler/scripts/validate-env.js +128 -0
  47. package/helper-apps/cortex-file-handler/src/blobHandler.js +161 -51
  48. package/helper-apps/cortex-file-handler/src/constants.js +3 -0
  49. package/helper-apps/cortex-file-handler/src/fileChunker.js +10 -8
  50. package/helper-apps/cortex-file-handler/src/index.js +116 -9
  51. package/helper-apps/cortex-file-handler/src/redis.js +61 -1
  52. package/helper-apps/cortex-file-handler/src/services/ConversionService.js +11 -8
  53. package/helper-apps/cortex-file-handler/src/services/FileConversionService.js +2 -2
  54. package/helper-apps/cortex-file-handler/src/services/storage/AzureStorageProvider.js +88 -6
  55. package/helper-apps/cortex-file-handler/src/services/storage/GCSStorageProvider.js +58 -0
  56. package/helper-apps/cortex-file-handler/src/services/storage/StorageFactory.js +25 -5
  57. package/helper-apps/cortex-file-handler/src/services/storage/StorageProvider.js +9 -0
  58. package/helper-apps/cortex-file-handler/src/services/storage/StorageService.js +120 -16
  59. package/helper-apps/cortex-file-handler/src/start.js +27 -17
  60. package/helper-apps/cortex-file-handler/tests/FileConversionService.test.js +52 -1
  61. package/helper-apps/cortex-file-handler/tests/blobHandler.test.js +40 -0
  62. package/helper-apps/cortex-file-handler/tests/checkHashShortLived.test.js +553 -0
  63. package/helper-apps/cortex-file-handler/tests/cleanup.test.js +46 -52
  64. package/helper-apps/cortex-file-handler/tests/containerConversionFlow.test.js +451 -0
  65. package/helper-apps/cortex-file-handler/tests/containerNameParsing.test.js +229 -0
  66. package/helper-apps/cortex-file-handler/tests/containerParameterFlow.test.js +392 -0
  67. package/helper-apps/cortex-file-handler/tests/conversionResilience.test.js +7 -2
  68. package/helper-apps/cortex-file-handler/tests/deleteOperations.test.js +348 -0
  69. package/helper-apps/cortex-file-handler/tests/fileChunker.test.js +23 -2
  70. package/helper-apps/cortex-file-handler/tests/fileUpload.test.js +11 -5
  71. package/helper-apps/cortex-file-handler/tests/getOperations.test.js +58 -24
  72. package/helper-apps/cortex-file-handler/tests/postOperations.test.js +11 -4
  73. package/helper-apps/cortex-file-handler/tests/shortLivedUrlConversion.test.js +225 -0
  74. package/helper-apps/cortex-file-handler/tests/start.test.js +8 -12
  75. package/helper-apps/cortex-file-handler/tests/storage/StorageFactory.test.js +80 -0
  76. package/helper-apps/cortex-file-handler/tests/storage/StorageService.test.js +388 -22
  77. package/helper-apps/cortex-file-handler/tests/testUtils.helper.js +74 -0
  78. package/lib/cortexResponse.js +153 -0
  79. package/lib/entityConstants.js +21 -3
  80. package/lib/logger.js +21 -4
  81. package/lib/pathwayTools.js +28 -9
  82. package/lib/util.js +49 -0
  83. package/package.json +1 -1
  84. package/pathways/basePathway.js +1 -0
  85. package/pathways/bing_afagent.js +54 -1
  86. package/pathways/call_tools.js +2 -3
  87. package/pathways/chat_jarvis.js +1 -1
  88. package/pathways/google_cse.js +27 -0
  89. package/pathways/grok_live_search.js +18 -0
  90. package/pathways/system/entity/memory/sys_memory_lookup_required.js +1 -0
  91. package/pathways/system/entity/memory/sys_memory_required.js +1 -0
  92. package/pathways/system/entity/memory/sys_search_memory.js +1 -0
  93. package/pathways/system/entity/sys_entity_agent.js +56 -4
  94. package/pathways/system/entity/sys_generator_quick.js +1 -0
  95. package/pathways/system/entity/tools/sys_tool_bing_search_afagent.js +26 -0
  96. package/pathways/system/entity/tools/sys_tool_google_search.js +141 -0
  97. package/pathways/system/entity/tools/sys_tool_grok_x_search.js +237 -0
  98. package/pathways/system/entity/tools/sys_tool_image.js +1 -1
  99. package/pathways/system/rest_streaming/sys_claude_37_sonnet.js +21 -0
  100. package/pathways/system/rest_streaming/sys_claude_41_opus.js +21 -0
  101. package/pathways/system/rest_streaming/sys_claude_4_sonnet.js +21 -0
  102. package/pathways/system/rest_streaming/sys_google_gemini_25_flash.js +25 -0
  103. package/pathways/system/rest_streaming/{sys_google_gemini_chat.js → sys_google_gemini_25_pro.js} +6 -4
  104. package/pathways/system/rest_streaming/sys_grok_4.js +23 -0
  105. package/pathways/system/rest_streaming/sys_grok_4_fast_non_reasoning.js +23 -0
  106. package/pathways/system/rest_streaming/sys_grok_4_fast_reasoning.js +23 -0
  107. package/pathways/system/rest_streaming/sys_openai_chat.js +3 -0
  108. package/pathways/system/rest_streaming/sys_openai_chat_gpt41.js +22 -0
  109. package/pathways/system/rest_streaming/sys_openai_chat_gpt41_mini.js +21 -0
  110. package/pathways/system/rest_streaming/sys_openai_chat_gpt41_nano.js +21 -0
  111. package/pathways/system/rest_streaming/{sys_claude_35_sonnet.js → sys_openai_chat_gpt4_omni.js} +6 -4
  112. package/pathways/system/rest_streaming/sys_openai_chat_gpt4_omni_mini.js +21 -0
  113. package/pathways/system/rest_streaming/{sys_claude_3_haiku.js → sys_openai_chat_gpt5.js} +7 -5
  114. package/pathways/system/rest_streaming/sys_openai_chat_gpt5_chat.js +21 -0
  115. package/pathways/system/rest_streaming/sys_openai_chat_gpt5_mini.js +21 -0
  116. package/pathways/system/rest_streaming/sys_openai_chat_gpt5_nano.js +21 -0
  117. package/pathways/system/rest_streaming/{sys_openai_chat_o1.js → sys_openai_chat_o3.js} +6 -3
  118. package/pathways/system/rest_streaming/sys_openai_chat_o3_mini.js +3 -0
  119. package/pathways/system/workspaces/run_workspace_prompt.js +99 -0
  120. package/pathways/vision.js +1 -1
  121. package/server/graphql.js +1 -1
  122. package/server/modelExecutor.js +8 -0
  123. package/server/pathwayResolver.js +166 -16
  124. package/server/pathwayResponseParser.js +16 -8
  125. package/server/plugins/azureFoundryAgentsPlugin.js +1 -1
  126. package/server/plugins/claude3VertexPlugin.js +193 -45
  127. package/server/plugins/gemini15ChatPlugin.js +21 -0
  128. package/server/plugins/gemini15VisionPlugin.js +360 -0
  129. package/server/plugins/googleCsePlugin.js +94 -0
  130. package/server/plugins/grokVisionPlugin.js +365 -0
  131. package/server/plugins/modelPlugin.js +3 -1
  132. package/server/plugins/openAiChatPlugin.js +106 -13
  133. package/server/plugins/openAiVisionPlugin.js +42 -30
  134. package/server/resolver.js +28 -4
  135. package/server/rest.js +270 -53
  136. package/server/typeDef.js +1 -0
  137. package/tests/{mocks.js → helpers/mocks.js} +5 -2
  138. package/tests/{server.js → helpers/server.js} +2 -2
  139. package/tests/helpers/sseAssert.js +23 -0
  140. package/tests/helpers/sseClient.js +73 -0
  141. package/tests/helpers/subscriptionAssert.js +11 -0
  142. package/tests/helpers/subscriptions.js +113 -0
  143. package/tests/{sublong.srt → integration/features/translate/sublong.srt} +4543 -4543
  144. package/tests/integration/features/translate/translate_chunking_stream.test.js +100 -0
  145. package/tests/{translate_srt.test.js → integration/features/translate/translate_srt.test.js} +2 -2
  146. package/tests/integration/graphql/async/stream/agentic.test.js +477 -0
  147. package/tests/integration/graphql/async/stream/subscription_streaming.test.js +62 -0
  148. package/tests/integration/graphql/async/stream/sys_entity_start_streaming.test.js +71 -0
  149. package/tests/integration/graphql/async/stream/vendors/claude_streaming.test.js +56 -0
  150. package/tests/integration/graphql/async/stream/vendors/gemini_streaming.test.js +66 -0
  151. package/tests/integration/graphql/async/stream/vendors/grok_streaming.test.js +56 -0
  152. package/tests/integration/graphql/async/stream/vendors/openai_streaming.test.js +72 -0
  153. package/tests/integration/graphql/features/google/sysToolGoogleSearch.test.js +96 -0
  154. package/tests/integration/graphql/features/grok/grok.test.js +688 -0
  155. package/tests/integration/graphql/features/grok/grok_x_search_tool.test.js +354 -0
  156. package/tests/{main.test.js → integration/graphql/features/main.test.js} +1 -1
  157. package/tests/{call_tools.test.js → integration/graphql/features/tools/call_tools.test.js} +2 -2
  158. package/tests/{vision.test.js → integration/graphql/features/vision/vision.test.js} +1 -1
  159. package/tests/integration/graphql/subscriptions/connection.test.js +26 -0
  160. package/tests/{openai_api.test.js → integration/rest/oai/openai_api.test.js} +63 -238
  161. package/tests/integration/rest/oai/tool_calling_api.test.js +343 -0
  162. package/tests/integration/rest/oai/tool_calling_streaming.test.js +85 -0
  163. package/tests/integration/rest/vendors/claude_streaming.test.js +47 -0
  164. package/tests/integration/rest/vendors/claude_tool_calling_streaming.test.js +75 -0
  165. package/tests/integration/rest/vendors/gemini_streaming.test.js +47 -0
  166. package/tests/integration/rest/vendors/gemini_tool_calling_streaming.test.js +75 -0
  167. package/tests/integration/rest/vendors/grok_streaming.test.js +55 -0
  168. package/tests/integration/rest/vendors/grok_tool_calling_streaming.test.js +75 -0
  169. package/tests/{azureAuthTokenHelper.test.js → unit/core/azureAuthTokenHelper.test.js} +1 -1
  170. package/tests/{chunkfunction.test.js → unit/core/chunkfunction.test.js} +2 -2
  171. package/tests/{config.test.js → unit/core/config.test.js} +3 -3
  172. package/tests/{encodeCache.test.js → unit/core/encodeCache.test.js} +1 -1
  173. package/tests/{fastLruCache.test.js → unit/core/fastLruCache.test.js} +1 -1
  174. package/tests/{handleBars.test.js → unit/core/handleBars.test.js} +1 -1
  175. package/tests/{memoryfunction.test.js → unit/core/memoryfunction.test.js} +2 -2
  176. package/tests/unit/core/mergeResolver.test.js +952 -0
  177. package/tests/{parser.test.js → unit/core/parser.test.js} +3 -3
  178. package/tests/unit/core/pathwayResolver.test.js +187 -0
  179. package/tests/{requestMonitor.test.js → unit/core/requestMonitor.test.js} +1 -1
  180. package/tests/{requestMonitorDurationEstimator.test.js → unit/core/requestMonitorDurationEstimator.test.js} +1 -1
  181. package/tests/{truncateMessages.test.js → unit/core/truncateMessages.test.js} +3 -3
  182. package/tests/{util.test.js → unit/core/util.test.js} +1 -1
  183. package/tests/{apptekTranslatePlugin.test.js → unit/plugins/apptekTranslatePlugin.test.js} +3 -3
  184. package/tests/{azureFoundryAgents.test.js → unit/plugins/azureFoundryAgents.test.js} +136 -1
  185. package/tests/{claude3VertexPlugin.test.js → unit/plugins/claude3VertexPlugin.test.js} +32 -10
  186. package/tests/{claude3VertexToolConversion.test.js → unit/plugins/claude3VertexToolConversion.test.js} +3 -3
  187. package/tests/unit/plugins/googleCsePlugin.test.js +111 -0
  188. package/tests/unit/plugins/grokVisionPlugin.test.js +1392 -0
  189. package/tests/{modelPlugin.test.js → unit/plugins/modelPlugin.test.js} +3 -3
  190. package/tests/{multimodal_conversion.test.js → unit/plugins/multimodal_conversion.test.js} +4 -4
  191. package/tests/{openAiChatPlugin.test.js → unit/plugins/openAiChatPlugin.test.js} +13 -4
  192. package/tests/{openAiToolPlugin.test.js → unit/plugins/openAiToolPlugin.test.js} +35 -27
  193. package/tests/{tokenHandlingTests.test.js → unit/plugins/tokenHandlingTests.test.js} +5 -5
  194. package/tests/{translate_apptek.test.js → unit/plugins/translate_apptek.test.js} +3 -3
  195. package/tests/{streaming.test.js → unit/plugins.streaming/plugin_stream_events.test.js} +19 -58
  196. package/helper-apps/mogrt-handler/tests/test-files/test.gif +0 -1
  197. package/helper-apps/mogrt-handler/tests/test-files/test.mogrt +0 -1
  198. package/helper-apps/mogrt-handler/tests/test-files/test.mp4 +0 -1
  199. package/pathways/system/rest_streaming/sys_openai_chat_gpt4.js +0 -19
  200. package/pathways/system/rest_streaming/sys_openai_chat_gpt4_32.js +0 -19
  201. package/pathways/system/rest_streaming/sys_openai_chat_gpt4_turbo.js +0 -19
  202. package/pathways/system/workspaces/run_claude35_sonnet.js +0 -21
  203. package/pathways/system/workspaces/run_claude3_haiku.js +0 -20
  204. package/pathways/system/workspaces/run_gpt35turbo.js +0 -20
  205. package/pathways/system/workspaces/run_gpt4.js +0 -20
  206. package/pathways/system/workspaces/run_gpt4_32.js +0 -20
  207. package/tests/agentic.test.js +0 -256
  208. package/tests/pathwayResolver.test.js +0 -78
  209. package/tests/subscription.test.js +0 -387
  210. /package/tests/{subchunk.srt → integration/features/translate/subchunk.srt} +0 -0
  211. /package/tests/{subhorizontal.srt → integration/features/translate/subhorizontal.srt} +0 -0
@@ -0,0 +1,365 @@
1
+ import OpenAIVisionPlugin from './openAiVisionPlugin.js';
2
+ import logger from '../../lib/logger.js';
3
+ import { extractCitationTitle } from '../../lib/util.js';
4
+ import CortexResponse from '../../lib/cortexResponse.js';
5
+
6
/**
 * Attempt to interpret a value as JSON.
 * The parsed result is returned only when it is a non-null object or array;
 * for parse failures and JSON scalars (numbers, strings, booleans, null)
 * the original input is handed back unchanged.
 * @param {*} content - Candidate JSON string (or any other value).
 * @returns {*} The parsed object/array, or the original content.
 */
export function safeJsonParse(content) {
    try {
        const parsed = JSON.parse(content);
        if (parsed !== null && typeof parsed === 'object') {
            return parsed;
        }
    } catch (e) {
        // Not valid JSON — fall through and return the input as-is.
    }
    return content;
}
14
+
15
/**
 * Plugin for X.AI Grok chat/vision models.
 * Grok's API is OpenAI-compatible, so this extends OpenAIVisionPlugin and
 * layers on Grok-specific behavior: Live Search parameter validation,
 * preservation of the vision `detail` field, and Live Search response
 * fields (citations, search queries, search results, real-time data).
 */
class GrokVisionPlugin extends OpenAIVisionPlugin {

    /**
     * @param {object} pathway - Cortex pathway definition this plugin serves.
     * @param {object} model - Model configuration for this plugin instance.
     */
    constructor(pathway, model) {
        super(pathway, model);
        // Grok is always multimodal, so we inherit all vision capabilities from OpenAIVisionPlugin
    }

    /**
     * Serialize a chat message's content for logging, replacing inline
     * base64 image payloads with a short placeholder so logs stay readable.
     * @param {object} message - A chat message ({ role, content, ... }).
     * @returns {string} Loggable string form of the message content.
     */
    stringifyContentForLog(message) {
        if (message.content === undefined) {
            return JSON.stringify(message);
        }
        if (!Array.isArray(message.content)) {
            return message.content;
        }
        return message.content.map(item => {
            if (item.type === 'image_url' && item.image_url?.url?.startsWith('data:')) {
                return JSON.stringify({
                    type: 'image_url',
                    image_url: { url: '* base64 data truncated for log *' }
                });
            }
            return JSON.stringify(item);
        }).join(', ');
    }

    /**
     * Log a summary of the outgoing Grok request and of the response.
     * Overrides the base implementation to emit Grok-branded messages and
     * to truncate base64 image data out of the logs.
     * Fix vs. original: guards against a missing/empty `messages` array,
     * which previously crashed on `messages[0].content`.
     */
    logRequestData(data, responseData, prompt) {
        const { stream, messages } = data;

        if (messages && messages.length > 1) {
            logger.info(`[grok request sent containing ${messages.length} messages]`);
            let totalLength = 0;
            let totalUnits;
            messages.forEach((message, index) => {
                // message.content can be a string or an array of content items
                const content = this.stringifyContentForLog(message);
                const { length, units } = this.getLength(content);
                const displayContent = this.shortenContent(content);

                let logMessage = `message ${index + 1}: role: ${message.role}, ${units}: ${length}, content: "${displayContent}"`;

                // Surface assistant tool calls in the log when present
                if (message.role === 'assistant' && message.tool_calls) {
                    logMessage += `, tool_calls: ${JSON.stringify(message.tool_calls)}`;
                }

                logger.verbose(logMessage);
                totalLength += length;
                totalUnits = units;
            });
            logger.info(`[grok request contained ${totalLength} ${totalUnits}]`);
        } else if (messages && messages.length === 1) {
            const content = this.stringifyContentForLog(messages[0]);
            const { length, units } = this.getLength(content);
            logger.info(`[grok request sent containing ${length} ${units}]`);
            logger.verbose(`${this.shortenContent(content)}`);
        }

        if (stream) {
            logger.info(`[grok response received as an SSE stream]`);
        } else {
            const parsedResponse = this.parseResponse(responseData);

            if (typeof parsedResponse === 'string') {
                const { length, units } = this.getLength(parsedResponse);
                logger.info(`[grok response received containing ${length} ${units}]`);
                logger.verbose(`${this.shortenContent(parsedResponse)}`);
            } else {
                logger.info(`[grok response received containing object]`);
                logger.verbose(`${JSON.stringify(parsedResponse)}`);
            }
        }

        prompt && prompt.debugInfo && (prompt.debugInfo += `\n${JSON.stringify(data)}`);
    }

    /**
     * Validate Live Search parameters according to the X.AI documentation.
     * Collects every problem before failing so the caller sees all issues
     * in one error message.
     * @param {object} searchParams - Parsed search_parameters object.
     * @returns {boolean} true when validation passes.
     * @throws {Error} Aggregated message listing every validation failure.
     */
    validateSearchParameters(searchParams) {
        const errors = [];

        // Validate 'mode' parameter
        if (searchParams.mode !== undefined) {
            const validModes = ['off', 'auto', 'on'];
            if (!validModes.includes(searchParams.mode)) {
                errors.push(`Invalid 'mode' parameter: ${searchParams.mode}. Must be one of: ${validModes.join(', ')}`);
            }
        }

        // Validate 'sources' parameter
        if (searchParams.sources !== undefined) {
            if (!Array.isArray(searchParams.sources)) {
                errors.push("'sources' must be an array");
            } else {
                searchParams.sources.forEach((source, index) =>
                    this.validateSearchSource(source, index, errors));
            }
        }

        // Validate 'return_citations' parameter
        if (searchParams.return_citations !== undefined && typeof searchParams.return_citations !== 'boolean') {
            errors.push("'return_citations' must be a boolean");
        }

        // Validate date parameters: must be real calendar dates in YYYY-MM-DD form
        const dateFormat = /^\d{4}-\d{2}-\d{2}$/;
        ['from_date', 'to_date'].forEach(dateField => {
            const value = searchParams[dateField];
            if (value === undefined) {
                return;
            }
            if (typeof value !== 'string') {
                errors.push(`'${dateField}' must be a string`);
            } else if (!dateFormat.test(value)) {
                errors.push(`'${dateField}' must be in YYYY-MM-DD format`);
            } else {
                // Round-trip through Date to reject well-formed but impossible dates (e.g. 2024-02-31)
                const date = new Date(value);
                if (isNaN(date.getTime()) || date.toISOString().split('T')[0] !== value) {
                    errors.push(`'${dateField}' is not a valid date`);
                }
            }
        });

        // Validate 'max_search_results' parameter (positive integer, capped at 50)
        if (searchParams.max_search_results !== undefined) {
            if (typeof searchParams.max_search_results !== 'number' || !Number.isInteger(searchParams.max_search_results)) {
                errors.push("'max_search_results' must be an integer");
            } else if (searchParams.max_search_results <= 0) {
                errors.push("'max_search_results' must be a positive integer");
            } else if (searchParams.max_search_results > 50) {
                errors.push("'max_search_results' must be 50 or less");
            }
        }

        if (errors.length > 0) {
            throw new Error(`Live Search parameter validation failed:\n${errors.join('\n')}`);
        }

        return true;
    }

    /**
     * Validate a single entry of the Live Search 'sources' array, appending
     * any problems found to the shared errors list.
     * @param {*} source - Candidate source descriptor.
     * @param {number} index - Position of the source in the array (for messages).
     * @param {string[]} errors - Accumulator for validation error messages.
     */
    validateSearchSource(source, index, errors) {
        const validSourceTypes = ['web', 'news', 'x', 'rss'];

        if (!source || typeof source !== 'object') {
            errors.push(`Source at index ${index} must be an object`);
            return;
        }

        if (!validSourceTypes.includes(source.type)) {
            errors.push(`Invalid source type at index ${index}: ${source.type}. Must be one of: ${validSourceTypes.join(', ')}`);
        }

        // Parameters shared by web and news sources
        if (source.type === 'web' || source.type === 'news') {
            if (source.country !== undefined && typeof source.country !== 'string') {
                errors.push(`Source at index ${index}: 'country' must be a string`);
            }
            if (source.excluded_websites !== undefined && !Array.isArray(source.excluded_websites)) {
                errors.push(`Source at index ${index}: 'excluded_websites' must be an array`);
            }
            if (source.allowed_websites !== undefined && !Array.isArray(source.allowed_websites)) {
                errors.push(`Source at index ${index}: 'allowed_websites' must be an array`);
            }
            if (source.safe_search !== undefined && typeof source.safe_search !== 'boolean') {
                errors.push(`Source at index ${index}: 'safe_search' must be a boolean`);
            }
        }

        // X (Twitter) source parameters
        if (source.type === 'x') {
            if (source.included_x_handles !== undefined && !Array.isArray(source.included_x_handles)) {
                errors.push(`Source at index ${index}: 'included_x_handles' must be an array`);
            } else if (source.included_x_handles !== undefined && source.included_x_handles.length > 10) {
                errors.push(`Source at index ${index}: 'included_x_handles' can have a maximum of 10 items`);
            }

            if (source.excluded_x_handles !== undefined && !Array.isArray(source.excluded_x_handles)) {
                errors.push(`Source at index ${index}: 'excluded_x_handles' must be an array`);
            } else if (source.excluded_x_handles !== undefined && source.excluded_x_handles.length > 10) {
                errors.push(`Source at index ${index}: 'excluded_x_handles' can have a maximum of 10 items`);
            }

            // The API rejects requests specifying both handle filters at once
            if (source.included_x_handles !== undefined && source.excluded_x_handles !== undefined) {
                errors.push(`Source at index ${index}: 'included_x_handles' and 'excluded_x_handles' cannot be specified simultaneously`);
            }

            if (source.post_favorite_count !== undefined && typeof source.post_favorite_count !== 'number') {
                errors.push(`Source at index ${index}: 'post_favorite_count' must be a number`);
            }
            if (source.post_view_count !== undefined && typeof source.post_view_count !== 'number') {
                errors.push(`Source at index ${index}: 'post_view_count' must be a number`);
            }
        }

        // RSS source parameters
        if (source.type === 'rss') {
            if (source.links !== undefined && !Array.isArray(source.links)) {
                errors.push(`Source at index ${index}: 'links' must be an array`);
            } else if (source.links !== undefined && source.links.length > 1) {
                errors.push(`Source at index ${index}: 'links' can only have one item`);
            }
        }
    }

    /**
     * Build the request body, adding validated Live Search parameters.
     * `parameters.search_parameters` may be a JSON string (original behavior)
     * or an already-parsed object (generalization, backward compatible).
     * @throws {Error} When the JSON is malformed or validation fails.
     */
    async getRequestParameters(text, parameters, prompt) {
        const requestParameters = await super.getRequestParameters(text, parameters, prompt);

        let search_parameters = {};
        const rawSearchParams = parameters.search_parameters;
        if (rawSearchParams) {
            if (typeof rawSearchParams === 'string') {
                try {
                    search_parameters = JSON.parse(rawSearchParams);
                } catch (error) {
                    throw new Error(`Invalid 'search_parameters' parameter: ${error.message}`);
                }
            } else {
                search_parameters = rawSearchParams;
            }
        }

        // Validate and attach search parameters only when non-empty
        if (Object.keys(search_parameters).length > 0) {
            this.validateSearchParameters(search_parameters);
            requestParameters.search_parameters = search_parameters;
        }

        return requestParameters;
    }

    /**
     * Execute a Grok request: merge the computed request parameters into the
     * cortexRequest payload and dispatch it.
     */
    async execute(text, parameters, prompt, cortexRequest) {
        const requestParameters = await this.getRequestParameters(text, parameters, prompt);
        const { stream } = parameters;

        cortexRequest.data = {
            ...(cortexRequest.data || {}),
            ...requestParameters,
        };
        cortexRequest.params = {}; // query params
        cortexRequest.stream = stream;

        return this.executeRequest(cortexRequest);
    }

    /**
     * Handle a single SSE event from the Grok stream. Grok's stream format
     * matches the OpenAI-compatible format, so this delegates to the parent;
     * the override exists as an extension point for Grok-specific events.
     */
    processStreamEvent(event, requestProgress) {
        return super.processStreamEvent(event, requestProgress);
    }

    /**
     * Parse incoming message content into the X.AI chat format, preserving
     * the optional vision `detail` field on image entries (which the base
     * OpenAI implementation drops).
     * @param {object[]} messages - Raw chat messages.
     * @returns {Promise<object[]>} Normalized messages.
     */
    async tryParseMessages(messages) {
        return await Promise.all(messages.map(async message => {
            try {
                // Tool results and assistant tool-call messages pass through untouched
                if (message.role === "tool" || (message.role === "assistant" && message.tool_calls)) {
                    return {
                        ...message
                    };
                }

                if (Array.isArray(message.content)) {
                    return {
                        ...message,
                        content: await Promise.all(message.content.map(async item => {
                            const parsedItem = safeJsonParse(item);

                            if (typeof parsedItem === 'string') {
                                return { type: 'text', text: parsedItem };
                            }

                            if (typeof parsedItem === 'object' && parsedItem !== null) {
                                // Handle both 'image' and 'image_url' types
                                if (parsedItem.type === 'image' || parsedItem.type === 'image_url') {
                                    const url = parsedItem.url || parsedItem.image_url?.url;
                                    const detail = parsedItem.image_url?.detail || parsedItem.detail;
                                    if (url && await this.validateImageUrl(url)) {
                                        const imageUrl = { url };
                                        if (detail) {
                                            imageUrl.detail = detail;
                                        }
                                        return { type: 'image_url', image_url: imageUrl };
                                    }
                                    // Unusable image entry: degrade to a text item
                                    return { type: 'text', text: typeof item === 'string' ? item : JSON.stringify(item) };
                                }
                            }

                            return parsedItem;
                        }))
                    };
                }
            } catch (e) {
                return message;
            }
            return message;
        }));
    }

    /**
     * Parse a non-streaming Grok response into a CortexResponse, capturing
     * Grok-specific Live Search fields (citations, search_queries,
     * web_search_results, real_time_data).
     * @param {object} data - Raw response body from the API.
     * @returns {CortexResponse|object|Array|string} CortexResponse for a single
     *   choice; the raw data when there are no choices; the choices array when
     *   there is more than one; "" when data is falsy.
     */
    parseResponse(data) {
        if (!data) return "";
        const { choices } = data;
        if (!choices || !choices.length) {
            return data;
        }

        // if we got a choices array back with more than one choice, return the whole array
        if (choices.length > 1) {
            return choices;
        }

        const choice = choices[0];
        // Guard against responses with no message object (original code crashed here)
        const message = choice.message ?? {};

        // Create standardized CortexResponse object
        const cortexResponse = new CortexResponse({
            output_text: message.content || "",
            finishReason: choice.finish_reason || 'stop',
            usage: data.usage || null,
            metadata: {
                model: this.modelName
            }
        });

        // Handle tool calls
        if (message.tool_calls) {
            cortexResponse.toolCalls = message.tool_calls;
        }

        // Handle Grok-specific Live Search data; compute each citation title once
        if (data.citations) {
            cortexResponse.citations = data.citations.map(url => {
                const title = extractCitationTitle(url);
                return {
                    title,
                    url: url,
                    content: title
                };
            });
        }

        if (data.search_queries) {
            cortexResponse.searchQueries = data.search_queries;
        }

        if (data.web_search_results) {
            cortexResponse.searchResults = data.web_search_results;
        }

        if (data.real_time_data) {
            cortexResponse.realTimeData = data.real_time_data;
        }

        // Return the CortexResponse object
        return cortexResponse;
    }

}

export default GrokVisionPlugin;
@@ -608,7 +608,6 @@ class ModelPlugin {
608
608
  let parsedMessage;
609
609
  try {
610
610
  parsedMessage = JSON.parse(event.data);
611
- requestProgress.data = event.data;
612
611
  } catch (error) {
613
612
  throw new Error(`Could not parse stream data: ${error}`);
614
613
  }
@@ -619,6 +618,9 @@ class ModelPlugin {
619
618
  throw new Error(streamError);
620
619
  }
621
620
 
621
+ // Set the data for the event
622
+ requestProgress.data = event.data;
623
+
622
624
  // finish reason can be in different places in the message
623
625
  const finishReason = parsedMessage?.choices?.[0]?.finish_reason || parsedMessage?.candidates?.[0]?.finishReason;
624
626
  if (finishReason) {
@@ -1,6 +1,7 @@
1
1
  // OpenAIChatPlugin.js
2
2
  import ModelPlugin from './modelPlugin.js';
3
3
  import logger from '../../lib/logger.js';
4
+ import CortexResponse from '../../lib/cortexResponse.js';
4
5
 
5
6
  class OpenAIChatPlugin extends ModelPlugin {
6
7
  constructor(pathway, model) {
@@ -45,15 +46,24 @@ class OpenAIChatPlugin extends ModelPlugin {
45
46
  // Set up parameters specific to the OpenAI Chat API
46
47
  getRequestParameters(text, parameters, prompt) {
47
48
  const { modelPromptText, modelPromptMessages, tokenLength, modelPrompt } = this.getCompiledPrompt(text, parameters, prompt);
48
- const { stream } = parameters;
49
-
49
+ let { stream, tools, functions } = parameters;
50
+
51
+ if (typeof tools === 'string') {
52
+ tools = JSON.parse(tools);
53
+ }
54
+
55
+ if (typeof functions === 'string') {
56
+ functions = JSON.parse(functions);
57
+ }
58
+
50
59
  // Define the model's max token length
51
60
  const modelTargetTokenLength = this.getModelMaxPromptTokens();
52
-
61
+
53
62
  let requestMessages = modelPromptMessages || [{ "role": "user", "content": modelPromptText }];
54
63
 
55
64
  // Check if the messages are in Palm format and convert them to OpenAI format if necessary
56
65
  const isPalmFormat = requestMessages.some(message => 'author' in message);
66
+
57
67
  if (isPalmFormat) {
58
68
  const context = modelPrompt.context || '';
59
69
  const examples = modelPrompt.examples || [];
@@ -65,11 +75,13 @@ class OpenAIChatPlugin extends ModelPlugin {
65
75
  // Remove older messages until the token length is within the model's limit
66
76
  requestMessages = this.truncateMessagesToTargetLength(requestMessages, modelTargetTokenLength);
67
77
  }
68
-
78
+
69
79
  const requestParameters = {
70
80
  messages: requestMessages,
71
81
  temperature: this.temperature ?? 0.7,
72
82
  ...(stream !== undefined ? { stream } : {}),
83
+ ...(tools && tools.length > 0 ? { tools, tool_choice: parameters.tool_choice || 'auto' } : {}),
84
+ ...(functions && functions.length > 0 ? { functions } : {}),
73
85
  };
74
86
 
75
87
  return requestParameters;
@@ -80,7 +92,7 @@ class OpenAIChatPlugin extends ModelPlugin {
80
92
  const requestParameters = this.getRequestParameters(text, parameters, prompt);
81
93
 
82
94
  cortexRequest.data = { ...(cortexRequest.data || {}), ...requestParameters };
83
- cortexRequest.params = {}; // query params
95
+ cortexRequest.params = {};
84
96
 
85
97
  return this.executeRequest(cortexRequest);
86
98
  }
@@ -98,9 +110,86 @@ class OpenAIChatPlugin extends ModelPlugin {
98
110
  return choices;
99
111
  }
100
112
 
101
- // otherwise, return the first choice
102
- const messageResult = choices[0].message && choices[0].message.content && choices[0].message.content.trim();
103
- return messageResult ?? null;
113
+ const choice = choices[0];
114
+ const message = choice.message;
115
+ if (!message) {
116
+ return null;
117
+ }
118
+
119
+ // Create standardized CortexResponse object
120
+ const cortexResponse = new CortexResponse({
121
+ output_text: message.content || "",
122
+ finishReason: choice.finish_reason || 'stop',
123
+ usage: data.usage || null,
124
+ metadata: {
125
+ model: this.modelName
126
+ }
127
+ });
128
+
129
+ // Handle tool calls
130
+ if (message.tool_calls) {
131
+ cortexResponse.toolCalls = message.tool_calls;
132
+ } else if (message.function_call) {
133
+ cortexResponse.functionCall = message.function_call;
134
+ }
135
+
136
+ return cortexResponse;
137
+ }
138
+
139
+ // Override processStreamEvent to handle OpenAI Chat streaming format
140
+ processStreamEvent(event, requestProgress) {
141
+ // check for end of stream or in-stream errors
142
+ if (event.data.trim() === '[DONE]') {
143
+ requestProgress.progress = 1;
144
+ } else {
145
+ let parsedMessage;
146
+ try {
147
+ parsedMessage = JSON.parse(event.data);
148
+ } catch (error) {
149
+ throw new Error(`Could not parse stream data: ${error}`);
150
+ }
151
+
152
+ // error can be in different places in the message
153
+ const streamError = parsedMessage?.error || parsedMessage?.choices?.[0]?.delta?.content?.error || parsedMessage?.choices?.[0]?.text?.error;
154
+ if (streamError) {
155
+ throw new Error(streamError);
156
+ }
157
+
158
+ // Check if this is an empty/idle event that we should skip
159
+ const delta = parsedMessage?.choices?.[0]?.delta;
160
+ const isEmptyEvent = !delta ||
161
+ (Object.keys(delta).length === 0) ||
162
+ (Object.keys(delta).length === 1 && delta.content === '') ||
163
+ (Object.keys(delta).length === 1 && delta.tool_calls && delta.tool_calls.length === 0);
164
+
165
+ // Skip publishing empty events unless they have a finish_reason
166
+ const hasFinishReason = parsedMessage?.choices?.[0]?.finish_reason || parsedMessage?.candidates?.[0]?.finishReason;
167
+
168
+ if (isEmptyEvent && !hasFinishReason) {
169
+ // Return requestProgress without setting data to prevent publishing
170
+ return requestProgress;
171
+ }
172
+
173
+ // Set the data for non-empty events or events with finish_reason
174
+ requestProgress.data = event.data;
175
+
176
+ // finish reason can be in different places in the message
177
+ const finishReason = parsedMessage?.choices?.[0]?.finish_reason || parsedMessage?.candidates?.[0]?.finishReason;
178
+ if (finishReason) {
179
+ switch (finishReason.toLowerCase()) {
180
+ case 'safety':
181
+ const safetyRatings = JSON.stringify(parsedMessage?.candidates?.[0]?.safetyRatings) || '';
182
+ logger.warn(`Request ${this.requestId} was blocked by the safety filter. ${safetyRatings}`);
183
+ requestProgress.data = `\n\nResponse blocked by safety filter: ${safetyRatings}`;
184
+ requestProgress.progress = 1;
185
+ break;
186
+ default:
187
+ requestProgress.progress = 1;
188
+ break;
189
+ }
190
+ }
191
+ }
192
+ return requestProgress;
104
193
  }
105
194
 
106
195
  // Override the logging function to display the messages and responses
@@ -135,11 +224,15 @@ class OpenAIChatPlugin extends ModelPlugin {
135
224
 
136
225
  if (stream) {
137
226
  logger.info(`[response received as an SSE stream]`);
138
- } else {
139
- const responseText = this.parseResponse(responseData);
140
- const { length, units } = this.getLength(responseText);
141
- logger.info(`[response received containing ${length} ${units}]`);
142
- logger.verbose(`${this.shortenContent(responseText)}`);
227
+ } else {
228
+ if (typeof responseData === 'string') {
229
+ const { length, units } = this.getLength(responseData);
230
+ logger.info(`[response received containing ${length} ${units}]`);
231
+ logger.verbose(`${this.shortenContent(responseData)}`);
232
+ } else {
233
+ logger.info(`[response received containing object]`);
234
+ logger.verbose(`${JSON.stringify(responseData)}`);
235
+ }
143
236
  }
144
237
 
145
238
  prompt && prompt.debugInfo && (prompt.debugInfo += `\n${JSON.stringify(data)}`);
@@ -2,6 +2,7 @@ import OpenAIChatPlugin from './openAiChatPlugin.js';
2
2
  import logger from '../../lib/logger.js';
3
3
  import { requestState } from '../requestState.js';
4
4
  import { addCitationsToResolver } from '../../lib/pathwayTools.js';
5
+ import CortexResponse from '../../lib/cortexResponse.js';
5
6
  function safeJsonParse(content) {
6
7
  try {
7
8
  const parsedContent = JSON.parse(content);
@@ -113,16 +114,14 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
113
114
  }
114
115
  if (stream) {
115
116
  logger.info(`[response received as an SSE stream]`);
116
- } else {
117
- const parsedResponse = this.parseResponse(responseData);
118
-
119
- if (typeof parsedResponse === 'string') {
120
- const { length, units } = this.getLength(parsedResponse);
117
+ } else {
118
+ if (typeof responseData === 'string') {
119
+ const { length, units } = this.getLength(responseData);
121
120
  logger.info(`[response received containing ${length} ${units}]`);
122
- logger.verbose(`${this.shortenContent(parsedResponse)}`);
121
+ logger.verbose(`${this.shortenContent(responseData)}`);
123
122
  } else {
124
123
  logger.info(`[response received containing object]`);
125
- logger.verbose(`${JSON.stringify(parsedResponse)}`);
124
+ logger.verbose(`${JSON.stringify(responseData)}`);
126
125
  }
127
126
  }
128
127
 
@@ -135,15 +134,6 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
135
134
 
136
135
  requestParameters.messages = await this.tryParseMessages(requestParameters.messages);
137
136
 
138
- // Add tools support if provided in parameters
139
- if (parameters.tools) {
140
- requestParameters.tools = parameters.tools;
141
- }
142
-
143
- if (parameters.tool_choice) {
144
- requestParameters.tool_choice = parameters.tool_choice;
145
- }
146
-
147
137
  const modelMaxReturnTokens = this.getModelMaxReturnTokens();
148
138
  const maxTokensPrompt = this.promptParameters.max_tokens;
149
139
  const maxTokensModel = this.getModelMaxTokenLength() * (1 - this.getPromptTokenRatio());
@@ -165,7 +155,7 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
165
155
  ...(cortexRequest.data || {}),
166
156
  ...requestParameters,
167
157
  };
168
- cortexRequest.params = {}; // query params
158
+ cortexRequest.params = {};
169
159
  cortexRequest.stream = stream;
170
160
 
171
161
  return this.executeRequest(cortexRequest);
@@ -179,24 +169,30 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
179
169
  return data;
180
170
  }
181
171
 
182
- // if we got a choices array back with more than one choice, return the whole array
183
- if (choices.length > 1) {
184
- return choices;
185
- }
186
-
187
172
  const choice = choices[0];
188
173
  const message = choice.message;
174
+ if (!message) {
175
+ return null;
176
+ }
177
+
178
+ // Create standardized CortexResponse object
179
+ const cortexResponse = new CortexResponse({
180
+ output_text: message.content || "",
181
+ finishReason: choice.finish_reason || 'stop',
182
+ usage: data.usage || null,
183
+ metadata: {
184
+ model: this.modelName
185
+ }
186
+ });
189
187
 
190
- // Handle tool calls in the response
188
+ // Handle tool calls
191
189
  if (message.tool_calls) {
192
- return {
193
- role: message.role,
194
- content: message.content || "",
195
- tool_calls: message.tool_calls
196
- };
190
+ cortexResponse.toolCalls = message.tool_calls;
191
+ } else if (message.function_call) {
192
+ cortexResponse.functionCall = message.function_call;
197
193
  }
198
194
 
199
- return message.content || "";
195
+ return cortexResponse;
200
196
  }
201
197
 
202
198
  processStreamEvent(event, requestProgress) {
@@ -210,7 +206,6 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
210
206
  let parsedMessage;
211
207
  try {
212
208
  parsedMessage = JSON.parse(event.data);
213
- requestProgress.data = event.data;
214
209
  } catch (error) {
215
210
  // Clear buffers on error
216
211
  this.toolCallsBuffer = [];
@@ -228,6 +223,23 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
228
223
  }
229
224
 
230
225
  const delta = parsedMessage?.choices?.[0]?.delta;
226
+
227
+ // Check if this is an empty/idle event that we should skip
228
+ const isEmptyEvent = !delta ||
229
+ (Object.keys(delta).length === 0) ||
230
+ (Object.keys(delta).length === 1 && delta.content === '') ||
231
+ (Object.keys(delta).length === 1 && delta.tool_calls && delta.tool_calls.length === 0);
232
+
233
+ // Skip publishing empty events unless they have a finish_reason
234
+ const hasFinishReason = parsedMessage?.choices?.[0]?.finish_reason;
235
+
236
+ if (isEmptyEvent && !hasFinishReason) {
237
+ // Return requestProgress without setting data to prevent publishing
238
+ return requestProgress;
239
+ }
240
+
241
+ // Set the data for non-empty events or events with finish_reason
242
+ requestProgress.data = event.data;
231
243
 
232
244
  // Accumulate content
233
245
  if (delta?.content) {