@aj-archipelago/cortex 1.3.62 → 1.3.63
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/cortex-file-handler-test.yml +61 -0
- package/README.md +31 -7
- package/config/default.example.json +15 -0
- package/config.js +133 -12
- package/helper-apps/cortex-autogen2/DigiCertGlobalRootCA.crt.pem +22 -0
- package/helper-apps/cortex-autogen2/Dockerfile +31 -0
- package/helper-apps/cortex-autogen2/Dockerfile.worker +41 -0
- package/helper-apps/cortex-autogen2/README.md +183 -0
- package/helper-apps/cortex-autogen2/__init__.py +1 -0
- package/helper-apps/cortex-autogen2/agents.py +131 -0
- package/helper-apps/cortex-autogen2/docker-compose.yml +20 -0
- package/helper-apps/cortex-autogen2/function_app.py +55 -0
- package/helper-apps/cortex-autogen2/host.json +15 -0
- package/helper-apps/cortex-autogen2/main.py +126 -0
- package/helper-apps/cortex-autogen2/poetry.lock +3652 -0
- package/helper-apps/cortex-autogen2/pyproject.toml +36 -0
- package/helper-apps/cortex-autogen2/requirements.txt +20 -0
- package/helper-apps/cortex-autogen2/send_task.py +105 -0
- package/helper-apps/cortex-autogen2/services/__init__.py +1 -0
- package/helper-apps/cortex-autogen2/services/azure_queue.py +85 -0
- package/helper-apps/cortex-autogen2/services/redis_publisher.py +153 -0
- package/helper-apps/cortex-autogen2/task_processor.py +488 -0
- package/helper-apps/cortex-autogen2/tools/__init__.py +24 -0
- package/helper-apps/cortex-autogen2/tools/azure_blob_tools.py +175 -0
- package/helper-apps/cortex-autogen2/tools/azure_foundry_agents.py +601 -0
- package/helper-apps/cortex-autogen2/tools/coding_tools.py +72 -0
- package/helper-apps/cortex-autogen2/tools/download_tools.py +48 -0
- package/helper-apps/cortex-autogen2/tools/file_tools.py +545 -0
- package/helper-apps/cortex-autogen2/tools/search_tools.py +646 -0
- package/helper-apps/cortex-azure-cleaner/README.md +36 -0
- package/helper-apps/cortex-file-converter/README.md +93 -0
- package/helper-apps/cortex-file-converter/key_to_pdf.py +104 -0
- package/helper-apps/cortex-file-converter/list_blob_extensions.py +89 -0
- package/helper-apps/cortex-file-converter/process_azure_keynotes.py +181 -0
- package/helper-apps/cortex-file-converter/requirements.txt +1 -0
- package/helper-apps/cortex-file-handler/.env.test.azure.ci +7 -0
- package/helper-apps/cortex-file-handler/.env.test.azure.sample +1 -1
- package/helper-apps/cortex-file-handler/.env.test.gcs.ci +10 -0
- package/helper-apps/cortex-file-handler/.env.test.gcs.sample +2 -2
- package/helper-apps/cortex-file-handler/INTERFACE.md +41 -0
- package/helper-apps/cortex-file-handler/package.json +1 -1
- package/helper-apps/cortex-file-handler/scripts/setup-azure-container.js +41 -17
- package/helper-apps/cortex-file-handler/scripts/setup-test-containers.js +30 -15
- package/helper-apps/cortex-file-handler/scripts/test-azure.sh +32 -6
- package/helper-apps/cortex-file-handler/scripts/test-gcs.sh +24 -2
- package/helper-apps/cortex-file-handler/scripts/validate-env.js +128 -0
- package/helper-apps/cortex-file-handler/src/blobHandler.js +161 -51
- package/helper-apps/cortex-file-handler/src/constants.js +3 -0
- package/helper-apps/cortex-file-handler/src/fileChunker.js +10 -8
- package/helper-apps/cortex-file-handler/src/index.js +116 -9
- package/helper-apps/cortex-file-handler/src/redis.js +61 -1
- package/helper-apps/cortex-file-handler/src/services/ConversionService.js +11 -8
- package/helper-apps/cortex-file-handler/src/services/FileConversionService.js +2 -2
- package/helper-apps/cortex-file-handler/src/services/storage/AzureStorageProvider.js +88 -6
- package/helper-apps/cortex-file-handler/src/services/storage/GCSStorageProvider.js +58 -0
- package/helper-apps/cortex-file-handler/src/services/storage/StorageFactory.js +25 -5
- package/helper-apps/cortex-file-handler/src/services/storage/StorageProvider.js +9 -0
- package/helper-apps/cortex-file-handler/src/services/storage/StorageService.js +120 -16
- package/helper-apps/cortex-file-handler/src/start.js +27 -17
- package/helper-apps/cortex-file-handler/tests/FileConversionService.test.js +52 -1
- package/helper-apps/cortex-file-handler/tests/blobHandler.test.js +40 -0
- package/helper-apps/cortex-file-handler/tests/checkHashShortLived.test.js +553 -0
- package/helper-apps/cortex-file-handler/tests/cleanup.test.js +46 -52
- package/helper-apps/cortex-file-handler/tests/containerConversionFlow.test.js +451 -0
- package/helper-apps/cortex-file-handler/tests/containerNameParsing.test.js +229 -0
- package/helper-apps/cortex-file-handler/tests/containerParameterFlow.test.js +392 -0
- package/helper-apps/cortex-file-handler/tests/conversionResilience.test.js +7 -2
- package/helper-apps/cortex-file-handler/tests/deleteOperations.test.js +348 -0
- package/helper-apps/cortex-file-handler/tests/fileChunker.test.js +23 -2
- package/helper-apps/cortex-file-handler/tests/fileUpload.test.js +11 -5
- package/helper-apps/cortex-file-handler/tests/getOperations.test.js +58 -24
- package/helper-apps/cortex-file-handler/tests/postOperations.test.js +11 -4
- package/helper-apps/cortex-file-handler/tests/shortLivedUrlConversion.test.js +225 -0
- package/helper-apps/cortex-file-handler/tests/start.test.js +8 -12
- package/helper-apps/cortex-file-handler/tests/storage/StorageFactory.test.js +80 -0
- package/helper-apps/cortex-file-handler/tests/storage/StorageService.test.js +388 -22
- package/helper-apps/cortex-file-handler/tests/testUtils.helper.js +74 -0
- package/lib/cortexResponse.js +153 -0
- package/lib/entityConstants.js +21 -3
- package/lib/logger.js +21 -4
- package/lib/pathwayTools.js +28 -9
- package/lib/util.js +49 -0
- package/package.json +1 -1
- package/pathways/basePathway.js +1 -0
- package/pathways/bing_afagent.js +54 -1
- package/pathways/call_tools.js +2 -3
- package/pathways/chat_jarvis.js +1 -1
- package/pathways/google_cse.js +27 -0
- package/pathways/grok_live_search.js +18 -0
- package/pathways/system/entity/memory/sys_memory_lookup_required.js +1 -0
- package/pathways/system/entity/memory/sys_memory_required.js +1 -0
- package/pathways/system/entity/memory/sys_search_memory.js +1 -0
- package/pathways/system/entity/sys_entity_agent.js +56 -4
- package/pathways/system/entity/sys_generator_quick.js +1 -0
- package/pathways/system/entity/tools/sys_tool_bing_search_afagent.js +26 -0
- package/pathways/system/entity/tools/sys_tool_google_search.js +141 -0
- package/pathways/system/entity/tools/sys_tool_grok_x_search.js +237 -0
- package/pathways/system/entity/tools/sys_tool_image.js +1 -1
- package/pathways/system/rest_streaming/sys_claude_37_sonnet.js +21 -0
- package/pathways/system/rest_streaming/sys_claude_41_opus.js +21 -0
- package/pathways/system/rest_streaming/sys_claude_4_sonnet.js +21 -0
- package/pathways/system/rest_streaming/sys_google_gemini_25_flash.js +25 -0
- package/pathways/system/rest_streaming/{sys_google_gemini_chat.js → sys_google_gemini_25_pro.js} +6 -4
- package/pathways/system/rest_streaming/sys_grok_4.js +23 -0
- package/pathways/system/rest_streaming/sys_grok_4_fast_non_reasoning.js +23 -0
- package/pathways/system/rest_streaming/sys_grok_4_fast_reasoning.js +23 -0
- package/pathways/system/rest_streaming/sys_openai_chat.js +3 -0
- package/pathways/system/rest_streaming/sys_openai_chat_gpt41.js +22 -0
- package/pathways/system/rest_streaming/sys_openai_chat_gpt41_mini.js +21 -0
- package/pathways/system/rest_streaming/sys_openai_chat_gpt41_nano.js +21 -0
- package/pathways/system/rest_streaming/{sys_claude_35_sonnet.js → sys_openai_chat_gpt4_omni.js} +6 -4
- package/pathways/system/rest_streaming/sys_openai_chat_gpt4_omni_mini.js +21 -0
- package/pathways/system/rest_streaming/{sys_claude_3_haiku.js → sys_openai_chat_gpt5.js} +7 -5
- package/pathways/system/rest_streaming/sys_openai_chat_gpt5_chat.js +21 -0
- package/pathways/system/rest_streaming/sys_openai_chat_gpt5_mini.js +21 -0
- package/pathways/system/rest_streaming/sys_openai_chat_gpt5_nano.js +21 -0
- package/pathways/system/rest_streaming/{sys_openai_chat_o1.js → sys_openai_chat_o3.js} +6 -3
- package/pathways/system/rest_streaming/sys_openai_chat_o3_mini.js +3 -0
- package/pathways/system/workspaces/run_workspace_prompt.js +99 -0
- package/pathways/vision.js +1 -1
- package/server/graphql.js +1 -1
- package/server/modelExecutor.js +8 -0
- package/server/pathwayResolver.js +166 -16
- package/server/pathwayResponseParser.js +16 -8
- package/server/plugins/azureFoundryAgentsPlugin.js +1 -1
- package/server/plugins/claude3VertexPlugin.js +193 -45
- package/server/plugins/gemini15ChatPlugin.js +21 -0
- package/server/plugins/gemini15VisionPlugin.js +360 -0
- package/server/plugins/googleCsePlugin.js +94 -0
- package/server/plugins/grokVisionPlugin.js +365 -0
- package/server/plugins/modelPlugin.js +3 -1
- package/server/plugins/openAiChatPlugin.js +106 -13
- package/server/plugins/openAiVisionPlugin.js +42 -30
- package/server/resolver.js +28 -4
- package/server/rest.js +270 -53
- package/server/typeDef.js +1 -0
- package/tests/{mocks.js → helpers/mocks.js} +5 -2
- package/tests/{server.js → helpers/server.js} +2 -2
- package/tests/helpers/sseAssert.js +23 -0
- package/tests/helpers/sseClient.js +73 -0
- package/tests/helpers/subscriptionAssert.js +11 -0
- package/tests/helpers/subscriptions.js +113 -0
- package/tests/{sublong.srt → integration/features/translate/sublong.srt} +4543 -4543
- package/tests/integration/features/translate/translate_chunking_stream.test.js +100 -0
- package/tests/{translate_srt.test.js → integration/features/translate/translate_srt.test.js} +2 -2
- package/tests/integration/graphql/async/stream/agentic.test.js +477 -0
- package/tests/integration/graphql/async/stream/subscription_streaming.test.js +62 -0
- package/tests/integration/graphql/async/stream/sys_entity_start_streaming.test.js +71 -0
- package/tests/integration/graphql/async/stream/vendors/claude_streaming.test.js +56 -0
- package/tests/integration/graphql/async/stream/vendors/gemini_streaming.test.js +66 -0
- package/tests/integration/graphql/async/stream/vendors/grok_streaming.test.js +56 -0
- package/tests/integration/graphql/async/stream/vendors/openai_streaming.test.js +72 -0
- package/tests/integration/graphql/features/google/sysToolGoogleSearch.test.js +96 -0
- package/tests/integration/graphql/features/grok/grok.test.js +688 -0
- package/tests/integration/graphql/features/grok/grok_x_search_tool.test.js +354 -0
- package/tests/{main.test.js → integration/graphql/features/main.test.js} +1 -1
- package/tests/{call_tools.test.js → integration/graphql/features/tools/call_tools.test.js} +2 -2
- package/tests/{vision.test.js → integration/graphql/features/vision/vision.test.js} +1 -1
- package/tests/integration/graphql/subscriptions/connection.test.js +26 -0
- package/tests/{openai_api.test.js → integration/rest/oai/openai_api.test.js} +63 -238
- package/tests/integration/rest/oai/tool_calling_api.test.js +343 -0
- package/tests/integration/rest/oai/tool_calling_streaming.test.js +85 -0
- package/tests/integration/rest/vendors/claude_streaming.test.js +47 -0
- package/tests/integration/rest/vendors/claude_tool_calling_streaming.test.js +75 -0
- package/tests/integration/rest/vendors/gemini_streaming.test.js +47 -0
- package/tests/integration/rest/vendors/gemini_tool_calling_streaming.test.js +75 -0
- package/tests/integration/rest/vendors/grok_streaming.test.js +55 -0
- package/tests/integration/rest/vendors/grok_tool_calling_streaming.test.js +75 -0
- package/tests/{azureAuthTokenHelper.test.js → unit/core/azureAuthTokenHelper.test.js} +1 -1
- package/tests/{chunkfunction.test.js → unit/core/chunkfunction.test.js} +2 -2
- package/tests/{config.test.js → unit/core/config.test.js} +3 -3
- package/tests/{encodeCache.test.js → unit/core/encodeCache.test.js} +1 -1
- package/tests/{fastLruCache.test.js → unit/core/fastLruCache.test.js} +1 -1
- package/tests/{handleBars.test.js → unit/core/handleBars.test.js} +1 -1
- package/tests/{memoryfunction.test.js → unit/core/memoryfunction.test.js} +2 -2
- package/tests/unit/core/mergeResolver.test.js +952 -0
- package/tests/{parser.test.js → unit/core/parser.test.js} +3 -3
- package/tests/unit/core/pathwayResolver.test.js +187 -0
- package/tests/{requestMonitor.test.js → unit/core/requestMonitor.test.js} +1 -1
- package/tests/{requestMonitorDurationEstimator.test.js → unit/core/requestMonitorDurationEstimator.test.js} +1 -1
- package/tests/{truncateMessages.test.js → unit/core/truncateMessages.test.js} +3 -3
- package/tests/{util.test.js → unit/core/util.test.js} +1 -1
- package/tests/{apptekTranslatePlugin.test.js → unit/plugins/apptekTranslatePlugin.test.js} +3 -3
- package/tests/{azureFoundryAgents.test.js → unit/plugins/azureFoundryAgents.test.js} +136 -1
- package/tests/{claude3VertexPlugin.test.js → unit/plugins/claude3VertexPlugin.test.js} +32 -10
- package/tests/{claude3VertexToolConversion.test.js → unit/plugins/claude3VertexToolConversion.test.js} +3 -3
- package/tests/unit/plugins/googleCsePlugin.test.js +111 -0
- package/tests/unit/plugins/grokVisionPlugin.test.js +1392 -0
- package/tests/{modelPlugin.test.js → unit/plugins/modelPlugin.test.js} +3 -3
- package/tests/{multimodal_conversion.test.js → unit/plugins/multimodal_conversion.test.js} +4 -4
- package/tests/{openAiChatPlugin.test.js → unit/plugins/openAiChatPlugin.test.js} +13 -4
- package/tests/{openAiToolPlugin.test.js → unit/plugins/openAiToolPlugin.test.js} +35 -27
- package/tests/{tokenHandlingTests.test.js → unit/plugins/tokenHandlingTests.test.js} +5 -5
- package/tests/{translate_apptek.test.js → unit/plugins/translate_apptek.test.js} +3 -3
- package/tests/{streaming.test.js → unit/plugins.streaming/plugin_stream_events.test.js} +19 -58
- package/helper-apps/mogrt-handler/tests/test-files/test.gif +0 -1
- package/helper-apps/mogrt-handler/tests/test-files/test.mogrt +0 -1
- package/helper-apps/mogrt-handler/tests/test-files/test.mp4 +0 -1
- package/pathways/system/rest_streaming/sys_openai_chat_gpt4.js +0 -19
- package/pathways/system/rest_streaming/sys_openai_chat_gpt4_32.js +0 -19
- package/pathways/system/rest_streaming/sys_openai_chat_gpt4_turbo.js +0 -19
- package/pathways/system/workspaces/run_claude35_sonnet.js +0 -21
- package/pathways/system/workspaces/run_claude3_haiku.js +0 -20
- package/pathways/system/workspaces/run_gpt35turbo.js +0 -20
- package/pathways/system/workspaces/run_gpt4.js +0 -20
- package/pathways/system/workspaces/run_gpt4_32.js +0 -20
- package/tests/agentic.test.js +0 -256
- package/tests/pathwayResolver.test.js +0 -78
- package/tests/subscription.test.js +0 -387
- /package/tests/{subchunk.srt → integration/features/translate/subchunk.srt} +0 -0
- /package/tests/{subhorizontal.srt → integration/features/translate/subhorizontal.srt} +0 -0
|
@@ -0,0 +1,343 @@
|
|
|
1
|
+
// tool_calling_api.test.js
//
// Integration tests for OpenAI-compatible function/tool calling on the
// Cortex REST API (/v1/chat/completions), both non-streaming and SSE.

import test from 'ava';
import got from 'got';
import axios from 'axios';
import serverFactory from '../../../../index.js';

// Base URL of the OpenAI-compatible REST API exposed by the Cortex server.
const API_BASE = `http://localhost:${process.env.CORTEX_PORT}/v1`;

let testServer;

// Boot the Cortex server once for this file, with the REST endpoints enabled.
test.before(async () => {
  process.env.CORTEX_ENABLE_REST = 'true';
  const { server, startServer } = await serverFactory();
  startServer && await startServer();
  testServer = server;
});

// Always stop the server so the ava worker process can exit cleanly.
test.after.always('cleanup', async () => {
  if (testServer) {
    await testServer.stop();
  }
});
|
|
24
|
+
|
|
25
|
+
/**
 * POST `payload` to an SSE endpoint and run `customAssertions(t, json)` on
 * every parsed event until a `data: [DONE]` sentinel arrives.
 *
 * Fixes over the original implementation:
 *  - SSE frames can be split across TCP chunks; a carry-over buffer keeps
 *    the trailing partial line so JSON.parse never sees truncated payloads.
 *  - JSON.parse failures (and assertion throws) reject the promise instead
 *    of escaping the 'data' handler as uncaught exceptions.
 *  - If the stream ends (or errors) without [DONE], the promise rejects
 *    instead of hanging until the test runner times out.
 *
 * @param {string} url - base URL, e.g. http://localhost:PORT/v1
 * @param {string} endpoint - path, e.g. '/chat/completions'
 * @param {object} payload - JSON request body (must set stream: true)
 * @param {object} t - ava execution context
 * @param {(t: object, json: object) => void} customAssertions - per-event checks
 * @returns {Promise<void>} resolves on [DONE], rejects on any failure
 */
async function connectToSSEEndpoint(url, endpoint, payload, t, customAssertions) {
  return new Promise((resolve, reject) => {
    const instance = axios.create({
      baseURL: url,
      responseType: 'stream',
    });

    instance.post(endpoint, payload).then(response => {
      const responseData = response.data;
      const incomingMessage = Array.isArray(responseData) && responseData.length > 0 ? responseData[0] : responseData;
      let eventCount = 0;
      let buffer = ''; // carries a trailing partial SSE line between chunks
      let settled = false;

      // Settle the promise exactly once, whichever event fires first.
      const finish = (fn, arg) => {
        if (!settled) {
          settled = true;
          fn(arg);
        }
      };

      incomingMessage.on('data', data => {
        buffer += data.toString();
        const lines = buffer.split('\n');
        buffer = lines.pop(); // keep the (possibly partial) last line for the next chunk

        for (const event of lines) {
          eventCount++;
          if (event.trim() === '') continue;

          if (event.trim() === 'data: [DONE]') {
            t.truthy(eventCount > 1);
            finish(resolve);
            return;
          }

          const message = event.replace(/^data: /, '');
          try {
            const messageJson = JSON.parse(message);
            customAssertions(t, messageJson);
          } catch (error) {
            finish(reject, error);
            return;
          }
        }
      });

      // A stream that closes without [DONE] is a protocol failure - surface it.
      incomingMessage.on('end', () => {
        finish(reject, new Error('SSE stream ended without a [DONE] event'));
      });
      incomingMessage.on('error', error => {
        finish(reject, error);
      });
    }).catch(error => {
      console.error('Error connecting to SSE endpoint:', error);
      reject(error);
    });
  });
}
|
|
69
|
+
|
|
70
|
+
// Non-streaming legacy function-calling: the model must return a
// function_call for get_weather with valid JSON arguments.
test('POST /chat/completions should handle function calling', async (t) => {
  const weatherFunction = {
    name: 'get_weather',
    description: 'Get the current weather in a given location',
    parameters: {
      type: 'object',
      properties: {
        location: {
          type: 'string',
          description: 'The city and state, e.g. San Francisco, CA',
        },
        unit: {
          type: 'string',
          enum: ['celsius', 'fahrenheit'],
        },
      },
      required: ['location'],
    },
  };

  const response = await got.post(`${API_BASE}/chat/completions`, {
    json: {
      model: 'gpt-4.1',
      messages: [{ role: 'user', content: 'I need to know the weather in Boston. You MUST use the get_weather function to get this information. Do not respond without calling the function first.' }],
      functions: [weatherFunction],
      stream: false,
    },
    responseType: 'json',
  });

  t.is(response.statusCode, 200);
  t.is(response.body.object, 'chat.completion');
  t.true(Array.isArray(response.body.choices));
  const [choice] = response.body.choices;

  // CRITICAL: the test fails unless a function call actually occurred.
  t.is(choice.finish_reason, 'function_call', 'Expected function_call finish_reason but got: ' + choice.finish_reason);
  t.truthy(choice.message.function_call, 'Expected function_call in message but got none');
  t.is(choice.message.function_call.name, 'get_weather', 'Expected get_weather function call');
  t.truthy(choice.message.function_call.arguments, 'Expected function call arguments');

  // The arguments must parse as JSON and include a string location.
  try {
    const args = JSON.parse(choice.message.function_call.arguments);
    t.truthy(args.location, 'Expected location in function call arguments');
    t.true(typeof args.location === 'string', 'Location should be a string');
  } catch (e) {
    t.fail(`Function call arguments should be valid JSON: ${choice.message.function_call.arguments}`);
  }
});
|
118
|
+
|
|
119
|
+
// Non-streaming modern tool-calling: the model must return a tool_calls
// entry for get_weather with valid JSON arguments.
test('POST /chat/completions should handle tool calling', async (t) => {
  const weatherTool = {
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Get the current weather in a given location',
      parameters: {
        type: 'object',
        properties: {
          location: {
            type: 'string',
            description: 'The city and state, e.g. San Francisco, CA',
          },
          unit: {
            type: 'string',
            enum: ['celsius', 'fahrenheit'],
          },
        },
        required: ['location'],
      },
    },
  };

  const response = await got.post(`${API_BASE}/chat/completions`, {
    json: {
      model: 'gpt-4.1',
      messages: [{
        role: 'user',
        content: 'I need to know the weather in Boston. You MUST use the get_weather tool to get this information. Do not respond without calling the tool first.'
      }],
      tools: [weatherTool],
      tool_choice: 'auto',
      stream: false,
    },
    responseType: 'json',
  });

  t.is(response.statusCode, 200);
  t.is(response.body.object, 'chat.completion');
  t.is(response.body.model, 'gpt-4.1');
  t.is(response.body.choices.length, 1);

  const choice = response.body.choices[0];
  t.is(choice.message.role, 'assistant');

  // Guard clause: FAIL if no tool calls came back - that is what we test.
  if (!choice.message.tool_calls) {
    t.fail(`Expected tool calls but got none. Response: ${JSON.stringify(choice.message, null, 2)}`);
    return;
  }

  t.true(Array.isArray(choice.message.tool_calls));
  t.is(choice.message.tool_calls.length, 1);

  const toolCall = choice.message.tool_calls[0];
  t.is(toolCall.type, 'function');
  t.is(toolCall.function.name, 'get_weather');
  t.truthy(toolCall.function.arguments);

  // The streamed-back arguments must be valid JSON.
  try {
    const args = JSON.parse(toolCall.function.arguments);
    t.truthy(args.location);
  } catch (e) {
    t.fail(`Tool call arguments should be valid JSON: ${toolCall.function.arguments}`);
  }

  t.is(choice.finish_reason, 'tool_calls');
});
|
|
186
|
+
|
|
187
|
+
// Streaming tool-calling: each SSE chunk must be a well-formed OAI chunk;
// the stream must contain tool_call deltas and a terminating finish_reason.
test('POST SSE: /v1/chat/completions with tool calling should send proper streaming events', async (t) => {
  const payload = {
    model: 'gpt-4.1',
    messages: [{
      role: 'user',
      content: 'I need to know the weather in Boston. You MUST use the get_weather tool to get this information. Do not respond without calling the tool first.'
    }],
    tools: [{
      type: 'function',
      function: {
        name: 'get_weather',
        description: 'Get the current weather in a given location',
        parameters: {
          type: 'object',
          properties: {
            location: {
              type: 'string',
              description: 'The city and state, e.g. San Francisco, CA'
            },
            unit: {
              type: 'string',
              enum: ['celsius', 'fahrenheit']
            }
          },
          required: ['location']
        }
      }
    }],
    tool_choice: 'auto',
    stream: true,
  };

  const url = `http://localhost:${process.env.CORTEX_PORT}/v1`;

  let toolCallDetected = false;
  let finalChunkReceived = false;

  // Per-chunk checks; also records whether tool calls and a final
  // finish_reason chunk were observed anywhere in the stream.
  const toolCallingStreamingAssertions = (t, messageJson) => {
    t.truthy(messageJson.id);
    t.is(messageJson.object, 'chat.completion.chunk');
    t.truthy(messageJson.choices[0].delta);

    const delta = messageJson.choices[0].delta;
    const finishReason = messageJson.choices[0].finish_reason;

    if (delta.tool_calls) {
      toolCallDetected = true;

      // finish_reason is only set on the final chunk of the tool call.
      if (finishReason === 'tool_calls') {
        finalChunkReceived = true;
      }

      // Validate tool call structure when a name delta is present.
      const toolCall = delta.tool_calls[0];
      if (toolCall && toolCall.function && toolCall.function.name) {
        t.is(toolCall.function.name, 'get_weather', 'Expected get_weather tool call');
      }
    } else if (finishReason === 'stop') {
      // Final chunk may have an empty delta, which is valid.
      finalChunkReceived = true;
    } else if (finishReason === 'tool_calls') {
      // Final chunk with tool_calls finish reason but no tool_calls in delta.
      toolCallDetected = true;
      finalChunkReceived = true;
    }
  };

  await connectToSSEEndpoint(url, '/chat/completions', payload, t, toolCallingStreamingAssertions);

  // CRITICAL: tool calls must actually appear in the stream.
  t.true(toolCallDetected, 'Expected tool calls to be detected in the streaming response but none were found');
  // Fix: finalChunkReceived was tracked but never verified - the stream must
  // terminate with a finish_reason chunk ('tool_calls', or 'stop').
  t.true(finalChunkReceived, 'Expected a final chunk carrying a finish_reason to terminate the stream');
});
|
|
265
|
+
|
|
266
|
+
// Same streaming tool-calling contract, exercised against a reasoning model
// (o3-mini): chunks are OAI-shaped, tool_call deltas appear, and the stream
// ends with a finish_reason chunk.
test('POST SSE: /v1/chat/completions with tool calling should send proper streaming events with reasoning model', async (t) => {
  const payload = {
    model: 'o3-mini',
    messages: [{
      role: 'user',
      content: 'I need to know the weather in Boston. You MUST use the get_weather tool to get this information. Do not respond without calling the tool first.'
    }],
    tools: [{
      type: 'function',
      function: {
        name: 'get_weather',
        description: 'Get the current weather in a given location',
        parameters: {
          type: 'object',
          properties: {
            location: {
              type: 'string',
              description: 'The city and state, e.g. San Francisco, CA'
            },
            unit: {
              type: 'string',
              enum: ['celsius', 'fahrenheit']
            }
          },
          required: ['location']
        }
      }
    }],
    tool_choice: 'auto',
    stream: true,
  };

  const url = `http://localhost:${process.env.CORTEX_PORT}/v1`;

  let toolCallDetected = false;
  let finalChunkReceived = false;

  // Per-chunk checks; also records whether tool calls and a final
  // finish_reason chunk were observed anywhere in the stream.
  const toolCallingStreamingAssertions = (t, messageJson) => {
    t.truthy(messageJson.id);
    t.is(messageJson.object, 'chat.completion.chunk');
    t.truthy(messageJson.choices[0].delta);

    const delta = messageJson.choices[0].delta;
    const finishReason = messageJson.choices[0].finish_reason;

    if (delta.tool_calls) {
      toolCallDetected = true;

      // finish_reason is only set on the final chunk of the tool call.
      if (finishReason === 'tool_calls') {
        finalChunkReceived = true;
      }

      // Validate tool call structure when a name delta is present.
      const toolCall = delta.tool_calls[0];
      if (toolCall && toolCall.function && toolCall.function.name) {
        t.is(toolCall.function.name, 'get_weather', 'Expected get_weather tool call');
      }
    } else if (finishReason === 'stop') {
      // Final chunk may have an empty delta, which is valid.
      finalChunkReceived = true;
    } else if (finishReason === 'tool_calls') {
      // Final chunk with tool_calls finish reason but no tool_calls in delta.
      toolCallDetected = true;
      finalChunkReceived = true;
    }
  };

  await connectToSSEEndpoint(url, '/chat/completions', payload, t, toolCallingStreamingAssertions);

  // CRITICAL: tool calls must actually appear in the stream.
  t.true(toolCallDetected, 'Expected tool calls to be detected in the streaming response but none were found');
  // Fix: finalChunkReceived was tracked but never verified - the stream must
  // terminate with a finish_reason chunk ('tool_calls', or 'stop').
  t.true(finalChunkReceived, 'Expected a final chunk carrying a finish_reason to terminate the stream');
});
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
// Integration test setup: streaming tool-call delta reconstruction against
// the Cortex OpenAI-compatible REST API.

import test from 'ava';
import serverFactory from '../../../../index.js';
import { collectSSEChunks } from '../../../helpers/sseAssert.js';
import got from 'got';

let testServer;

// Start the Cortex server (REST enabled) once for this file.
test.before(async () => {
  process.env.CORTEX_ENABLE_REST = 'true';
  const { server, startServer } = await serverFactory();
  startServer && await startServer();
  testServer = server;
});

// Stop the server so the ava worker process can exit.
test.after.always('cleanup', async () => {
  if (testServer) await testServer.stop();
});
|
|
18
|
+
|
|
19
|
+
// Streamed tool_calls must arrive as OAI-style deltas whose name/arguments
// fragments reconstruct into JSON-like content for the requested tool.
test('Streaming tool_calls appear as OAI deltas and reconstruct into valid arguments', async (t) => {
  // pick any OpenAI-compatible model advertised by /models, default gpt-4o
  const baseUrl = `http://localhost:${process.env.CORTEX_PORT}/v1`;
  let model = 'gpt-4o';
  try {
    const res = await got(`${baseUrl}/models`, { responseType: 'json' });
    const ids = (res.body?.data || []).map(m => m.id);
    model = ids.find(id => /^oai-|^gpt|^openai/i.test(id)) || model;
  } catch (_) {
    // best-effort discovery: fall back to the default model if /models fails
  }

  const payload = {
    model,
    messages: [
      { role: 'system', content: 'You are a helpful assistant. If the user asks to sum numbers, call the sum tool.' },
      { role: 'user', content: 'Sum 2 and 3.' }
    ],
    tools: [
      {
        type: 'function',
        function: {
          name: 'sum',
          description: 'Sum two numbers',
          parameters: {
            type: 'object',
            properties: { a: { type: 'number' }, b: { type: 'number' } },
            required: ['a', 'b']
          }
        }
      }
    ],
    stream: true,
  };

  let chunks;
  try {
    chunks = await collectSSEChunks(baseUrl, '/chat/completions', payload);
  } catch (err) {
    if (err?.response?.status === 404) {
      t.pass('Skipping - tool-calling streaming endpoint not available');
      return;
    }
    throw err;
  }
  t.true(chunks.length > 0);

  // Gather tool_call name and arguments deltas
  let toolName = '';
  let argsBuffer = '';
  let sawToolCall = false;
  for (const ch of chunks) {
    const tc = ch?.choices?.[0]?.delta?.tool_calls?.[0];
    if (tc) {
      sawToolCall = true;
      if (tc.function?.name) toolName = tc.function.name || toolName;
      if (tc.function?.arguments) argsBuffer += tc.function.arguments;
    }
  }

  t.true(sawToolCall);
  t.is(toolName, 'sum');
  // Arguments may be streamed as partial JSON; assert JSON-like content only.
  // Fix: dropped the stateful /g flag (RegExp.test with /g advances lastIndex
  // between calls) and the needless character-class escapes.
  if (argsBuffer) {
    t.true(/[{}"]/.test(argsBuffer));
  }
});
|
|
84
|
+
|
|
85
|
+
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
// Integration test setup: Claude SSE streaming through the Cortex
// OpenAI-compatible REST API.

import test from 'ava';
import serverFactory from '../../../../index.js';
import got from 'got';
import { collectSSEChunks, assertOAIChatChunkBasics, assertAnyContentDelta } from '../../../helpers/sseAssert.js';

let testServer;

// Start the Cortex server (REST enabled) once for this file.
test.before(async () => {
  process.env.CORTEX_ENABLE_REST = 'true';
  const { server, startServer } = await serverFactory();
  startServer && await startServer();
  testServer = server;
});

// Stop the server so the ava worker process can exit.
test.after.always('cleanup', async () => {
  if (testServer) await testServer.stop();
});
|
|
18
|
+
|
|
19
|
+
test('Claude SSE chat stream returns OAI-style chunks', async (t) => {
|
|
20
|
+
const baseUrl = `http://localhost:${process.env.CORTEX_PORT}/v1`;
|
|
21
|
+
|
|
22
|
+
// Pick an available Claude model from /models
|
|
23
|
+
let model = null;
|
|
24
|
+
try {
|
|
25
|
+
const res = await got(`${baseUrl}/models`, { responseType: 'json' });
|
|
26
|
+
const ids = (res.body?.data || []).map(m => m.id);
|
|
27
|
+
model = ids.find(id => /^claude-/i.test(id));
|
|
28
|
+
} catch (_) {}
|
|
29
|
+
|
|
30
|
+
if (!model) {
|
|
31
|
+
t.pass('Skipping - no Claude model configured');
|
|
32
|
+
return;
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
const payload = {
|
|
36
|
+
model,
|
|
37
|
+
messages: [{ role: 'user', content: 'Hello!' }],
|
|
38
|
+
stream: true,
|
|
39
|
+
};
|
|
40
|
+
|
|
41
|
+
const chunks = await collectSSEChunks(baseUrl, '/chat/completions', payload);
|
|
42
|
+
t.true(chunks.length > 0);
|
|
43
|
+
chunks.forEach(ch => assertOAIChatChunkBasics(t, ch));
|
|
44
|
+
t.true(assertAnyContentDelta(chunks));
|
|
45
|
+
});
|
|
46
|
+
|
|
47
|
+
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
import test from 'ava';
import serverFactory from '../../../../index.js';
import { collectSSEChunks } from '../../../helpers/sseAssert.js';
import got from 'got';

let testServer;

// Boot the Cortex server with the REST layer enabled before any test runs.
test.before(async () => {
  process.env.CORTEX_ENABLE_REST = 'true';
  const { server, startServer } = await serverFactory();
  startServer && await startServer();
  testServer = server;
});

// Tear the server down even when a test fails.
test.after.always('cleanup', async () => {
  if (testServer) await testServer.stop();
});

// Verifies that a Claude-backed model invoked through the OpenAI-compatible
// streaming endpoint surfaces tool calls as OAI-style `delta.tool_calls`
// chunks: a function name plus incrementally streamed JSON argument fragments.
test('Claude streaming tool_calls appear as OAI deltas', async (t) => {
  const baseUrl = `http://localhost:${process.env.CORTEX_PORT}/v1`;

  // Pick a Claude-compatible model from /models, falling back to a known id.
  // Discovery is best-effort: a failed listing just keeps the default.
  let model = 'claude-4-sonnet-vertex';
  try {
    const res = await got(`${baseUrl}/models`, { responseType: 'json' });
    const ids = (res.body?.data || []).map(m => m.id);
    model = ids.find(id => /^claude|^anthropic/i.test(id)) || model;
  } catch (_) {}

  const payload = {
    model,
    messages: [
      { role: 'system', content: 'If the user asks to sum numbers, call the sum tool.' },
      { role: 'user', content: 'Sum 2 and 3.' }
    ],
    // Force the named tool. Per the OpenAI spec the selector is an object:
    // { type: 'function', function: { name: '...' } } — the previous bare
    // string form is rejected or ignored by spec-conformant servers.
    tool_choice: { type: 'function', function: { name: 'sum' } },
    tools: [
      {
        type: 'function',
        function: {
          name: 'sum',
          description: 'Sum two numbers',
          parameters: {
            type: 'object',
            properties: { a: { type: 'number' }, b: { type: 'number' } },
            required: ['a', 'b']
          }
        }
      }
    ],
    stream: true,
  };

  // Skip (rather than fail) when the tool-calling streaming endpoint is not
  // deployed, matching the other tool-calling streaming tests in this suite.
  let chunks;
  try {
    chunks = await collectSSEChunks(baseUrl, '/chat/completions', payload);
  } catch (err) {
    if (err?.response?.status === 404) {
      t.pass('Skipping - tool-calling streaming endpoint not available');
      return;
    }
    throw err;
  }

  t.true(chunks.length > 0);

  // Accumulate the tool-call name and argument fragments across deltas.
  let sawToolCall = false;
  let toolName = '';
  let argsBuffer = '';
  for (const ch of chunks) {
    const tc = ch?.choices?.[0]?.delta?.tool_calls?.[0];
    if (tc) {
      sawToolCall = true;
      if (tc.function?.name) toolName = tc.function.name;
      if (tc.function?.arguments) argsBuffer += tc.function.arguments;
    }
  }

  t.true(sawToolCall);
  t.is(toolName, 'sum');
  // Arguments may stream as partial JSON; only assert JSON-like content.
  // No `g` flag: a global regex keeps lastIndex state across .test() calls.
  if (argsBuffer) t.true(/[{}"]/.test(argsBuffer));
});
|
|
74
|
+
|
|
75
|
+
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import test from 'ava';
import serverFactory from '../../../../index.js';
import got from 'got';
import { collectSSEChunks, assertOAIChatChunkBasics, assertAnyContentDelta } from '../../../helpers/sseAssert.js';

let testServer;

// Boot the Cortex server with the REST layer enabled before any test runs.
test.before(async () => {
  process.env.CORTEX_ENABLE_REST = 'true';
  const { server, startServer } = await serverFactory();
  if (startServer) {
    await startServer();
  }
  testServer = server;
});

// Tear the server down even when a test fails.
test.after.always('cleanup', async () => {
  if (testServer) {
    await testServer.stop();
  }
});

// Streams a trivial chat request against a Gemini-backed model and checks
// every SSE chunk has the OpenAI chat-completion chunk shape, with at least
// one content delta present.
test('Gemini SSE chat stream returns OAI-style chunks', async (t) => {
  const baseUrl = `http://localhost:${process.env.CORTEX_PORT}/v1`;

  // Discover an available Gemini model from /models; best-effort — a failed
  // listing simply leaves `model` null and the test skips.
  let model = null;
  try {
    const listing = await got(`${baseUrl}/models`, { responseType: 'json' });
    const modelIds = (listing.body?.data || []).map((entry) => entry.id);
    model = modelIds.find((id) => /gemini/i.test(id));
  } catch (_) {}

  if (!model) {
    t.pass('Skipping - no Gemini model configured');
    return;
  }

  const requestBody = {
    model,
    messages: [{ role: 'user', content: 'Hi there!' }],
    stream: true,
  };

  const received = await collectSSEChunks(baseUrl, '/chat/completions', requestBody);
  t.true(received.length > 0);
  for (const chunk of received) {
    assertOAIChatChunkBasics(t, chunk);
  }
  t.true(assertAnyContentDelta(received));
});
|
|
46
|
+
|
|
47
|
+
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
import test from 'ava';
import serverFactory from '../../../../index.js';
import { collectSSEChunks } from '../../../helpers/sseAssert.js';
import got from 'got';

let testServer;

// Boot the Cortex server with the REST layer enabled before any test runs.
test.before(async () => {
  process.env.CORTEX_ENABLE_REST = 'true';
  const { server, startServer } = await serverFactory();
  startServer && await startServer();
  testServer = server;
});

// Tear the server down even when a test fails.
test.after.always('cleanup', async () => {
  if (testServer) await testServer.stop();
});

// Verifies that a Gemini-backed model invoked through the OpenAI-compatible
// streaming endpoint surfaces tool calls as OAI-style `delta.tool_calls`
// chunks: a function name plus incrementally streamed JSON argument fragments.
test('Gemini streaming tool_calls appear as OAI deltas', async (t) => {
  const baseUrl = `http://localhost:${process.env.CORTEX_PORT}/v1`;

  // Pick a Gemini-compatible model from /models, falling back to a known id.
  // Discovery is best-effort: a failed listing just keeps the default.
  let model = 'gemini-flash-25-vision';
  try {
    const res = await got(`${baseUrl}/models`, { responseType: 'json' });
    const ids = (res.body?.data || []).map(m => m.id);
    model = ids.find(id => /^gemini|^google/i.test(id)) || model;
  } catch (_) {}

  const payload = {
    model,
    messages: [
      { role: 'system', content: 'If the user asks to sum numbers, call the sum tool.' },
      { role: 'user', content: 'Sum 2 and 3.' }
    ],
    // Force the named tool. Per the OpenAI spec the selector is an object:
    // { type: 'function', function: { name: '...' } } — the previous bare
    // string form is rejected or ignored by spec-conformant servers.
    tool_choice: { type: 'function', function: { name: 'sum' } },
    tools: [
      {
        type: 'function',
        function: {
          name: 'sum',
          description: 'Sum two numbers',
          parameters: {
            type: 'object',
            properties: { a: { type: 'number' }, b: { type: 'number' } },
            required: ['a', 'b']
          }
        }
      }
    ],
    stream: true,
  };

  // Skip (rather than fail) when the tool-calling streaming endpoint is not
  // deployed, matching the other tool-calling streaming tests in this suite.
  let chunks;
  try {
    chunks = await collectSSEChunks(baseUrl, '/chat/completions', payload);
  } catch (err) {
    if (err?.response?.status === 404) {
      t.pass('Skipping - tool-calling streaming endpoint not available');
      return;
    }
    throw err;
  }

  t.true(chunks.length > 0);

  // Accumulate the tool-call name and argument fragments across deltas.
  let sawToolCall = false;
  let toolName = '';
  let argsBuffer = '';
  for (const ch of chunks) {
    const tc = ch?.choices?.[0]?.delta?.tool_calls?.[0];
    if (tc) {
      sawToolCall = true;
      if (tc.function?.name) toolName = tc.function.name;
      if (tc.function?.arguments) argsBuffer += tc.function.arguments;
    }
  }

  t.true(sawToolCall);
  t.is(toolName, 'sum');
  // Arguments may stream as partial JSON; only assert JSON-like content.
  // No `g` flag: a global regex keeps lastIndex state across .test() calls.
  if (argsBuffer) t.true(/[{}"]/.test(argsBuffer));
});
|
|
74
|
+
|
|
75
|
+
|