@aj-archipelago/cortex 1.4.32 → 1.4.33

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,197 @@
1
+ /**
2
+ * E2E integration test for direct Anthropic API access (not via Vertex AI)
3
+ * Tests the CLAUDE-ANTHROPIC plugin type with Claude models
4
+ *
5
+ * Run with: npm test -- tests/integration/rest/vendors/claude_anthropic_direct.test.js
6
+ */
7
+ import test from 'ava';
8
+ import serverFactory from '../../../../index.js';
9
+ import got from 'got';
10
+ import { collectSSEChunks, assertOAIChatChunkBasics, assertAnyContentDelta } from '../../../helpers/sseAssert.js';
11
// Server handle shared by the lifecycle hooks below.
let testServer;

// Boot the Cortex server with the REST endpoint enabled before any test runs.
test.before(async () => {
  process.env.CORTEX_ENABLE_REST = 'true';
  const factory = await serverFactory();
  if (factory.startServer) {
    await factory.startServer();
  }
  testServer = factory.server;
});

// Always tear the server down, even when tests fail.
test.after.always('cleanup', async () => {
  if (testServer) {
    await testServer.stop();
  }
});
24
+
25
// Test: Basic streaming chat completion with Claude 4.5 Sonnet via direct Anthropic API
test('Claude 4.5 Sonnet (direct Anthropic) SSE streaming chat', async (t) => {
  const baseUrl = `http://localhost:${process.env.CORTEX_PORT}/v1`;

  // Use the direct Anthropic model (claude-45-sonnet)
  const model = 'claude-45-sonnet';

  // Skip (pass) gracefully when this deployment does not expose the model;
  // fail only when the /models endpoint itself is unreachable.
  let availableIds;
  try {
    const res = await got(`${baseUrl}/models`, { responseType: 'json' });
    availableIds = (res.body?.data || []).map((m) => m.id);
  } catch (err) {
    t.fail(`Failed to get models: ${err.message}`);
    return;
  }
  if (!availableIds.includes(model)) {
    t.pass(`Skipping - model ${model} not configured`);
    return;
  }

  const chunks = await collectSSEChunks(baseUrl, '/chat/completions', {
    model,
    messages: [{ role: 'user', content: 'Say "Hello from Anthropic direct API!" and nothing else.' }],
    stream: true,
  });

  t.true(chunks.length > 0, 'Should receive SSE chunks');
  for (const chunk of chunks) {
    assertOAIChatChunkBasics(t, chunk);
  }
  t.true(assertAnyContentDelta(chunks), 'Should have content delta in chunks');

  // Log the full response for debugging
  const fullContent = chunks.map((c) => c?.choices?.[0]?.delta?.content || '').join('');
  t.log(`Response: ${fullContent}`);
});
62
+
63
// Test: Non-streaming chat completion with Claude 4.5 Sonnet via direct Anthropic API
test('Claude 4.5 Sonnet (direct Anthropic) non-streaming chat', async (t) => {
  const baseUrl = `http://localhost:${process.env.CORTEX_PORT}/v1`;
  const model = 'claude-45-sonnet';

  // Skip (pass) gracefully when the model is not configured on this server.
  let availableIds;
  try {
    const res = await got(`${baseUrl}/models`, { responseType: 'json' });
    availableIds = (res.body?.data || []).map((m) => m.id);
  } catch (err) {
    t.fail(`Failed to get models: ${err.message}`);
    return;
  }
  if (!availableIds.includes(model)) {
    t.pass(`Skipping - model ${model} not configured`);
    return;
  }

  try {
    const response = await got.post(`${baseUrl}/chat/completions`, {
      json: {
        model,
        messages: [{ role: 'user', content: 'What is 2 + 2? Reply with just the number.' }],
        stream: false,
      },
      responseType: 'json',
      timeout: { request: 60000 },
    });

    // Walk the OpenAI-compatible response shape level by level so the first
    // missing piece is the one reported.
    const body = response.body;
    t.truthy(body, 'Should have response body');
    t.truthy(body.choices, 'Should have choices');
    t.truthy(body.choices[0].message, 'Should have message');
    t.truthy(body.choices[0].message.content, 'Should have content');

    t.log(`Response: ${body.choices[0].message.content}`);
  } catch (err) {
    t.fail(`Request failed: ${err.message}`);
  }
});
104
+
105
// Test: Chat with system message
// Streams a completion where a system prompt instructs the model to answer in
// pirate speak, then heuristically checks the reply for pirate vocabulary.
test('Claude 4.5 Sonnet (direct Anthropic) with system message', async (t) => {
  const baseUrl = `http://localhost:${process.env.CORTEX_PORT}/v1`;
  const model = 'claude-45-sonnet';

  // Verify the model is available; skip (pass) when it is not configured,
  // fail when the /models endpoint itself cannot be reached.
  try {
    const res = await got(`${baseUrl}/models`, { responseType: 'json' });
    const ids = (res.body?.data || []).map(m => m.id);
    if (!ids.includes(model)) {
      t.pass(`Skipping - model ${model} not configured`);
      return;
    }
  } catch (err) {
    t.fail(`Failed to get models: ${err.message}`);
    return;
  }

  const payload = {
    model,
    messages: [
      { role: 'system', content: 'You are a pirate. Always respond in pirate speak.' },
      { role: 'user', content: 'Hello!' }
    ],
    stream: true,
  };

  const chunks = await collectSSEChunks(baseUrl, '/chat/completions', payload);
  t.true(chunks.length > 0, 'Should receive SSE chunks');
  chunks.forEach(ch => assertOAIChatChunkBasics(t, ch));
  t.true(assertAnyContentDelta(chunks), 'Should have content delta in chunks');

  const fullContent = chunks
    .map(c => c?.choices?.[0]?.delta?.content || '')
    .join('');
  t.log(`Response: ${fullContent}`);

  // Should contain pirate-like language. The /i flag already makes the match
  // case-insensitive, so lower-casing the content first was redundant.
  t.regex(fullContent, /ahoy|arr|matey|ye|cap|sail|treasure/i, 'Should respond in pirate speak');
});
144
+
145
// Test: Document block support (PDF)
test('Claude 4.5 Sonnet (direct Anthropic) with PDF document', async (t) => {
  const baseUrl = `http://localhost:${process.env.CORTEX_PORT}/v1`;
  const model = 'claude-45-sonnet';

  // Skip (pass) gracefully when the model is not configured on this server.
  let availableIds;
  try {
    const res = await got(`${baseUrl}/models`, { responseType: 'json' });
    availableIds = (res.body?.data || []).map((m) => m.id);
  } catch (err) {
    t.fail(`Failed to get models: ${err.message}`);
    return;
  }
  if (!availableIds.includes(model)) {
    t.pass(`Skipping - model ${model} not configured`);
    return;
  }

  // Minimal valid PDF
  const pdfContent = '%PDF-1.4\n%âãÏÓ\n1 0 obj\n<</Type/Catalog/Pages 2 0 R>>\nendobj\n2 0 obj\n<</Type/Pages/Kids[3 0 R]/Count 1>>\nendobj\n3 0 obj\n<</Type/Page/Parent 2 0 R/Resources<</Font<</F1 4 0 R>>>>/MediaBox[0 0 612 792]/Contents 5 0 R>>\nendobj\n4 0 obj\n<</Type/Font/Subtype/Type1/BaseFont/Helvetica>>\nendobj\n5 0 obj\n<</Length 44>>\nstream\nBT\n/F1 12 Tf\n100 700 Td\n(Test PDF Doc) Tj\nET\nendstream\nendobj\nxref\n0 6\n0000000000 65535 f\n0000000010 00000 n\n0000000053 00000 n\n0000000102 00000 n\n0000000211 00000 n\n0000000280 00000 n\ntrailer\n<</Size 6/Root 1 0 R>>\nstartxref\n369\n%%EOF';
  const base64Pdf = Buffer.from(pdfContent).toString('base64');

  // Anthropic-style document content block carrying the base64 PDF.
  const documentBlock = {
    type: 'document',
    source: {
      type: 'base64',
      media_type: 'application/pdf',
      data: base64Pdf
    }
  };

  const payload = {
    model,
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'What text is in this PDF? Reply with just the text you see.' },
          documentBlock
        ]
      }
    ],
    stream: true,
  };

  const chunks = await collectSSEChunks(baseUrl, '/chat/completions', payload);
  t.true(chunks.length > 0, 'Should receive SSE chunks');
  for (const chunk of chunks) {
    assertOAIChatChunkBasics(t, chunk);
  }
  t.true(assertAnyContentDelta(chunks), 'Should have content delta in chunks');

  const fullContent = chunks.map((c) => c?.choices?.[0]?.delta?.content || '').join('');
  t.log(`Response: ${fullContent}`);
});
@@ -0,0 +1,236 @@
1
+ import test from 'ava';
2
+ import ClaudeAnthropicPlugin from '../../../server/plugins/claudeAnthropicPlugin.js';
3
+ import { mockPathwayResolverMessages } from '../../helpers/mocks.js';
4
+ import { config } from '../../../config.js';
5
+
6
// Create a mock model config that matches Anthropic direct API format
const anthropicModel = {
  // Start from the shared mock model and override Anthropic-specific fields.
  ...mockPathwayResolverMessages.model,
  // Plugin type under test: direct Anthropic API (not the Vertex AI variant).
  type: 'CLAUDE-ANTHROPIC',
  params: {
    model: 'claude-sonnet-4-20250514'
  },
  endpoints: [
    {
      name: 'Anthropic Claude Sonnet 4',
      url: 'https://api.anthropic.com/v1/messages',
      headers: {
        // NOTE(review): '{{ANTHROPIC_API_KEY}}' looks like a placeholder the
        // framework substitutes at request time — confirm against the plugin.
        'x-api-key': '{{ANTHROPIC_API_KEY}}',
        'Content-Type': 'application/json'
      },
      params: {
        model: 'claude-sonnet-4-20250514'
      },
      requestsPerSecond: 10
    }
  ],
  // Token limits and a 30 MiB (31457280-byte) image-size cap.
  maxTokenLength: 200000,
  maxReturnTokens: 64000,
  maxImageSize: 31457280,
  supportsStreaming: true
};

// Shared mock pathway used to construct the plugin in every test below.
const { pathway } = mockPathwayResolverMessages;
34
+
35
// The constructor should wire up the global config, the pathway prompt,
// and enable multimodal support.
test('constructor', (t) => {
  const instance = new ClaudeAnthropicPlugin(pathway, anthropicModel);

  t.is(instance.config, config);
  t.is(instance.pathwayPrompt, mockPathwayResolverMessages.pathway.prompt);
  t.true(instance.isMultiModal);
});
41
+
42
// parseResponse should map an Anthropic text content block to output_text
// and translate stop_reason 'end_turn' to OpenAI-style 'stop'.
test('parseResponse - text content response', (t) => {
  const plugin = new ClaudeAnthropicPlugin(pathway, anthropicModel);

  // Anthropic Messages API response shape: content blocks + usage + stop_reason.
  const dataWithTextContent = {
    content: [
      { type: 'text', text: 'Hello from Anthropic!' }
    ],
    usage: { input_tokens: 10, output_tokens: 5 },
    stop_reason: 'end_turn'
  };

  const result = plugin.parseResponse(dataWithTextContent);

  // Use t.is so a failure reports actual vs expected values;
  // the previous t.truthy(a === b) pattern only reported "false".
  t.is(result.output_text, 'Hello from Anthropic!');
  t.is(result.finishReason, 'stop');
  t.truthy(result.usage);
});
57
+
58
// parseResponse should convert an Anthropic tool_use block into OpenAI-style
// toolCalls (with JSON-stringified arguments) and map stop_reason 'tool_use'
// to finishReason 'tool_calls'.
test('parseResponse - tool calls response', (t) => {
  const plugin = new ClaudeAnthropicPlugin(pathway, anthropicModel);

  const dataWithToolCalls = {
    content: [
      {
        type: 'tool_use',
        id: 'tool_anthropic_1',
        name: 'get_weather',
        input: { location: 'San Francisco' }
      }
    ],
    usage: { input_tokens: 15, output_tokens: 8 },
    stop_reason: 'tool_use'
  };

  const result = plugin.parseResponse(dataWithToolCalls);

  // Use t.is so a failure reports actual vs expected values;
  // the previous t.truthy(a === b) pattern only reported "false".
  t.is(result.output_text, '');
  t.is(result.finishReason, 'tool_calls');
  t.truthy(result.toolCalls);
  t.is(result.toolCalls.length, 1);
  t.is(result.toolCalls[0].id, 'tool_anthropic_1');
  t.is(result.toolCalls[0].function.name, 'get_weather');
  t.is(result.toolCalls[0].function.arguments, '{"location":"San Francisco"}');
});
82
+
83
// The direct Anthropic API takes the model name in the request body and,
// unlike the Vertex variant, must not send anthropic_version there.
test('getRequestParameters includes model in body', async (t) => {
  const plugin = new ClaudeAnthropicPlugin(pathway, anthropicModel);

  const parameters = {
    messages: [{ role: 'user', content: 'Hello' }]
  };
  const requestParams = await plugin.getRequestParameters('', parameters, {});

  // Should have model in request body
  t.is(requestParams.model, 'claude-sonnet-4-20250514');

  // Should NOT have anthropic_version in body (it's a Vertex thing)
  t.is(requestParams.anthropic_version, undefined);
});
99
+
100
// Message conversion is inherited from the parent plugin: the system message
// is hoisted out and user text is wrapped in Claude content blocks.
test('convertMessagesToClaudeVertex preserves message conversion from parent', async (t) => {
  const plugin = new ClaudeAnthropicPlugin(pathway, anthropicModel);

  // Test message conversion directly - this tests the inherited behavior
  const output = await plugin.convertMessagesToClaudeVertex([
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'What is 2+2?' }
  ]);

  // System message should be extracted
  t.is(output.system, 'You are a helpful assistant.');

  // User message should be converted to Claude format
  const { modifiedMessages } = output;
  t.is(modifiedMessages.length, 1);
  t.is(modifiedMessages[0].role, 'user');
  t.deepEqual(modifiedMessages[0].content, [{ type: 'text', text: 'What is 2+2?' }]);
});
119
+
120
// A full tool exchange (ask → tool_use → tool_result) should survive
// conversion with roles and block types intact.
test('handles tool_use and tool_result in messages', async (t) => {
  const plugin = new ClaudeAnthropicPlugin(pathway, anthropicModel);

  const toolUseBlock = {
    type: 'tool_use',
    id: 'tool_1',
    name: 'search',
    input: { query: 'cats' }
  };
  const toolResultBlock = {
    type: 'tool_result',
    tool_use_id: 'tool_1',
    content: 'Found 100 results about cats'
  };
  const messages = [
    { role: 'user', content: 'Search for cats' },
    { role: 'assistant', content: [toolUseBlock] },
    { role: 'user', content: [toolResultBlock] }
  ];

  const { modifiedMessages } = await plugin.convertMessagesToClaudeVertex(messages);

  // Should have 3 messages with proper roles and content types
  t.is(modifiedMessages.length, 3);
  t.is(modifiedMessages[0].role, 'user');
  t.is(modifiedMessages[1].role, 'assistant');
  t.is(modifiedMessages[2].role, 'user');

  // Tool use should be preserved
  t.is(modifiedMessages[1].content[0].type, 'tool_use');
  t.is(modifiedMessages[1].content[0].name, 'search');

  // Tool result should be preserved
  t.is(modifiedMessages[2].content[0].type, 'tool_result');
});
164
+
165
// Document (PDF) content blocks should pass through the inherited conversion
// unchanged, alongside text blocks.
test('convertMessagesToClaudeVertex inherits from parent', async (t) => {
  const plugin = new ClaudeAnthropicPlugin(pathway, anthropicModel);

  // Test with document block - should work same as Claude4VertexPlugin
  const base64Pdf = Buffer.from('Sample PDF content').toString('base64');
  const documentBlock = {
    type: 'document',
    source: {
      type: 'base64',
      media_type: 'application/pdf',
      data: base64Pdf
    }
  };
  const messages = [
    {
      role: 'user',
      content: [{ type: 'text', text: 'Analyze this' }, documentBlock]
    }
  ];

  const { modifiedMessages } = await plugin.convertMessagesToClaudeVertex(messages);

  // Should have both text and document blocks
  const [firstMessage] = modifiedMessages;
  t.is(firstMessage.content.length, 2);
  t.is(firstMessage.content[0].type, 'text');
  t.is(firstMessage.content[1].type, 'document');
});
195
+
196
// A Claude content_block_delta text event should become an OpenAI
// chat.completion.chunk with the text in choices[0].delta.content.
test('SSE conversion inherits from parent', (t) => {
  const plugin = new ClaudeAnthropicPlugin(pathway, anthropicModel);

  // Test content_block_delta event conversion
  const claudeEvent = {
    data: JSON.stringify({
      type: 'content_block_delta',
      delta: { type: 'text_delta', text: 'Hello' }
    })
  };

  const converted = JSON.parse(plugin.convertClaudeSSEToOpenAI(claudeEvent).data);

  t.is(converted.object, 'chat.completion.chunk');
  t.is(converted.choices[0].delta.content, 'Hello');
});
213
+
214
// A Claude content_block_start event announcing a tool_use block should
// surface as an OpenAI-style tool_calls delta.
test('SSE conversion handles tool call events', (t) => {
  const plugin = new ClaudeAnthropicPlugin(pathway, anthropicModel);

  // Test content_block_start for tool_use
  const toolStartEvent = {
    data: JSON.stringify({
      type: 'content_block_start',
      index: 0,
      content_block: {
        type: 'tool_use',
        id: 'call_123',
        name: 'get_weather'
      }
    })
  };

  const converted = JSON.parse(plugin.convertClaudeSSEToOpenAI(toolStartEvent).data);

  t.is(converted.object, 'chat.completion.chunk');
  t.truthy(converted.choices[0].delta.tool_calls);
  t.is(converted.choices[0].delta.tool_calls[0].function.name, 'get_weather');
});