@aj-archipelago/cortex 1.0.4 → 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,31 @@
1
+ //test_palm_chat.mjs
2
+ // Test for handling of prompts in the PaLM chat format for Cortex
3
+
4
+ import { Prompt } from '../graphql/prompt.js';
5
+
6
+ // Description: Have a chat with a bot that uses context to understand the conversation
7
+ export default {
8
+ prompt:
9
+ [
10
+ new Prompt({
11
+ context: "Instructions:\nYou are an AI entity working at a global media network. You are truthful, kind, and helpful. Your expertise includes journalism, journalistic ethics, researching and composing documents, and technology. You know the current date and time - it is {{now}}.",
12
+ examples: [
13
+ {
14
+ input: {"content": "What is your expertise?"},
15
+ output: {"content": "I am an expert in journalism and journalistic ethics."}
16
+ }],
17
+ messages: [
18
+ {"author": "user", "content": "Hi how are you today?"},
19
+ {"author": "assistant", "content": "I am doing well. How are you?"},
20
+ {"author": "user", "content": "I am doing well. What is your name?"},
21
+ {"author": "assistant", "content": "My name is Hula. What is your name?"},
22
+ {"author": "user", "content": "My name is Bob. What is your expertise?"},
23
+ ]}),
24
+ ],
25
+ inputParameters: {
26
+ chatHistory: [],
27
+ contextId: ``,
28
+ },
29
+ model: 'palm-chat',
30
+ useInputChunking: false,
31
+ }
@@ -4,6 +4,7 @@ export default {
4
4
  inputParameters: {
5
5
  file: ``,
6
6
  language: ``,
7
+ responseFormat: `text`,
7
8
  },
8
9
  timeout: 1800, // in seconds
9
10
  };
@@ -14,6 +14,7 @@ export default {
14
14
  },
15
15
 
16
16
  // Set the timeout for the translation process, in seconds.
17
- timeout: 300,
17
+ timeout: 400,
18
+ inputChunkSize: 500,
18
19
  };
19
20
 
@@ -13,10 +13,11 @@ test.after.always(async () => {
13
13
  });
14
14
 
15
15
  test('chunking test of translate endpoint with huge text', async t => {
16
- t.timeout(180000);
16
+ t.timeout(400000);
17
17
  const response = await testServer.executeOperation({
18
- query: 'query translate($text: String!) { translate(text: $text) { result } }',
18
+ query: 'query translate($text: String!, $to: String) { translate(text: $text, to: $to) { result } }',
19
19
  variables: {
20
+ to: 'en',
20
21
  text: `Lorem ipsum dolor sit amet, consectetur adipiscing elit. In id erat sem. Phasellus ac dapibus purus, in fermentum nunc. Mauris quis rutrum magna. Quisque rutrum, augue vel blandit posuere, augue magna convallis turpis, nec elementum augue mauris sit amet nunc. Aenean sit amet leo est. Nunc ante ex, blandit et felis ut, iaculis lacinia est. Phasellus dictum orci id libero ullamcorper tempor.
21
22
 
22
23
  Vivamus id pharetra odio.Sed consectetur leo sed tortor dictum venenatis.Donec gravida libero non accumsan suscipit.Donec lectus turpis, ullamcorper eu pulvinar iaculis, ornare ut risus.Phasellus aliquam, turpis quis viverra condimentum, risus est pretium metus, in porta ipsum tortor vitae elit.Pellentesque id finibus erat.In suscipit, sapien non posuere dignissim, augue nisl ultrices tortor, sit amet eleifend nibh elit at risus.
@@ -63,7 +64,7 @@ Mauris diam dolor, maximus et ultrices sed, semper sed felis.Morbi ac eros tellu
63
64
  });
64
65
 
65
66
  test('chunking test of translate endpoint with single long text sentence', async t => {
66
- t.timeout(180000);
67
+ t.timeout(400000);
67
68
  const response = await testServer.executeOperation({
68
69
  query: 'query translate($text: String!) { translate(text: $text) { result } }',
69
70
  variables: {
@@ -76,7 +77,7 @@ test('chunking test of translate endpoint with single long text sentence', async
76
77
  });
77
78
 
78
79
  test('chunking test of translate endpoint with two long text sentence', async t => {
79
- t.timeout(180000);
80
+ t.timeout(400000);
80
81
  const response = await testServer.executeOperation({
81
82
  query: 'query translate($text: String!) { translate(text: $text) { result } }',
82
83
  variables: {
@@ -89,10 +90,11 @@ test('chunking test of translate endpoint with two long text sentence', async t
89
90
  });
90
91
 
91
92
  test('chunking test...', async t => {
92
- t.timeout(180000);
93
+ t.timeout(400000);
93
94
  const response = await testServer.executeOperation({
94
- query: 'query translate($text: String!) { translate(text: $text) { result } }',
95
+ query: 'query translate($text: String!, $to: String) { translate(text: $text, to: $to) { result } }',
95
96
  variables: {
97
+ to: 'en',
96
98
  text: `
97
99
  صعدت روسيا هجماتها في أنحاء أوكرانيا، بعد يوم من إعلان الغرب مدّ كييف بدبابات قتالية، واستهدفت عشرات الصواريخ والمسيّرات الروسية العاصمة الأوكرانية ومدنا في الجنوب والشرق، واعتبر الكرملين أن الدبابات لن تغيّر من طبيعة المعركة، في حين أعلنت وزارة الدفاع الأوكرانية أن هناك تحضيرات قتالية روسية انطلاقا من القرم.
98
100
 
@@ -79,19 +79,7 @@ test('requestUrl', (t) => {
79
79
  t.is(modelPlugin.requestUrl(), expectedUrl, 'requestUrl should return the correct URL');
80
80
  });
81
81
 
82
- test('parseResponse - single choice', (t) => {
83
- const { modelPlugin } = t.context;
84
- const singleChoiceResponse = {
85
- choices: [{
86
- text: '42'
87
- }]
88
- };
89
-
90
- const result = modelPlugin.parseResponse(singleChoiceResponse);
91
- t.is(result, '42', 'parseResponse should return the correct value for a single choice response');
92
- });
93
-
94
- test('parseResponse - multiple choices', (t) => {
82
+ test('default parseResponse', (t) => {
95
83
  const { modelPlugin } = t.context;
96
84
  const multipleChoicesResponse = {
97
85
  choices: [
@@ -101,7 +89,7 @@ test('parseResponse - multiple choices', (t) => {
101
89
  };
102
90
 
103
91
  const result = modelPlugin.parseResponse(multipleChoicesResponse);
104
- t.deepEqual(result, multipleChoicesResponse.choices, 'parseResponse should return the choices array for multiple choices response');
92
+ t.deepEqual(result, multipleChoicesResponse, 'default parseResponse should return the entire multiple choices response object');
105
93
  });
106
94
 
107
95
  test('truncateMessagesToTargetLength', (t) => {
@@ -0,0 +1,125 @@
1
+ import test from 'ava';
2
+ import OpenAIChatPlugin from '../graphql/plugins/openAiChatPlugin.js';
3
+ import { mockConfig, mockPathwayString, mockPathwayFunction, mockPathwayMessages } from './mocks.js';
4
+
5
+ // Test the constructor
6
+ test('constructor', (t) => {
7
+ const plugin = new OpenAIChatPlugin(mockConfig, mockPathwayString);
8
+ t.is(plugin.config, mockConfig);
9
+ t.is(plugin.pathwayPrompt, mockPathwayString.prompt);
10
+ });
11
+
12
+ // Test the convertPalmToOpenAIMessages function
13
+ test('convertPalmToOpenAIMessages', (t) => {
14
+ const plugin = new OpenAIChatPlugin(mockConfig, mockPathwayString);
15
+ const context = 'This is a test context.';
16
+ const examples = [
17
+ {
18
+ input: { author: 'user', content: 'Hello' },
19
+ output: { author: 'assistant', content: 'Hi there!' },
20
+ },
21
+ ];
22
+ const messages = [
23
+ { author: 'user', content: 'How are you?' },
24
+ { author: 'assistant', content: 'I am doing well, thank you!' },
25
+ ];
26
+ const result = plugin.convertPalmToOpenAIMessages(context, examples, messages);
27
+ t.deepEqual(result, [
28
+ { role: 'system', content: 'This is a test context.' },
29
+ { role: 'user', content: 'Hello' },
30
+ { role: 'assistant', content: 'Hi there!' },
31
+ { role: 'user', content: 'How are you?' },
32
+ { role: 'assistant', content: 'I am doing well, thank you!' },
33
+ ]);
34
+ });
35
+
36
+ // Test the getRequestParameters function
37
+ test('getRequestParameters', async (t) => {
38
+ const plugin = new OpenAIChatPlugin(mockConfig, mockPathwayString);
39
+ const text = 'Help me';
40
+ const parameters = { name: 'John', age: 30 };
41
+ const prompt = mockPathwayString.prompt;
42
+ const result = await plugin.getRequestParameters(text, parameters, prompt);
43
+ t.deepEqual(result, {
44
+ messages: [
45
+ { role: 'user', content: 'User: Help me\nAssistant: Please help John who is 30 years old.' },
46
+ ],
47
+ temperature: 0.7,
48
+ });
49
+ });
50
+
51
+ // Test the execute function
52
+ test('execute', async (t) => {
53
+ const plugin = new OpenAIChatPlugin(mockConfig, mockPathwayString);
54
+ const text = 'Help me';
55
+ const parameters = { name: 'John', age: 30 };
56
+ const prompt = mockPathwayString.prompt;
57
+
58
+ // Mock the executeRequest function
59
+ plugin.executeRequest = () => {
60
+ return {
61
+ choices: [
62
+ {
63
+ message: {
64
+ content: 'Sure, I can help John who is 30 years old.',
65
+ },
66
+ },
67
+ ],
68
+ };
69
+ };
70
+
71
+ const result = await plugin.execute(text, parameters, prompt);
72
+ t.deepEqual(result, {
73
+ choices: [
74
+ {
75
+ message: {
76
+ content: 'Sure, I can help John who is 30 years old.',
77
+ },
78
+ },
79
+ ],
80
+ });
81
+ });
82
+
83
+ // Test the parseResponse function
84
+ test('parseResponse', (t) => {
85
+ const plugin = new OpenAIChatPlugin(mockConfig, mockPathwayString);
86
+ const data = {
87
+ choices: [
88
+ {
89
+ message: {
90
+ content: 'Sure, I can help John who is 30 years old.',
91
+ },
92
+ },
93
+ ],
94
+ };
95
+ const result = plugin.parseResponse(data);
96
+ t.is(result, 'Sure, I can help John who is 30 years old.');
97
+ });
98
+
99
+ // Test the logRequestData function
100
+ test('logRequestData', (t) => {
101
+ const plugin = new OpenAIChatPlugin(mockConfig, mockPathwayString);
102
+ const data = {
103
+ messages: [
104
+ { role: 'user', content: 'User: Help me\nAssistant: Please help John who is 30 years old.' },
105
+ ],
106
+ };
107
+ const responseData = {
108
+ choices: [
109
+ {
110
+ message: {
111
+ content: 'Sure, I can help John who is 30 years old.',
112
+ },
113
+ },
114
+ ],
115
+ };
116
+ const prompt = mockPathwayString.prompt;
117
+
118
+ // Mock console.log function
119
+ const originalConsoleLog = console.log;
120
+ console.log = () => {};
121
+
122
+ t.notThrows(() => plugin.logRequestData(data, responseData, prompt));
123
+
124
+ console.log = originalConsoleLog;
125
+ });
@@ -0,0 +1,256 @@
1
+ // test_palmChatPlugin.js
2
+ import test from 'ava';
3
+ import PalmChatPlugin from '../graphql/plugins/palmChatPlugin.js';
4
+ import { mockConfig } from './mocks.js';
5
+
6
+ test.beforeEach((t) => {
7
+ const pathway = 'testPathway';
8
+ const palmChatPlugin = new PalmChatPlugin(mockConfig, pathway);
9
+ t.context = { palmChatPlugin };
10
+ });
11
+
12
+ test('convertMessagesToPalm', (t) => {
13
+ const { palmChatPlugin } = t.context;
14
+ const messages = [
15
+ { role: 'system', content: 'System Message' },
16
+ { role: 'user', content: 'User Message' },
17
+ { role: 'user', content: 'User Message 2'},
18
+ ];
19
+
20
+ const expectedResult = {
21
+ messages: [
22
+ { author: 'user', content: 'User Message\nUser Message 2' },
23
+ ],
24
+ context: 'System Message',
25
+ };
26
+
27
+ t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), expectedResult);
28
+ });
29
+
30
+ test('convertMessagesToPalm - already PaLM format', (t) => {
31
+ const { palmChatPlugin } = t.context;
32
+ const messages = [
33
+ { author: 'user', content: 'User Message' },
34
+ { author: 'user', content: 'User Message 2'},
35
+ ];
36
+
37
+ const expectedResult = {
38
+ messages: [
39
+ { author: 'user', content: 'User Message\nUser Message 2' },
40
+ ],
41
+ context: '',
42
+ };
43
+
44
+ t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), expectedResult);
45
+ });
46
+
47
+ test('convertMessagesToPalm - empty string roles', (t) => {
48
+ const { palmChatPlugin } = t.context;
49
+ const messages = [
50
+ { role: '', content: 'Empty role message' },
51
+ { role: 'user', content: 'User Message' },
52
+ ];
53
+
54
+ const expectedResult = {
55
+ messages: [
56
+ { author: 'user', content: 'User Message' },
57
+ ],
58
+ context: '',
59
+ };
60
+
61
+ t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), expectedResult);
62
+ });
63
+
64
+ test('convertMessagesToPalm - consecutive system messages', (t) => {
65
+ const { palmChatPlugin } = t.context;
66
+ const messages = [
67
+ { role: 'system', content: 'System Message 1' },
68
+ { role: 'system', content: 'System Message 2' },
69
+ { role: 'user', content: 'User Message' },
70
+ ];
71
+
72
+ const expectedResult = {
73
+ messages: [
74
+ { author: 'user', content: 'User Message' },
75
+ ],
76
+ context: 'System Message 1\nSystem Message 2',
77
+ };
78
+
79
+ t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), expectedResult);
80
+ });
81
+
82
+ test('convertMessagesToPalm - multiple authors', (t) => {
83
+ const { palmChatPlugin } = t.context;
84
+ const messages = [
85
+ { role: 'system', content: 'System Message' },
86
+ { author: 'user1', content: 'User1 Message' },
87
+ { author: 'user1', content: 'User1 Message 2' },
88
+ { author: 'user2', content: 'User2 Message' },
89
+ { author: 'assistant', content: 'Assistant Message' },
90
+ ];
91
+
92
+ const expectedResult = {
93
+ messages: [
94
+ { author: 'user1', content: 'User1 Message\nUser1 Message 2' },
95
+ { author: 'user2', content: 'User2 Message' },
96
+ { author: 'assistant', content: 'Assistant Message' },
97
+ ],
98
+ context: 'System Message',
99
+ };
100
+
101
+ t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), expectedResult);
102
+ });
103
+
104
+ test('convertMessagesToPalm - no messages', (t) => {
105
+ const { palmChatPlugin } = t.context;
106
+ const messages = [];
107
+
108
+ const expectedResult = {
109
+ messages: [],
110
+ context: '',
111
+ };
112
+
113
+ t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), expectedResult);
114
+ });
115
+
116
+ test('convertMessagesToPalm - only system messages', (t) => {
117
+ const { palmChatPlugin } = t.context;
118
+ const messages = [
119
+ { role: 'system', content: 'System Message 1' },
120
+ { role: 'system', content: 'System Message 2' },
121
+ ];
122
+
123
+ const expectedResult = {
124
+ messages: [],
125
+ context: 'System Message 1\nSystem Message 2',
126
+ };
127
+
128
+ t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), expectedResult);
129
+ });
130
+
131
+ test('getCompiledContext', (t) => {
132
+ const { palmChatPlugin } = t.context;
133
+ const text = 'Hello';
134
+ const parameters = { name: 'John' };
135
+ const context = '{{text}} from {{name}}';
136
+
137
+ const expectedResult = 'Hello from John';
138
+
139
+ t.is(palmChatPlugin.getCompiledContext(text, parameters, context), expectedResult);
140
+ });
141
+
142
+ test('getCompiledExamples', (t) => {
143
+ const { palmChatPlugin } = t.context;
144
+ const text = 'Greetings';
145
+ const parameters = { name: 'Jane' };
146
+ const examples = [
147
+ {
148
+ input: { content: 'Input: {{text}} from {{name}}' },
149
+ output: { content: 'Output: {{text}} to {{name}}' },
150
+ },
151
+ ];
152
+
153
+ const expectedResult = [
154
+ {
155
+ input: { content: 'Input: Greetings from Jane' },
156
+ output: { content: 'Output: Greetings to Jane' },
157
+ },
158
+ ];
159
+
160
+ t.deepEqual(palmChatPlugin.getCompiledExamples(text, parameters, examples), expectedResult);
161
+ });
162
+
163
+ test('getRequestParameters', (t) => {
164
+ const { palmChatPlugin } = t.context;
165
+ const text = 'Hello';
166
+ const parameters = { stream: false, name: 'John'};
167
+ const messages = [
168
+ { role: 'system', content: 'System Message' },
169
+ { role: 'user', content: 'Hello' },
170
+ { role: 'assistant', content: 'What can I do for you?' },
171
+ { role: 'user', content: 'Be my assistant!' },
172
+ ];
173
+ const prompt = { context: '{{text}} from {{name}}', examples: [], messages };
174
+
175
+ const requestParameters = palmChatPlugin.getRequestParameters(text, parameters, prompt);
176
+ const requestMessages = requestParameters.instances[0].messages;
177
+
178
+ t.is(requestMessages[0].author, 'user');
179
+ t.is(requestMessages[0].content, 'Hello');
180
+ });
181
+
182
+ test('getSafetyAttributes', (t) => {
183
+ const { palmChatPlugin } = t.context;
184
+ const responseData = {
185
+ predictions: [
186
+ {
187
+ safetyAttributes: {
188
+ blocked: false,
189
+ },
190
+ },
191
+ ],
192
+ };
193
+
194
+ const expectedResult = {
195
+ blocked: false,
196
+ };
197
+
198
+ t.deepEqual(palmChatPlugin.getSafetyAttributes(responseData), expectedResult);
199
+ });
200
+
201
+ test('parseResponse', (t) => {
202
+ const { palmChatPlugin } = t.context;
203
+ const responseData = {
204
+ predictions: [
205
+ {
206
+ candidates: [
207
+ {
208
+ content: 'Hello, how can I help you today?',
209
+ },
210
+ ],
211
+ },
212
+ ],
213
+ };
214
+
215
+ const expectedResult = 'Hello, how can I help you today?';
216
+
217
+ t.is(palmChatPlugin.parseResponse(responseData), expectedResult);
218
+ });
219
+
220
+ test('logRequestData', (t) => {
221
+ const { palmChatPlugin } = t.context;
222
+ const data = {
223
+ instances: [
224
+ {
225
+ messages: [
226
+ { author: 'user', content: 'Hello' },
227
+ { author: 'assistant', content: 'How can I help you?' },
228
+ ],
229
+ },
230
+ ],
231
+ };
232
+ const responseData = {
233
+ predictions: [
234
+ {
235
+ candidates: [
236
+ {
237
+ content: 'Hello, how can I help you today?',
238
+ },
239
+ ],
240
+ },
241
+ ],
242
+ };
243
+ const prompt = { debugInfo: '' };
244
+
245
+ const consoleLog = console.log;
246
+ let logOutput = '';
247
+ console.log = (msg) => (logOutput += msg + '\n');
248
+
249
+ palmChatPlugin.logRequestData(data, responseData, prompt);
250
+
251
+ console.log = consoleLog;
252
+
253
+ t.true(logOutput.includes('Message 1:'));
254
+ t.true(logOutput.includes('Message 2:'));
255
+ t.true(logOutput.includes('> Hello, how can I help you today?'));
256
+ });
@@ -0,0 +1,87 @@
1
+ // palmCompletionPlugin.test.js
2
+
3
+ import test from 'ava';
4
+ import PalmCompletionPlugin from '../graphql/plugins/palmCompletionPlugin.js';
5
+ import { mockConfig } from './mocks.js';
6
+
7
+ test.beforeEach((t) => {
8
+ const pathway = 'testPathway';
9
+ const palmCompletionPlugin = new PalmCompletionPlugin(mockConfig, pathway);
10
+ t.context = { palmCompletionPlugin };
11
+ });
12
+
13
+ test('getRequestParameters', (t) => {
14
+ const { palmCompletionPlugin } = t.context;
15
+ const text = 'Hello';
16
+ const parameters = { stream: false, name: 'John' };
17
+ const prompt = {prompt:'{{text}} from {{name}}'};
18
+
19
+ const requestParameters = palmCompletionPlugin.getRequestParameters(text, parameters, prompt);
20
+ const requestPrompt = requestParameters.instances[0].prompt;
21
+
22
+ t.is(requestPrompt, 'Hello from John');
23
+ });
24
+
25
+ test('parseResponse', (t) => {
26
+ const { palmCompletionPlugin } = t.context;
27
+ const responseData = {
28
+ predictions: [
29
+ {
30
+ content: 'Hello, how can I help you today?',
31
+ },
32
+ ],
33
+ };
34
+
35
+ const expectedResult = 'Hello, how can I help you today?';
36
+
37
+ t.is(palmCompletionPlugin.parseResponse(responseData), expectedResult);
38
+ });
39
+
40
+ test('getSafetyAttributes', (t) => {
41
+ const { palmCompletionPlugin } = t.context;
42
+ const responseData = {
43
+ predictions: [
44
+ {
45
+ safetyAttributes: {
46
+ blocked: false,
47
+ },
48
+ },
49
+ ],
50
+ };
51
+
52
+ const expectedResult = {
53
+ blocked: false,
54
+ };
55
+
56
+ t.deepEqual(palmCompletionPlugin.getSafetyAttributes(responseData), expectedResult);
57
+ });
58
+
59
+ test('logRequestData', (t) => {
60
+ const { palmCompletionPlugin } = t.context;
61
+ const data = {
62
+ instances: [
63
+ {
64
+ prompt: 'Hello, how can I help you?',
65
+ },
66
+ ],
67
+ };
68
+ const responseData = {
69
+ predictions: [
70
+ {
71
+ content: 'Hello, how can I help you today?',
72
+ },
73
+ ],
74
+ };
75
+ const prompt = { debugInfo: '' };
76
+
77
+ const consoleLog = console.log;
78
+ let logOutput = '';
79
+ console.log = (msg) => (logOutput += msg + '\n');
80
+
81
+ palmCompletionPlugin.logRequestData(data, responseData, prompt);
82
+
83
+ console.log = consoleLog;
84
+
85
+ t.true(logOutput.includes('Hello, how can I help you?'));
86
+ t.true(logOutput.includes('> Hello, how can I help you today?'));
87
+ });