@aj-archipelago/cortex 1.0.4 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -3
- package/config/default.example.json +18 -0
- package/config.js +28 -8
- package/helper_apps/MediaFileChunker/Dockerfile +20 -0
- package/helper_apps/MediaFileChunker/package-lock.json +18 -18
- package/helper_apps/MediaFileChunker/package.json +1 -1
- package/helper_apps/WhisperX/.dockerignore +27 -0
- package/helper_apps/WhisperX/Dockerfile +31 -0
- package/helper_apps/WhisperX/app-ts.py +76 -0
- package/helper_apps/WhisperX/app.py +115 -0
- package/helper_apps/WhisperX/docker-compose.debug.yml +12 -0
- package/helper_apps/WhisperX/docker-compose.yml +10 -0
- package/helper_apps/WhisperX/requirements.txt +6 -0
- package/index.js +1 -1
- package/lib/gcpAuthTokenHelper.js +37 -0
- package/lib/redisSubscription.js +1 -1
- package/package.json +9 -7
- package/pathways/basePathway.js +2 -2
- package/pathways/index.js +8 -2
- package/pathways/summary.js +2 -2
- package/pathways/sys_openai_chat.js +19 -0
- package/pathways/sys_openai_completion.js +11 -0
- package/pathways/{lc_test.mjs → test_langchain.mjs} +1 -1
- package/pathways/test_palm_chat.js +31 -0
- package/pathways/transcribe.js +3 -1
- package/pathways/translate.js +2 -1
- package/{graphql → server}/graphql.js +64 -62
- package/{graphql → server}/pathwayPrompter.js +9 -1
- package/{graphql → server}/pathwayResolver.js +46 -47
- package/{graphql → server}/plugins/azureTranslatePlugin.js +22 -0
- package/{graphql → server}/plugins/modelPlugin.js +15 -42
- package/server/plugins/openAiChatPlugin.js +134 -0
- package/{graphql → server}/plugins/openAiCompletionPlugin.js +38 -2
- package/{graphql → server}/plugins/openAiWhisperPlugin.js +59 -7
- package/server/plugins/palmChatPlugin.js +229 -0
- package/server/plugins/palmCompletionPlugin.js +134 -0
- package/{graphql → server}/prompt.js +11 -4
- package/server/rest.js +321 -0
- package/{graphql → server}/typeDef.js +30 -13
- package/tests/chunkfunction.test.js +1 -1
- package/tests/config.test.js +1 -1
- package/tests/main.test.js +282 -43
- package/tests/mocks.js +1 -1
- package/tests/modelPlugin.test.js +3 -15
- package/tests/openAiChatPlugin.test.js +125 -0
- package/tests/openai_api.test.js +147 -0
- package/tests/palmChatPlugin.test.js +256 -0
- package/tests/palmCompletionPlugin.test.js +87 -0
- package/tests/pathwayResolver.test.js +1 -1
- package/tests/server.js +23 -0
- package/tests/truncateMessages.test.js +1 -1
- package/graphql/plugins/openAiChatPlugin.js +0 -46
- package/tests/chunking.test.js +0 -155
- package/tests/translate.test.js +0 -126
- /package/{graphql → server}/chunker.js +0 -0
- /package/{graphql → server}/parser.js +0 -0
- /package/{graphql → server}/pathwayResponseParser.js +0 -0
- /package/{graphql → server}/plugins/localModelPlugin.js +0 -0
- /package/{graphql → server}/pubsub.js +0 -0
- /package/{graphql → server}/requestState.js +0 -0
- /package/{graphql → server}/resolver.js +0 -0
- /package/{graphql → server}/subscriptions.js +0 -0
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
// openai_api.test.js
// Integration tests for the OpenAI-compatible REST endpoints (/v1/...).

import test from 'ava';
import got from 'got';
import axios from 'axios';
import serverFactory from '../index.js';

// Base URL for the REST API exposed by the test server.
const API_BASE = 'http://localhost:4000/v1';

let testServer;

// Spin up the Cortex server with the REST interface enabled.
test.before(async () => {
  process.env.CORTEX_ENABLE_REST = 'true';
  const { server, startServer } = await serverFactory();
  startServer && await startServer();
  testServer = server;
});

// Always shut the server down, even when tests fail.
test.after.always('cleanup', async () => {
  if (testServer) {
    await testServer.stop();
  }
});

test('GET /models', async (t) => {
  const response = await got(`${API_BASE}/models`, { responseType: 'json' });
  t.is(response.statusCode, 200);
  t.is(response.body.object, 'list');
  t.true(Array.isArray(response.body.data));
});

test('POST /completions', async (t) => {
  const response = await got.post(`${API_BASE}/completions`, {
    json: {
      model: 'gpt-3.5-turbo',
      prompt: 'Word to your motha!',
      stream: false,
    },
    responseType: 'json',
  });

  t.is(response.statusCode, 200);
  t.is(response.body.object, 'text_completion');
  t.true(Array.isArray(response.body.choices));
});

test('POST /chat/completions', async (t) => {
  const response = await got.post(`${API_BASE}/chat/completions`, {
    json: {
      model: 'gpt-3.5-turbo',
      messages: [{ role: 'user', content: 'Hello!' }],
      stream: false,
    },
    responseType: 'json',
  });

  t.is(response.statusCode, 200);
  t.is(response.body.object, 'chat.completion');
  t.true(Array.isArray(response.body.choices));
});

/**
 * Connects to an SSE endpoint, runs `customAssertions` against every JSON
 * event received, and resolves once the terminating `data: [DONE]` event
 * arrives.
 *
 * FIXED: the original wrapped everything in the
 * `new Promise(async (resolve, reject) => ...)` anti-pattern. An exception
 * thrown inside the async executor — or inside the 'data' listener, e.g. from
 * `JSON.parse` on a malformed event — escaped the try/catch and hung the
 * test. The rewrite performs the HTTP request first, wraps only the
 * event-stream consumption in a Promise, handles parse failures explicitly,
 * and registers an 'error' listener on the stream. It also counts only
 * non-empty events, so blank keep-alive lines no longer inflate `eventCount`.
 *
 * @param {string} url - base URL of the API (e.g. API_BASE)
 * @param {string} endpoint - path to POST to (e.g. '/completions')
 * @param {object} payload - JSON request body (must set stream: true)
 * @param {object} t - AVA execution context
 * @param {(t: object, messageJson: object) => void} customAssertions - per-event checks
 * @returns {Promise<void>} resolves on [DONE], rejects on stream/parse errors
 */
async function connectToSSEEndpoint(url, endpoint, payload, t, customAssertions) {
  const instance = axios.create({
    baseURL: url,
    responseType: 'stream',
  });

  const response = await instance.post(endpoint, payload);
  const responseData = response.data;

  // Some transports hand back an array whose first element is the stream.
  const incomingMessage =
    Array.isArray(responseData) && responseData.length > 0 ? responseData[0] : responseData;

  return new Promise((resolve, reject) => {
    let eventCount = 0;

    incomingMessage.on('error', reject);

    incomingMessage.on('data', (data) => {
      const events = data.toString().split('\n');

      for (const event of events) {
        if (event.trim() === '') continue;

        eventCount++;

        if (event.trim() === 'data: [DONE]') {
          // At least one content event must precede the DONE marker.
          t.truthy(eventCount > 1);
          resolve();
          return;
        }

        const message = event.replace(/^data: /, '');

        let messageJson;
        try {
          messageJson = JSON.parse(message);
        } catch (error) {
          reject(new Error(`Invalid SSE JSON event: ${message}`, { cause: error }));
          return;
        }

        customAssertions(t, messageJson);
      }
    });
  });
}

test('POST SSE: /v1/completions should send a series of events and a [DONE] event', async (t) => {
  const payload = {
    model: 'gpt-3.5-turbo',
    prompt: 'Word to your motha!',
    stream: true,
  };

  const completionsAssertions = (t, messageJson) => {
    t.truthy(messageJson.id);
    t.is(messageJson.object, 'text_completion');
    t.truthy(messageJson.choices[0].finish_reason === null || messageJson.choices[0].finish_reason === 'stop');
  };

  // Reuse API_BASE instead of re-declaring the URL literal.
  await connectToSSEEndpoint(API_BASE, '/completions', payload, t, completionsAssertions);
});

test('POST SSE: /v1/chat/completions should send a series of events and a [DONE] event', async (t) => {
  const payload = {
    model: 'gpt-3.5-turbo',
    messages: [
      {
        role: 'user',
        content: 'Hello!',
      },
    ],
    stream: true,
  };

  const chatCompletionsAssertions = (t, messageJson) => {
    t.truthy(messageJson.id);
    t.is(messageJson.object, 'chat.completion.chunk');
    t.truthy(messageJson.choices[0].delta);
    t.truthy(messageJson.choices[0].finish_reason === null || messageJson.choices[0].finish_reason === 'stop');
  };

  await connectToSSEEndpoint(API_BASE, '/chat/completions', payload, t, chatCompletionsAssertions);
});
|
|
147
|
+
|
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
// test_palmChatPlugin.js
// Unit tests for the PaLM chat plugin's message conversion, prompt
// compilation, response parsing, and request logging.

import test from 'ava';
import PalmChatPlugin from '../server/plugins/palmChatPlugin.js';
import { mockConfig } from './mocks.js';

// Each test gets a fresh plugin instance wired to the mock config.
test.beforeEach((t) => {
  const pathway = 'testPathway';
  t.context = { palmChatPlugin: new PalmChatPlugin(mockConfig, pathway) };
});

test('convertMessagesToPalm', (t) => {
  const { palmChatPlugin } = t.context;

  // System messages become context; consecutive same-author messages merge.
  const messages = [
    { role: 'system', content: 'System Message' },
    { role: 'user', content: 'User Message' },
    { role: 'user', content: 'User Message 2' },
  ];

  t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), {
    modifiedMessages: [
      { author: 'user', content: 'User Message\nUser Message 2' },
    ],
    context: 'System Message',
  });
});

test('convertMessagesToPalm - already PaLM format', (t) => {
  const { palmChatPlugin } = t.context;

  // Messages already carrying `author` pass through (still merged).
  const messages = [
    { author: 'user', content: 'User Message' },
    { author: 'user', content: 'User Message 2' },
  ];

  t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), {
    modifiedMessages: [
      { author: 'user', content: 'User Message\nUser Message 2' },
    ],
    context: '',
  });
});

test('convertMessagesToPalm - empty string roles', (t) => {
  const { palmChatPlugin } = t.context;

  // A message with an empty role is dropped from the output.
  const messages = [
    { role: '', content: 'Empty role message' },
    { role: 'user', content: 'User Message' },
  ];

  t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), {
    modifiedMessages: [
      { author: 'user', content: 'User Message' },
    ],
    context: '',
  });
});

test('convertMessagesToPalm - consecutive system messages', (t) => {
  const { palmChatPlugin } = t.context;

  // Multiple system messages are concatenated into a single context string.
  const messages = [
    { role: 'system', content: 'System Message 1' },
    { role: 'system', content: 'System Message 2' },
    { role: 'user', content: 'User Message' },
  ];

  t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), {
    modifiedMessages: [
      { author: 'user', content: 'User Message' },
    ],
    context: 'System Message 1\nSystem Message 2',
  });
});

test('convertMessagesToPalm - multiple authors', (t) => {
  const { palmChatPlugin } = t.context;

  // Only consecutive messages from the SAME author are merged; author
  // changes preserve message boundaries.
  const messages = [
    { role: 'system', content: 'System Message' },
    { author: 'user1', content: 'User1 Message' },
    { author: 'user1', content: 'User1 Message 2' },
    { author: 'user2', content: 'User2 Message' },
    { author: 'assistant', content: 'Assistant Message' },
  ];

  t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), {
    modifiedMessages: [
      { author: 'user1', content: 'User1 Message\nUser1 Message 2' },
      { author: 'user2', content: 'User2 Message' },
      { author: 'assistant', content: 'Assistant Message' },
    ],
    context: 'System Message',
  });
});

test('convertMessagesToPalm - no messages', (t) => {
  const { palmChatPlugin } = t.context;

  // Empty input yields empty output with an empty context.
  t.deepEqual(palmChatPlugin.convertMessagesToPalm([]), {
    modifiedMessages: [],
    context: '',
  });
});

test('convertMessagesToPalm - only system messages', (t) => {
  const { palmChatPlugin } = t.context;

  // System-only input produces no chat messages, just context.
  const messages = [
    { role: 'system', content: 'System Message 1' },
    { role: 'system', content: 'System Message 2' },
  ];

  t.deepEqual(palmChatPlugin.convertMessagesToPalm(messages), {
    modifiedMessages: [],
    context: 'System Message 1\nSystem Message 2',
  });
});

test('getCompiledContext', (t) => {
  const { palmChatPlugin } = t.context;

  // The context template is compiled against text + parameters.
  const compiled = palmChatPlugin.getCompiledContext(
    'Hello',
    { name: 'John' },
    '{{text}} from {{name}}',
  );

  t.is(compiled, 'Hello from John');
});

test('getCompiledExamples', (t) => {
  const { palmChatPlugin } = t.context;

  // Both the input and output sides of each example are template-compiled.
  const examples = [
    {
      input: { content: 'Input: {{text}} from {{name}}' },
      output: { content: 'Output: {{text}} to {{name}}' },
    },
  ];

  const compiled = palmChatPlugin.getCompiledExamples('Greetings', { name: 'Jane' }, examples);

  t.deepEqual(compiled, [
    {
      input: { content: 'Input: Greetings from Jane' },
      output: { content: 'Output: Greetings to Jane' },
    },
  ]);
});

test('getRequestParameters', (t) => {
  const { palmChatPlugin } = t.context;

  const messages = [
    { role: 'system', content: 'System Message' },
    { role: 'user', content: 'Hello' },
    { role: 'assistant', content: 'What can I do for you?' },
    { role: 'user', content: 'Be my assistant!' },
  ];
  const prompt = { context: '{{text}} from {{name}}', examples: [], messages };

  const requestParameters = palmChatPlugin.getRequestParameters(
    'Hello',
    { stream: false, name: 'John' },
    prompt,
  );

  // The first instance message should be the converted user turn.
  const requestMessages = requestParameters.instances[0].messages;
  t.is(requestMessages[0].author, 'user');
  t.is(requestMessages[0].content, 'Hello');
});

test('getSafetyAttributes', (t) => {
  const { palmChatPlugin } = t.context;

  // Safety attributes are lifted from the first prediction.
  const responseData = {
    predictions: [
      {
        safetyAttributes: {
          blocked: false,
        },
      },
    ],
  };

  t.deepEqual(palmChatPlugin.getSafetyAttributes(responseData), { blocked: false });
});

test('parseResponse', (t) => {
  const { palmChatPlugin } = t.context;

  // The first candidate's content is the parsed result.
  const responseData = {
    predictions: [
      {
        candidates: [
          {
            content: 'Hello, how can I help you today?',
          },
        ],
      },
    ],
  };

  t.is(palmChatPlugin.parseResponse(responseData), 'Hello, how can I help you today?');
});

test('logRequestData', (t) => {
  const { palmChatPlugin } = t.context;

  const data = {
    instances: [
      {
        messages: [
          { author: 'user', content: 'Hello' },
          { author: 'assistant', content: 'How can I help you?' },
        ],
      },
    ],
  };
  const responseData = {
    predictions: [
      {
        candidates: [
          {
            content: 'Hello, how can I help you today?',
          },
        ],
      },
    ],
  };
  const prompt = { debugInfo: '' };

  // Capture console.log output, then restore it.
  const originalLog = console.log;
  let captured = '';
  console.log = (msg) => (captured += msg + '\n');
  try {
    palmChatPlugin.logRequestData(data, responseData, prompt);
  } finally {
    console.log = originalLog;
  }

  t.true(captured.includes('Message 1:'));
  t.true(captured.includes('Message 2:'));
  t.true(captured.includes('> Hello, how can I help you today?'));
});
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
// palmCompletionPlugin.test.js
// Unit tests for the PaLM text-completion plugin.

import test from 'ava';
import PalmCompletionPlugin from '../server/plugins/palmCompletionPlugin.js';
import { mockConfig } from './mocks.js';

// Fresh plugin instance per test, backed by the mock config.
test.beforeEach((t) => {
  const pathway = 'testPathway';
  t.context = { palmCompletionPlugin: new PalmCompletionPlugin(mockConfig, pathway) };
});

test('getRequestParameters', (t) => {
  const { palmCompletionPlugin } = t.context;

  // The prompt template is compiled against text + parameters and placed
  // on the first instance.
  const prompt = { prompt: '{{text}} from {{name}}' };
  const requestParameters = palmCompletionPlugin.getRequestParameters(
    'Hello',
    { stream: false, name: 'John' },
    prompt,
  );

  t.is(requestParameters.instances[0].prompt, 'Hello from John');
});

test('parseResponse', (t) => {
  const { palmCompletionPlugin } = t.context;

  // The first prediction's content is the parsed result.
  const responseData = {
    predictions: [
      {
        content: 'Hello, how can I help you today?',
      },
    ],
  };

  t.is(palmCompletionPlugin.parseResponse(responseData), 'Hello, how can I help you today?');
});

test('getSafetyAttributes', (t) => {
  const { palmCompletionPlugin } = t.context;

  // Safety attributes are lifted from the first prediction.
  const responseData = {
    predictions: [
      {
        safetyAttributes: {
          blocked: false,
        },
      },
    ],
  };

  t.deepEqual(palmCompletionPlugin.getSafetyAttributes(responseData), { blocked: false });
});

test('logRequestData', (t) => {
  const { palmCompletionPlugin } = t.context;

  const data = {
    instances: [
      {
        prompt: 'Hello, how can I help you?',
      },
    ],
  };
  const responseData = {
    predictions: [
      {
        content: 'Hello, how can I help you today?',
      },
    ],
  };
  const prompt = { debugInfo: '' };

  // Capture console.log output, then restore it.
  const originalLog = console.log;
  let captured = '';
  console.log = (msg) => (captured += msg + '\n');
  try {
    palmCompletionPlugin.logRequestData(data, responseData, prompt);
  } finally {
    console.log = originalLog;
  }

  t.true(captured.includes('Hello, how can I help you?'));
  t.true(captured.includes('> Hello, how can I help you today?'));
});
|
package/tests/server.js
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import 'dotenv/config'
import { ApolloServer } from 'apollo-server';
import { config } from '../config.js';
import typeDefsresolversFactory from '../index.js';

/**
 * Builds an ApolloServer instance for the test suite, using the typeDefs and
 * resolvers produced by the main server factory and a per-request context
 * carrying the app config plus a fresh requestState object.
 *
 * @returns {Promise<ApolloServer>} a configured (not yet listening) server
 */
export const startTestServer = async () => {
  // The factory is awaited inline; no module-level mutable state is needed.
  const { typeDefs, resolvers } = await typeDefsresolversFactory();

  return new ApolloServer({
    typeDefs,
    resolvers,
    context: () => ({ config, requestState: {} }),
  });
};
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
// ModelPlugin.test.js
|
|
2
2
|
import test from 'ava';
|
|
3
|
-
import ModelPlugin from '../
|
|
3
|
+
import ModelPlugin from '../server/plugins/modelPlugin.js';
|
|
4
4
|
import { encode } from 'gpt-3-encoder';
|
|
5
5
|
import { mockConfig, mockPathwayString } from './mocks.js';
|
|
6
6
|
|
|
@@ -1,46 +0,0 @@
|
|
|
1
|
-
// OpenAIChatPlugin.js
import ModelPlugin from './modelPlugin.js';

// Adapts compiled pathway prompts to the OpenAI Chat API request format and
// dispatches the request through the base ModelPlugin machinery.
class OpenAIChatPlugin extends ModelPlugin {
    constructor(config, pathway) {
        super(config, pathway);
    }

    // Build the OpenAI Chat API request body from the compiled prompt.
    getRequestParameters(text, parameters, prompt) {
        const { modelPromptText, modelPromptMessages, tokenLength } = this.getCompiledPrompt(text, parameters, prompt);
        const { stream } = parameters;

        // Token budget reserved for the prompt within the model's context window.
        const modelTargetTokenLength = this.getModelMaxTokenLength() * this.getPromptTokenRatio();

        // Prefer pre-built chat messages; otherwise wrap the plain prompt text
        // as a single user turn.
        let chatMessages = modelPromptMessages || [{ "role": "user", "content": modelPromptText }];

        // If the prompt overflows the budget, drop the oldest messages until
        // it fits.
        if (tokenLength > modelTargetTokenLength) {
            chatMessages = this.truncateMessagesToTargetLength(chatMessages, modelTargetTokenLength);
        }

        return {
            messages: chatMessages,
            temperature: this.temperature ?? 0.7,
            stream
        };
    }

    // Execute the request against the OpenAI Chat API endpoint.
    async execute(text, parameters, prompt) {
        const requestParameters = this.getRequestParameters(text, parameters, prompt);
        const url = this.requestUrl(text);

        // Model-level default params are applied first; request parameters win.
        const data = { ...(this.model.params || {}), ...requestParameters };

        return this.executeRequest(url, data, {}, this.model.headers || {}, prompt);
    }
}

export default OpenAIChatPlugin;
|