@aj-archipelago/cortex 1.0.7 → 1.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/server/plugins/azureTranslatePlugin.js +4 -4
- package/server/plugins/modelPlugin.js +23 -5
- package/server/plugins/openAiChatPlugin.js +4 -4
- package/server/plugins/openAiCompletionPlugin.js +5 -4
- package/server/plugins/palmChatPlugin.js +4 -4
- package/server/plugins/palmCompletionPlugin.js +3 -3
- package/server/rest.js +1 -0
- package/tests/modelPlugin.test.js +3 -4
- package/tests/openAiChatPlugin.test.js +8 -6
- package/tests/palmChatPlugin.test.js +3 -1
- package/tests/palmCompletionPlugin.test.js +3 -1
- package/tests/truncateMessages.test.js +2 -2
package/package.json
CHANGED

package/server/plugins/azureTranslatePlugin.js
CHANGED

@@ -24,8 +24,9 @@ class AzureTranslatePlugin extends ModelPlugin {
     }

     // Execute the request to the Azure Translate API
-    async execute(text, parameters, prompt) {
+    async execute(text, parameters, prompt, pathwayResolver) {
         const requestParameters = this.getRequestParameters(text, parameters, prompt);
+        const requestId = pathwayResolver?.requestId;

         const url = this.requestUrl(text);

@@ -33,7 +34,7 @@ class AzureTranslatePlugin extends ModelPlugin {
         const params = requestParameters.params;
         const headers = this.model.headers || {};

-        return this.executeRequest(url, data, params, headers, prompt);
+        return this.executeRequest(url, data, params, headers, prompt, requestId);
     }

     // Parse the response from the Azure Translate API
@@ -47,8 +48,7 @@ class AzureTranslatePlugin extends ModelPlugin {

     // Override the logging function to display the request and response
     logRequestData(data, responseData, prompt) {
-
-        console.log(separator);
+        this.logAIRequestFinished();

         const modelInput = data[0].Text;

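Across the plugins in this release, execute gains a trailing pathwayResolver argument, and only its requestId is read (via pathwayResolver?.requestId) before being forwarded to executeRequest. A minimal sketch of the calling side, assuming a hypothetical resolver object and config/pathway/modelName/model values already in scope (none of the values below come from the package):

// Hypothetical caller sketch: the resolver only needs a requestId here.
import AzureTranslatePlugin from './server/plugins/azureTranslatePlugin.js';

const pathwayResolver = { requestId: 'req-42' }; // illustrative id, not a real value
const plugin = new AzureTranslatePlugin(config, pathway, modelName, model); // assumed in scope

// execute forwards pathwayResolver?.requestId into executeRequest, so the
// request and response log lines for this call share the same id.
const result = await plugin.execute('Hello world', { to: 'es' }, pathway.prompt, pathwayResolver);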
package/server/plugins/modelPlugin.js
CHANGED

@@ -30,7 +30,8 @@ class ModelPlugin {
             }
         }

-        this.requestCount =
+        this.requestCount = 0;
+        this.lastRequestStartTime = new Date();
         this.shouldCache = config.get('enableCache') && (pathway.enableCache || pathway.temperature == 0);
     }

@@ -199,10 +200,24 @@ class ModelPlugin {
     parseResponse(data) { return data; };

     // Default simple logging
+    logRequestStart(url, data) {
+        this.requestCount++;
+        const logMessage = `>>> [${this.requestId}: ${this.pathwayName}.${this.requestCount}] request`;
+        const header = '>'.repeat(logMessage.length);
+        console.log(`\n${header}\n${logMessage}`);
+        console.log(`>>> Making API request to ${url}`);
+    };
+
+    logAIRequestFinished() {
+        const currentTime = new Date();
+        const timeElapsed = (currentTime - this.lastRequestStartTime) / 1000;
+        const logMessage = `<<< [${this.requestId}: ${this.pathwayName}.${this.requestCount}] response - complete in ${timeElapsed}s - data:`;
+        const header = '<'.repeat(logMessage.length);
+        console.log(`\n${header}\n${logMessage}\n`);
+    };
+
     logRequestData(data, responseData, prompt) {
-
-        console.log(separator);
-
+        this.logAIRequestFinished();
         const modelInput = data.prompt || (data.messages && data.messages[0].content) || (data.length > 0 && data[0].Text) || null;

         if (modelInput) {
@@ -214,7 +229,10 @@ class ModelPlugin {
         prompt && prompt.debugInfo && (prompt.debugInfo += `${separator}${JSON.stringify(data)}`);
     }

-    async executeRequest(url, data, params, headers, prompt) {
+    async executeRequest(url, data, params, headers, prompt, requestId) {
+        this.aiRequestStartTime = new Date();
+        this.requestId = requestId;
+        this.logRequestStart(url, data);
         const responseData = await request({ url, data, params, headers, cache: this.shouldCache }, this.modelName);

         if (responseData.error) {
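With these changes, executeRequest stamps this.requestId and calls logRequestStart before the request goes out, and each plugin's logRequestData now opens with logAIRequestFinished, so every model call is bracketed by matching >>>/<<< banners. A rough sketch of exercising the pair directly, with illustrative values ('req-42' and a pathwayName of 'translate' are assumptions, and the banner lengths in the comments are approximate):

// Sketch: invoking the new logging pair directly (shapes assumed from the diff).
import ModelPlugin from './server/plugins/modelPlugin.js';

const plugin = new ModelPlugin(config, pathway, modelName, model); // assumed in scope
plugin.requestId = 'req-42';       // normally set inside executeRequest
plugin.pathwayName = 'translate';  // assumed to be set elsewhere in the class

plugin.logRequestStart('https://api.example.com/v1/chat', {});
// >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
// >>> [req-42: translate.1] request
// >>> Making API request to https://api.example.com/v1/chat

plugin.logAIRequestFinished(); // elapsed time is measured from lastRequestStartTime
// <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
// <<< [req-42: translate.1] response - complete in 0.003s - data: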
package/server/plugins/openAiChatPlugin.js
CHANGED

@@ -76,14 +76,15 @@ class OpenAIChatPlugin extends ModelPlugin {
     }

     // Execute the request to the OpenAI Chat API
-    async execute(text, parameters, prompt) {
+    async execute(text, parameters, prompt, pathwayResolver) {
         const url = this.requestUrl(text);
         const requestParameters = this.getRequestParameters(text, parameters, prompt);
+        const requestId = pathwayResolver?.requestId;

         const data = { ...(this.model.params || {}), ...requestParameters };
         const params = {};
         const headers = this.model.headers || {};
-        return this.executeRequest(url, data, params, headers, prompt);
+        return this.executeRequest(url, data, params, headers, prompt, requestId);
     }

     // Parse the response from the OpenAI Chat API
@@ -105,8 +106,7 @@ class OpenAIChatPlugin extends ModelPlugin {

     // Override the logging function to display the messages and responses
     logRequestData(data, responseData, prompt) {
-
-        console.log(separator);
+        this.logAIRequestFinished();

         const { stream, messages } = data;
         if (messages && messages.length > 1) {
package/server/plugins/openAiCompletionPlugin.js
CHANGED

@@ -1,5 +1,6 @@
 // OpenAICompletionPlugin.js

+import { request } from 'https';
 import ModelPlugin from './modelPlugin.js';
 import { encode } from 'gpt-3-encoder';

@@ -78,12 +79,13 @@ class OpenAICompletionPlugin extends ModelPlugin {
     async execute(text, parameters, prompt, pathwayResolver) {
         const url = this.requestUrl(text);
         const requestParameters = this.getRequestParameters(text, parameters, prompt, pathwayResolver);
-
+        const requestId = pathwayResolver?.requestId;
+
         const data = { ...(this.model.params || {}), ...requestParameters };
         const params = {};
         const headers = this.model.headers || {};

-        return this.executeRequest(url, data, params, headers, prompt);
+        return this.executeRequest(url, data, params, headers, prompt, requestId);
     }

     // Parse the response from the OpenAI Completion API
@@ -105,8 +107,7 @@ class OpenAICompletionPlugin extends ModelPlugin {

     // Override the logging function to log the prompt and response
     logRequestData(data, responseData, prompt) {
-
-        console.log(separator);
+        this.logAIRequestFinished();

         const stream = data.stream;
         const modelInput = data.prompt;
package/server/plugins/palmChatPlugin.js
CHANGED

@@ -137,9 +137,10 @@ class PalmChatPlugin extends ModelPlugin {
     }

     // Execute the request to the PaLM Chat API
-    async execute(text, parameters, prompt) {
+    async execute(text, parameters, prompt, pathwayResolver) {
         const url = this.requestUrl(text);
         const requestParameters = this.getRequestParameters(text, parameters, prompt);
+        const requestId = pathwayResolver?.requestId;

         const data = { ...(this.model.params || {}), ...requestParameters };
         const params = {};
@@ -147,7 +148,7 @@ class PalmChatPlugin extends ModelPlugin {
         const gcpAuthTokenHelper = this.config.get('gcpAuthTokenHelper');
         const authToken = await gcpAuthTokenHelper.getAccessToken();
         headers.Authorization = `Bearer ${authToken}`;
-        return this.executeRequest(url, data, params, headers, prompt);
+        return this.executeRequest(url, data, params, headers, prompt, requestId);
     }

     // Parse the response from the PaLM Chat API
@@ -181,8 +182,7 @@ class PalmChatPlugin extends ModelPlugin {

     // Override the logging function to display the messages and responses
     logRequestData(data, responseData, prompt) {
-
-        console.log(separator);
+        this.logAIRequestFinished();

         const instances = data && data.instances;
         const messages = instances && instances[0] && instances[0].messages;
package/server/plugins/palmCompletionPlugin.js
CHANGED

@@ -55,6 +55,7 @@ class PalmCompletionPlugin extends ModelPlugin {
     async execute(text, parameters, prompt, pathwayResolver) {
         const url = this.requestUrl(text);
         const requestParameters = this.getRequestParameters(text, parameters, prompt, pathwayResolver);
+        const requestId = pathwayResolver?.requestId;

         const data = { ...requestParameters };
         const params = {};
@@ -62,7 +63,7 @@ class PalmCompletionPlugin extends ModelPlugin {
         const gcpAuthTokenHelper = this.config.get('gcpAuthTokenHelper');
         const authToken = await gcpAuthTokenHelper.getAccessToken();
         headers.Authorization = `Bearer ${authToken}`;
-        return this.executeRequest(url, data, params, headers, prompt);
+        return this.executeRequest(url, data, params, headers, prompt, requestId);
     }

     // Parse the response from the PaLM API Text Completion API
@@ -105,8 +106,7 @@ class PalmCompletionPlugin extends ModelPlugin {

     // Override the logging function to log the prompt and response
     logRequestData(data, responseData, prompt) {
-
-        console.log(separator);
+        this.logAIRequestFinished();

         const safetyAttributes = this.getSafetyAttributes(responseData);

package/server/rest.js
CHANGED

package/tests/modelPlugin.test.js
CHANGED

@@ -8,11 +8,10 @@ const DEFAULT_MAX_TOKENS = 4096;
 const DEFAULT_PROMPT_TOKEN_RATIO = 0.5;

 // Mock configuration and pathway objects
-const config =
-const pathway = mockPathwayString;
+const { config, pathway, modelName, model } = mockPathwayResolverString;

 test('ModelPlugin constructor', (t) => {
-    const modelPlugin = new ModelPlugin(
+    const modelPlugin = new ModelPlugin(config, pathway, modelName, model);

     t.is(modelPlugin.modelName, pathway.model, 'modelName should be set from pathway');
     t.deepEqual(modelPlugin.model, config.get('models')[pathway.model], 'model should be set from config');
@@ -21,7 +20,7 @@ test('ModelPlugin constructor', (t) => {
 });

 test.beforeEach((t) => {
-    t.context.modelPlugin = new ModelPlugin(
+    t.context.modelPlugin = new ModelPlugin(config, pathway, modelName, model);
 });

 test('getCompiledPrompt - text and parameters', (t) => {
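All of the test updates destructure config, pathway, modelName, and model from the shared mock resolver objects and pass them positionally to the plugin constructors. The real mocks live in tests/mocks.js and are not shown in this diff; a minimal sketch of the shape these tests appear to rely on, with purely illustrative field values:

// Hypothetical minimal mock: just enough for the constructor assertions above.
export const mockPathwayResolverString = {
    config: {
        get: (key) => ({ enableCache: false, models: { 'test-model': {} } }[key]),
    },
    pathway: { model: 'test-model', prompt: '{{text}}', temperature: 0 },
    modelName: 'test-model',
    model: {},
};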
package/tests/openAiChatPlugin.test.js
CHANGED

@@ -2,16 +2,18 @@ import test from 'ava';
 import OpenAIChatPlugin from '../server/plugins/openAiChatPlugin.js';
 import { mockPathwayResolverMessages } from './mocks.js';

+const { config, pathway, modelName, model } = mockPathwayResolverMessages;
+
 // Test the constructor
 test('constructor', (t) => {
-    const plugin = new OpenAIChatPlugin(
+    const plugin = new OpenAIChatPlugin(config, pathway, modelName, model);
     t.is(plugin.config, mockPathwayResolverMessages.config);
     t.is(plugin.pathwayPrompt, mockPathwayResolverMessages.pathway.prompt);
 });

 // Test the convertPalmToOpenAIMessages function
 test('convertPalmToOpenAIMessages', (t) => {
-    const plugin = new OpenAIChatPlugin(
+    const plugin = new OpenAIChatPlugin(config, pathway, modelName, model);
     const context = 'This is a test context.';
     const examples = [
         {
@@ -35,7 +37,7 @@ test('convertPalmToOpenAIMessages', (t) => {

 // Test the getRequestParameters function
 test('getRequestParameters', async (t) => {
-    const plugin = new OpenAIChatPlugin(
+    const plugin = new OpenAIChatPlugin(config, pathway, modelName, model);
     const text = 'Help me';
     const parameters = { name: 'John', age: 30 };
     const prompt = mockPathwayResolverMessages.pathway.prompt;
@@ -57,7 +59,7 @@ test('getRequestParameters', async (t) => {

 // Test the execute function
 test('execute', async (t) => {
-    const plugin = new OpenAIChatPlugin(
+    const plugin = new OpenAIChatPlugin(config, pathway, modelName, model);
     const text = 'Help me';
     const parameters = { name: 'John', age: 30 };
     const prompt = mockPathwayResolverMessages.pathway.prompt;
@@ -89,7 +91,7 @@ test('execute', async (t) => {

 // Test the parseResponse function
 test('parseResponse', (t) => {
-    const plugin = new OpenAIChatPlugin(
+    const plugin = new OpenAIChatPlugin(config, pathway, modelName, model);
     const data = {
         choices: [
             {
@@ -105,7 +107,7 @@ test('parseResponse', (t) => {

 // Test the logRequestData function
 test('logRequestData', (t) => {
-    const plugin = new OpenAIChatPlugin(
+    const plugin = new OpenAIChatPlugin(config, pathway, modelName, model);
     const data = {
         messages: [
             { role: 'user', content: 'User: Help me\nAssistant: Please help John who is 30 years old.' },
package/tests/palmChatPlugin.test.js
CHANGED

@@ -3,8 +3,10 @@ import test from 'ava';
 import PalmChatPlugin from '../server/plugins/palmChatPlugin.js';
 import { mockPathwayResolverMessages } from './mocks.js';

+const { config, pathway, modelName, model } = mockPathwayResolverMessages;
+
 test.beforeEach((t) => {
-    const palmChatPlugin = new PalmChatPlugin(
+    const palmChatPlugin = new PalmChatPlugin(config, pathway, modelName, model);
     t.context = { palmChatPlugin };
 });

package/tests/palmCompletionPlugin.test.js
CHANGED

@@ -4,8 +4,10 @@ import test from 'ava';
 import PalmCompletionPlugin from '../server/plugins/palmCompletionPlugin.js';
 import { mockPathwayResolverString } from './mocks.js';

+const { config, pathway, modelName, model } = mockPathwayResolverString;
+
 test.beforeEach((t) => {
-    const palmCompletionPlugin = new PalmCompletionPlugin(
+    const palmCompletionPlugin = new PalmCompletionPlugin(config, pathway, modelName, model);
     t.context = { palmCompletionPlugin };
 });

package/tests/truncateMessages.test.js
CHANGED

@@ -4,9 +4,9 @@ import ModelPlugin from '../server/plugins/modelPlugin.js';
 import { encode } from 'gpt-3-encoder';
 import { mockPathwayResolverString } from './mocks.js';

-const { config, pathway } = mockPathwayResolverString;
+const { config, pathway, modelName, model } = mockPathwayResolverString;

-const modelPlugin = new ModelPlugin(
+const modelPlugin = new ModelPlugin(config, pathway, modelName, model);

 const generateMessage = (role, content) => ({ role, content });
