@aj-archipelago/cortex 1.0.4 → 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,229 @@
1
+ // palmChatPlugin.js
2
+ import ModelPlugin from './modelPlugin.js';
3
+ import { encode } from 'gpt-3-encoder';
4
+ import HandleBars from '../../lib/handleBars.js';
5
+
6
+ class PalmChatPlugin extends ModelPlugin {
7
+ constructor(config, pathway) {
8
+ super(config, pathway);
9
+ }
10
+
11
+ // Convert to PaLM messages array format if necessary
12
+ convertMessagesToPalm(messages) {
13
+ let context = '';
14
+ let modifiedMessages = [];
15
+ let lastAuthor = '';
16
+
17
+ messages.forEach(message => {
18
+ const { role, author, content } = message;
19
+
20
+ // Extract system messages into the context string
21
+ if (role === 'system') {
22
+ context += (context.length > 0 ? '\n' : '') + content;
23
+ return;
24
+ }
25
+
26
+ // Aggregate consecutive author messages, appending the content
27
+ if ((role === lastAuthor || author === lastAuthor) && modifiedMessages.length > 0) {
28
+ modifiedMessages[modifiedMessages.length - 1].content += '\n' + content;
29
+ }
30
+ // Only push messages with role 'user' or 'assistant' or existing author messages
31
+ else if (role === 'user' || role === 'assistant' || author) {
32
+ modifiedMessages.push({
33
+ author: author || role,
34
+ content,
35
+ });
36
+ lastAuthor = author || role;
37
+ }
38
+ });
39
+
40
+ return {
41
+ modifiedMessages,
42
+ context,
43
+ };
44
+ }
45
+
46
+ // Handlebars compiler for context (PaLM chat specific)
47
+ getCompiledContext(text, parameters, context) {
48
+ const combinedParameters = { ...this.promptParameters, ...parameters };
49
+ return context ? HandleBars.compile(context)({ ...combinedParameters, text}) : '';
50
+ }
51
+
52
+ // Handlebars compiler for examples (PaLM chat specific)
53
+ getCompiledExamples(text, parameters, examples = []) {
54
+ const combinedParameters = { ...this.promptParameters, ...parameters };
55
+
56
+ const compileContent = (content) => {
57
+ const compile = HandleBars.compile(content);
58
+ return compile({ ...combinedParameters, text });
59
+ };
60
+
61
+ const processExample = (example, key) => {
62
+ if (example[key]?.content) {
63
+ return { ...example[key], content: compileContent(example[key].content) };
64
+ }
65
+ return { ...example[key] };
66
+ };
67
+
68
+ return examples.map((example) => ({
69
+ input: example.input ? processExample(example, 'input') : undefined,
70
+ output: example.output ? processExample(example, 'output') : undefined,
71
+ }));
72
+ }
73
+
74
+ // Set up parameters specific to the PaLM Chat API
75
+ getRequestParameters(text, parameters, prompt) {
76
+ const { modelPromptText, modelPromptMessages, tokenLength } = this.getCompiledPrompt(text, parameters, prompt);
77
+ const { stream } = parameters;
78
+
79
+ // Define the model's max token length
80
+ const modelTargetTokenLength = this.getModelMaxTokenLength() * this.getPromptTokenRatio();
81
+
82
+ const palmMessages = this.convertMessagesToPalm(modelPromptMessages || [{ "author": "user", "content": modelPromptText }]);
83
+
84
+ let requestMessages = palmMessages.modifiedMessages;
85
+
86
+ // Check if the token length exceeds the model's max token length
87
+ if (tokenLength > modelTargetTokenLength) {
88
+ // Remove older messages until the token length is within the model's limit
89
+ requestMessages = this.truncateMessagesToTargetLength(requestMessages, modelTargetTokenLength);
90
+ }
91
+
92
+ const context = this.getCompiledContext(text, parameters, prompt.context || palmMessages.context || '');
93
+ const examples = this.getCompiledExamples(text, parameters, prompt.examples || []);
94
+
95
+ // For PaLM right now, the max return tokens is 1024, regardless of the max context length
96
+ // I can't think of a time you'd want to constrain it to fewer at the moment.
97
+ const max_tokens = 1024//this.getModelMaxTokenLength() - tokenLength;
98
+
99
+ if (max_tokens < 0) {
100
+ throw new Error(`Prompt is too long to successfully call the model at ${tokenLength} tokens. The model will not be called.`);
101
+ }
102
+
103
+ // Ensure there is an odd number of messages (PaLM requires an odd number of messages, ending on a user turn)
104
+ if (requestMessages.length % 2 === 0) {
105
+ requestMessages = requestMessages.slice(1);
106
+ }
107
+
108
+ const requestParameters = {
109
+ instances: [{
110
+ context: context,
111
+ examples: examples,
112
+ messages: requestMessages,
113
+ }],
114
+ parameters: {
115
+ temperature: this.temperature ?? 0.7,
116
+ maxOutputTokens: max_tokens,
117
+ topP: parameters.topP ?? 0.95,
118
+ topK: parameters.topK ?? 40,
119
+ }
120
+ };
121
+
122
+ return requestParameters;
123
+ }
124
+
125
+ // Get the safetyAttributes from the PaLM Chat API response data
126
+ getSafetyAttributes(data) {
127
+ const { predictions } = data;
128
+ if (!predictions || !predictions.length) {
129
+ return null;
130
+ }
131
+
132
+ // if we got a predictions array back with more than one prediction, return the safetyAttributes of the first prediction
133
+ if (predictions.length > 1) {
134
+ return predictions[0].safetyAttributes ?? null;
135
+ }
136
+
137
+ // otherwise, return the safetyAttributes of the content of the first prediction
138
+ return predictions[0].safetyAttributes ?? null;
139
+ }
140
+
141
+ // Execute the request to the PaLM Chat API
142
+ async execute(text, parameters, prompt) {
143
+ const url = this.requestUrl(text);
144
+ const requestParameters = this.getRequestParameters(text, parameters, prompt);
145
+
146
+ const data = { ...(this.model.params || {}), ...requestParameters };
147
+ const params = {};
148
+ const headers = this.model.headers || {};
149
+ const gcpAuthTokenHelper = this.config.get('gcpAuthTokenHelper');
150
+ const authToken = await gcpAuthTokenHelper.getAccessToken();
151
+ headers.Authorization = `Bearer ${authToken}`;
152
+ return this.executeRequest(url, data, params, headers, prompt);
153
+ }
154
+
155
+ // Parse the response from the PaLM Chat API
156
+ parseResponse(data) {
157
+ const { predictions } = data;
158
+ if (!predictions || !predictions.length) {
159
+ return null;
160
+ }
161
+
162
+ // Get the candidates array from the first prediction
163
+ const { candidates } = predictions[0];
164
+
165
+ // if it was blocked, return the blocked message
166
+ if (predictions[0].safetyAttributes?.blocked) {
167
+ return 'The response is blocked because the input or response potentially violates Google policies. Try rephrasing the prompt or adjusting the parameter settings. Currently, only English is supported.';
168
+ }
169
+
170
+ if (!candidates || !candidates.length) {
171
+ return null;
172
+ }
173
+
174
+ // If we got a candidates array back with more than one candidate, return the whole array
175
+ if (candidates.length > 1) {
176
+ return candidates;
177
+ }
178
+
179
+ // Otherwise, return the content of the first candidate
180
+ const messageResult = candidates[0].content && candidates[0].content.trim();
181
+ return messageResult ?? null;
182
+ }
183
+
184
+ // Override the logging function to display the messages and responses
185
+ logRequestData(data, responseData, prompt) {
186
+ const separator = `\n=== ${this.pathwayName}.${this.requestCount++} ===\n`;
187
+ console.log(separator);
188
+
189
+ const instances = data && data.instances;
190
+ const messages = instances && instances[0] && instances[0].messages;
191
+ const { context, examples } = instances && instances [0] || {};
192
+
193
+ if (context) {
194
+ console.log(`\x1b[36mContext: ${context}\x1b[0m`);
195
+ }
196
+
197
+ if (examples && examples.length) {
198
+ examples.forEach((example, index) => {
199
+ console.log(`\x1b[36mExample ${index + 1}: Input: "${example.input.content}", Output: "${example.output.content}"\x1b[0m`);
200
+ });
201
+ }
202
+
203
+ if (messages && messages.length > 1) {
204
+ messages.forEach((message, index) => {
205
+ const words = message.content.split(" ");
206
+ const tokenCount = encode(message.content).length;
207
+ const preview = words.length < 41 ? message.content : words.slice(0, 20).join(" ") + " ... " + words.slice(-20).join(" ");
208
+
209
+ console.log(`\x1b[36mMessage ${index + 1}: Author: ${message.author}, Tokens: ${tokenCount}, Content: "${preview}"\x1b[0m`);
210
+ });
211
+ } else if (messages && messages.length === 1) {
212
+ console.log(`\x1b[36m${messages[0].content}\x1b[0m`);
213
+ }
214
+
215
+ const safetyAttributes = this.getSafetyAttributes(responseData);
216
+
217
+ console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
218
+
219
+ if (safetyAttributes) {
220
+ console.log(`\x1b[33mSafety Attributes: ${JSON.stringify(safetyAttributes, null, 2)}\x1b[0m`);
221
+ }
222
+
223
+ if (prompt && prompt.debugInfo) {
224
+ prompt.debugInfo += `${separator}${JSON.stringify(data)}`;
225
+ }
226
+ }
227
+ }
228
+
229
+ export default PalmChatPlugin;
@@ -0,0 +1,134 @@
1
+ // palmCompletionPlugin.js
2
+
3
+ import ModelPlugin from './modelPlugin.js';
4
+
5
+ // Helper function to truncate the prompt if it is too long
6
+ const truncatePromptIfNecessary = (text, textTokenCount, modelMaxTokenCount, targetTextTokenCount, pathwayResolver) => {
7
+ const maxAllowedTokens = textTokenCount + ((modelMaxTokenCount - targetTextTokenCount) * 0.5);
8
+
9
+ if (textTokenCount > maxAllowedTokens) {
10
+ pathwayResolver.logWarning(`Prompt is too long at ${textTokenCount} tokens (this target token length for this pathway is ${targetTextTokenCount} tokens because the response is expected to take up the rest of the model's max tokens (${modelMaxTokenCount}). Prompt will be truncated.`);
11
+ return pathwayResolver.truncate(text, maxAllowedTokens);
12
+ }
13
+ return text;
14
+ }
15
+
16
+ // PalmCompletionPlugin class for handling requests and responses to the PaLM API Text Completion API
17
+ class PalmCompletionPlugin extends ModelPlugin {
18
+ constructor(config, pathway) {
19
+ super(config, pathway);
20
+ }
21
+
22
+ // Set up parameters specific to the PaLM API Text Completion API
23
+ getRequestParameters(text, parameters, prompt, pathwayResolver) {
24
+ const { modelPromptText, tokenLength } = this.getCompiledPrompt(text, parameters, prompt);
25
+ const { stream } = parameters;
26
+ // Define the model's max token length
27
+ const modelTargetTokenLength = this.getModelMaxTokenLength() * this.getPromptTokenRatio();
28
+
29
+ const truncatedPrompt = truncatePromptIfNecessary(modelPromptText, tokenLength, this.getModelMaxTokenLength(), modelTargetTokenLength, pathwayResolver);
30
+
31
+ const max_tokens = 1024//this.getModelMaxTokenLength() - tokenLength;
32
+
33
+ if (max_tokens < 0) {
34
+ throw new Error(`Prompt is too long to successfully call the model at ${tokenLength} tokens. The model will not be called.`);
35
+ }
36
+
37
+ if (!truncatedPrompt) {
38
+ throw new Error(`Prompt is empty. The model will not be called.`);
39
+ }
40
+
41
+ const requestParameters = {
42
+ instances: [
43
+ { prompt: truncatedPrompt }
44
+ ],
45
+ parameters: {
46
+ temperature: this.temperature ?? 0.7,
47
+ maxOutputTokens: max_tokens,
48
+ topP: parameters.topP ?? 0.95,
49
+ topK: parameters.topK ?? 40,
50
+ }
51
+ };
52
+
53
+ return requestParameters;
54
+ }
55
+
56
+ // Execute the request to the PaLM API Text Completion API
57
+ async execute(text, parameters, prompt, pathwayResolver) {
58
+ const url = this.requestUrl(text);
59
+ const requestParameters = this.getRequestParameters(text, parameters, prompt, pathwayResolver);
60
+
61
+ const data = { ...requestParameters };
62
+ const params = {};
63
+ const headers = this.model.headers || {};
64
+ const gcpAuthTokenHelper = this.config.get('gcpAuthTokenHelper');
65
+ const authToken = await gcpAuthTokenHelper.getAccessToken();
66
+ headers.Authorization = `Bearer ${authToken}`;
67
+ return this.executeRequest(url, data, params, headers, prompt);
68
+ }
69
+
70
+ // Parse the response from the PaLM API Text Completion API
71
+ parseResponse(data) {
72
+ const { predictions } = data;
73
+ if (!predictions || !predictions.length) {
74
+ return data;
75
+ }
76
+
77
+ // if we got a predictions array back with more than one prediction, return the whole array
78
+ if (predictions.length > 1) {
79
+ return predictions;
80
+ }
81
+
82
+ // otherwise, return the content of the first prediction
83
+ // if it was blocked, return the blocked message
84
+ if (predictions[0].safetyAttributes?.blocked) {
85
+ return 'The response is blocked because the input or response potentially violates Google policies. Try rephrasing the prompt or adjusting the parameter settings. Currently, only English is supported.';
86
+ }
87
+
88
+ const contentResult = predictions[0].content && predictions[0].content.trim();
89
+ return contentResult ?? null;
90
+ }
91
+
92
+ // Get the safetyAttributes from the PaLM API Text Completion API response data
93
+ getSafetyAttributes(data) {
94
+ const { predictions } = data;
95
+ if (!predictions || !predictions.length) {
96
+ return null;
97
+ }
98
+
99
+ // if we got a predictions array back with more than one prediction, return the safetyAttributes of the first prediction
100
+ if (predictions.length > 1) {
101
+ return predictions[0].safetyAttributes ?? null;
102
+ }
103
+
104
+ // otherwise, return the safetyAttributes of the content of the first prediction
105
+ return predictions[0].safetyAttributes ?? null;
106
+ }
107
+
108
+ // Override the logging function to log the prompt and response
109
+ logRequestData(data, responseData, prompt) {
110
+ const separator = `\n=== ${this.pathwayName}.${this.requestCount++} ===\n`;
111
+ console.log(separator);
112
+
113
+ const safetyAttributes = this.getSafetyAttributes(responseData);
114
+
115
+ const instances = data && data.instances;
116
+ const modelInput = instances && instances[0] && instances[0].prompt;
117
+
118
+ if (modelInput) {
119
+ console.log(`\x1b[36m${modelInput}\x1b[0m`);
120
+ }
121
+
122
+ console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
123
+
124
+ if (safetyAttributes) {
125
+ console.log(`\x1b[33mSafety Attributes: ${JSON.stringify(safetyAttributes, null, 2)}\x1b[0m`);
126
+ }
127
+
128
+ if (prompt && prompt.debugInfo) {
129
+ prompt.debugInfo += `${separator}${JSON.stringify(data)}`;
130
+ }
131
+ }
132
+ }
133
+
134
+ export default PalmCompletionPlugin;
package/graphql/prompt.js CHANGED
@@ -3,15 +3,21 @@ class Prompt {
3
3
  if (typeof params === 'string' || params instanceof String) {
4
4
  this.prompt = params;
5
5
  } else {
6
- const { prompt, saveResultTo, messages } = params;
6
+ const { prompt, saveResultTo, messages, context, examples } = params;
7
7
  this.prompt = prompt;
8
8
  this.saveResultTo = saveResultTo;
9
9
  this.messages = messages;
10
+ this.context = context;
11
+ this.examples = examples;
10
12
  this.params = params;
11
13
  }
12
14
 
13
- this.usesTextInput = promptContains('text', this.prompt ? this.prompt : this.messages);
14
- this.usesPreviousResult = promptContains('previousResult', this.prompt ? this.prompt : this.messages);
15
+ this.usesTextInput = promptContains('text', this.prompt ? this.prompt : this.messages) ||
16
+ (this.context && promptContains('text', this.context)) ||
17
+ (this.examples && promptContains('text', this.examples));
18
+ this.usesPreviousResult = promptContains('previousResult', this.prompt ? this.prompt : this.messages) ||
19
+ (this.context && promptContains('previousResult', this.context)) ||
20
+ (this.examples && promptContains('previousResult', this.examples));
15
21
  this.debugInfo = '';
16
22
  }
17
23
  }
@@ -23,7 +29,8 @@ function promptContains(variable, prompt) {
23
29
  let matches = [];
24
30
  let match;
25
31
 
26
- // if it's an array, it's the messages format
32
+ // if it's an array, it's either an OpenAI messages array or a PaLM messages
33
+ // array or a PaLM examples array, all of which have a content property
27
34
  if (Array.isArray(prompt)) {
28
35
  prompt.forEach(p => {
29
36
  // eslint-disable-next-line no-cond-assign
@@ -0,0 +1,20 @@
1
+ FROM node:18-alpine
2
+
3
+ WORKDIR /usr/src/app
4
+
5
+ COPY package*.json ./
6
+
7
+ RUN npm install
8
+
9
+ ## following 3 lines are for installing ffmpeg
10
+ RUN apk update
11
+ RUN apk add
12
+ RUN apk add ffmpeg
13
+
14
+ COPY . .
15
+
16
+ EXPOSE 7071
17
+
18
+ # RUN npm run build
19
+
20
+ CMD [ "node", "start.js" ]
@@ -18,7 +18,7 @@
18
18
  "ioredis": "^5.3.1",
19
19
  "public-ip": "^6.0.1",
20
20
  "uuid": "^9.0.0",
21
- "ytdl-core": "git+ssh://git@github.com:khlevon/node-ytdl-core.git#v4.11.3-patch.1"
21
+ "ytdl-core": "github:khlevon/node-ytdl-core#v4.11.4-patch.2"
22
22
  }
23
23
  },
24
24
  "node_modules/@azure/abort-controller": {
@@ -45,9 +45,9 @@
45
45
  }
46
46
  },
47
47
  "node_modules/@azure/core-http": {
48
- "version": "3.0.0",
49
- "resolved": "https://registry.npmjs.org/@azure/core-http/-/core-http-3.0.0.tgz",
50
- "integrity": "sha512-BxI2SlGFPPz6J1XyZNIVUf0QZLBKFX+ViFjKOkzqD18J1zOINIQ8JSBKKr+i+v8+MB6LacL6Nn/sP/TE13+s2Q==",
48
+ "version": "3.0.1",
49
+ "resolved": "https://registry.npmjs.org/@azure/core-http/-/core-http-3.0.1.tgz",
50
+ "integrity": "sha512-A3x+um3cAPgQe42Lu7Iv/x8/fNjhL/nIoEfqFxfn30EyxK6zC13n+OUxzZBRC0IzQqssqIbt4INf5YG7lYYFtw==",
51
51
  "dependencies": {
52
52
  "@azure/abort-controller": "^1.0.0",
53
53
  "@azure/core-auth": "^1.3.0",
@@ -62,7 +62,7 @@
62
62
  "tslib": "^2.2.0",
63
63
  "tunnel": "^0.0.6",
64
64
  "uuid": "^8.3.0",
65
- "xml2js": "^0.4.19"
65
+ "xml2js": "^0.5.0"
66
66
  },
67
67
  "engines": {
68
68
  "node": ">=14.0.0"
@@ -1720,9 +1720,9 @@
1720
1720
  }
1721
1721
  },
1722
1722
  "node_modules/xml2js": {
1723
- "version": "0.4.23",
1724
- "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz",
1725
- "integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==",
1723
+ "version": "0.5.0",
1724
+ "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.5.0.tgz",
1725
+ "integrity": "sha512-drPFnkQJik/O+uPKpqSgr22mpuFHqKdbS835iAQrUC73L2F5WkboIRd63ai/2Yg6I1jzifPFKH2NTK+cfglkIA==",
1726
1726
  "dependencies": {
1727
1727
  "sax": ">=0.6.0",
1728
1728
  "xmlbuilder": "~11.0.0"
@@ -1741,7 +1741,7 @@
1741
1741
  },
1742
1742
  "node_modules/ytdl-core": {
1743
1743
  "version": "0.0.0-development",
1744
- "resolved": "git+ssh://git@github.com/khlevon/node-ytdl-core.git#586971bd9aeda1cbb4600851cfef82a809833ac2",
1744
+ "resolved": "git+ssh://git@github.com/khlevon/node-ytdl-core.git#87450450caabb91f81afa6e66758bf2f629664a1",
1745
1745
  "license": "MIT",
1746
1746
  "dependencies": {
1747
1747
  "m3u8stream": "^0.8.6",
@@ -1772,9 +1772,9 @@
1772
1772
  }
1773
1773
  },
1774
1774
  "@azure/core-http": {
1775
- "version": "3.0.0",
1776
- "resolved": "https://registry.npmjs.org/@azure/core-http/-/core-http-3.0.0.tgz",
1777
- "integrity": "sha512-BxI2SlGFPPz6J1XyZNIVUf0QZLBKFX+ViFjKOkzqD18J1zOINIQ8JSBKKr+i+v8+MB6LacL6Nn/sP/TE13+s2Q==",
1775
+ "version": "3.0.1",
1776
+ "resolved": "https://registry.npmjs.org/@azure/core-http/-/core-http-3.0.1.tgz",
1777
+ "integrity": "sha512-A3x+um3cAPgQe42Lu7Iv/x8/fNjhL/nIoEfqFxfn30EyxK6zC13n+OUxzZBRC0IzQqssqIbt4INf5YG7lYYFtw==",
1778
1778
  "requires": {
1779
1779
  "@azure/abort-controller": "^1.0.0",
1780
1780
  "@azure/core-auth": "^1.3.0",
@@ -1789,7 +1789,7 @@
1789
1789
  "tslib": "^2.2.0",
1790
1790
  "tunnel": "^0.0.6",
1791
1791
  "uuid": "^8.3.0",
1792
- "xml2js": "^0.4.19"
1792
+ "xml2js": "^0.5.0"
1793
1793
  },
1794
1794
  "dependencies": {
1795
1795
  "uuid": {
@@ -2948,9 +2948,9 @@
2948
2948
  }
2949
2949
  },
2950
2950
  "xml2js": {
2951
- "version": "0.4.23",
2952
- "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz",
2953
- "integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==",
2951
+ "version": "0.5.0",
2952
+ "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.5.0.tgz",
2953
+ "integrity": "sha512-drPFnkQJik/O+uPKpqSgr22mpuFHqKdbS835iAQrUC73L2F5WkboIRd63ai/2Yg6I1jzifPFKH2NTK+cfglkIA==",
2954
2954
  "requires": {
2955
2955
  "sax": ">=0.6.0",
2956
2956
  "xmlbuilder": "~11.0.0"
@@ -2962,8 +2962,8 @@
2962
2962
  "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA=="
2963
2963
  },
2964
2964
  "ytdl-core": {
2965
- "version": "git+ssh://git@github.com/khlevon/node-ytdl-core.git#586971bd9aeda1cbb4600851cfef82a809833ac2",
2966
- "from": "ytdl-core@git+ssh://git@github.com:khlevon/node-ytdl-core.git#v4.11.3-patch.1",
2965
+ "version": "git+ssh://git@github.com/khlevon/node-ytdl-core.git#87450450caabb91f81afa6e66758bf2f629664a1",
2966
+ "from": "ytdl-core@github:khlevon/node-ytdl-core#v4.11.4-patch.2",
2967
2967
  "requires": {
2968
2968
  "m3u8stream": "^0.8.6",
2969
2969
  "miniget": "^4.2.2",
@@ -18,6 +18,6 @@
18
18
  "ioredis": "^5.3.1",
19
19
  "public-ip": "^6.0.1",
20
20
  "uuid": "^9.0.0",
21
- "ytdl-core": "git+ssh://git@github.com:khlevon/node-ytdl-core.git#v4.11.3-patch.1"
21
+ "ytdl-core": "github:khlevon/node-ytdl-core#v4.11.4-patch.2"
22
22
  }
23
23
  }
@@ -0,0 +1,37 @@
1
+ import { GoogleAuth } from 'google-auth-library';
2
+
3
+ class GcpAuthTokenHelper {
4
+ constructor(config) {
5
+ const creds = config.gcpServiceAccountKey ? JSON.parse(config.gcpServiceAccountKey) : null;
6
+ if (!creds) {
7
+ throw new Error('GCP_SERVICE_ACCOUNT_KEY is missing or undefined');
8
+ }
9
+ this.authClient = new GoogleAuth({
10
+ credentials: creds,
11
+ scopes: ['https://www.googleapis.com/auth/cloud-platform'],
12
+ });
13
+ this.token = null;
14
+ this.expiry = null;
15
+ }
16
+
17
+ async getAccessToken() {
18
+ if (!this.token || !this.isTokenValid()) {
19
+ await this.refreshToken();
20
+ }
21
+ return this.token;
22
+ }
23
+
24
+ isTokenValid() {
25
+ // Check if token is still valid with a 5-minute buffer
26
+ return this.expiry && Date.now() < this.expiry.getTime() - 5 * 60 * 1000;
27
+ }
28
+
29
+ async refreshToken() {
30
+ const authClient = await this.authClient.getClient();
31
+ const accessTokenResponse = await authClient.getAccessToken();
32
+ this.token = accessTokenResponse.token;
33
+ this.expiry = new Date(accessTokenResponse.expirationTime);
34
+ }
35
+ }
36
+
37
+ export default GcpAuthTokenHelper;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aj-archipelago/cortex",
3
- "version": "1.0.4",
3
+ "version": "1.0.5",
4
4
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
5
5
  "repository": {
6
6
  "type": "git",
@@ -43,6 +43,7 @@
43
43
  "convict": "^6.2.3",
44
44
  "express": "^4.18.2",
45
45
  "form-data": "^4.0.0",
46
+ "google-auth-library": "^8.8.0",
46
47
  "gpt-3-encoder": "^1.1.4",
47
48
  "graphql": "^16.6.0",
48
49
  "graphql-subscriptions": "^2.0.0",
@@ -51,6 +52,7 @@
51
52
  "ioredis": "^5.3.1",
52
53
  "keyv": "^4.5.2",
53
54
  "langchain": "^0.0.47",
55
+ "subsrt": "^1.1.1",
54
56
  "uuid": "^9.0.0",
55
57
  "ws": "^8.12.0"
56
58
  },
@@ -0,0 +1,17 @@
1
+ //completions.js
2
+
3
+ import { Prompt } from '../graphql/prompt.js';
4
+
5
+ export default {
6
+ prompt:
7
+ [
8
+ new Prompt({ messages: [
9
+ "{{messages}}",
10
+ ]}),
11
+ ],
12
+ inputParameters: {
13
+ messages: [],
14
+ },
15
+ model: 'palm-chat',
16
+ useInputChunking: false,
17
+ }
package/pathways/index.js CHANGED
@@ -3,10 +3,11 @@ import chat from './chat.js';
3
3
  import bias from './bias.js';
4
4
  import complete from './complete.js';
5
5
  import entities from './entities.js';
6
- import lc_test from './lc_test.mjs';
7
6
  import paraphrase from './paraphrase.js';
8
7
  import sentiment from './sentiment.js';
9
8
  import summary from './summary.js';
9
+ import test_langchain from './test_langchain.mjs';
10
+ import test_palm_chat from './test_palm_chat.js';
10
11
  import transcribe from './transcribe.js';
11
12
  import translate from './translate.js';
12
13
 
@@ -16,10 +17,11 @@ export {
16
17
  bias,
17
18
  complete,
18
19
  entities,
19
- lc_test,
20
20
  paraphrase,
21
21
  sentiment,
22
22
  summary,
23
+ test_langchain,
24
+ test_palm_chat,
23
25
  transcribe,
24
26
  translate
25
27
  };
@@ -1,4 +1,4 @@
1
- // lc_test.js
1
+ // test_langchain.mjs
2
2
  // LangChain Cortex integration test
3
3
 
4
4
  // Import required modules
@@ -0,0 +1,18 @@
1
+ //test_oai_chat.js
2
+
3
+ import { Prompt } from '../graphql/prompt.js';
4
+
5
+ // Description: Have a chat with a bot that uses context to understand the conversation
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'azure-td3',
17
+ useInputChunking: false,
18
+ }
@@ -0,0 +1,13 @@
1
+ //test_oai_cmpl.js
2
+
3
+ import { Prompt } from '../graphql/prompt.js';
4
+
5
+ // Description: Have a chat with a bot that uses context to understand the conversation
6
+ export default {
7
+ prompt: `{{prompt}}`,
8
+ inputParameters: {
9
+ prompt: '',
10
+ },
11
+ model: 'azure-td3',
12
+ useInputChunking: false,
13
+ }