@aj-archipelago/cortex 1.0.10 → 1.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/config.js CHANGED
@@ -151,7 +151,7 @@ if (configFile && fs.existsSync(configFile)) {
     const openaiApiKey = config.get('openaiApiKey');
     if (!openaiApiKey) {
         throw console.log('No config file or api key specified. Please set the OPENAI_API_KEY to use OAI or use CORTEX_CONFIG_FILE environment variable to point at the Cortex configuration for your project.');
-    }else {
+    } else {
         console.log(`Using default model with OPENAI_API_KEY environment variable`)
     }
 }
@@ -202,7 +202,7 @@ const buildPathways = async (config) => {
 const buildModels = (config) => {
     const { models } = config.getProperties();
 
-    for (const [key, model] of Object.entries(models)) {
+    for (const [key, model] of Object.entries(models)) {
         // Compile handlebars templates for models
         models[key] = JSON.parse(HandleBars.compile(JSON.stringify(model))({ ...config.getEnv(), ...config.getProperties() }))
     }
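The Handlebars pass above means a model entry in the Cortex config can interpolate environment values at build time. A minimal sketch of such an entry (the model name, URL, and header layout here are illustrative assumptions, not taken from the package):

// Hypothetical model entry; the HandleBars.compile() pass above replaces
// {{OPENAI_API_KEY}} with the value from config.getEnv().
const models = {
    "oai-gpt35turbo": {
        type: "OPENAI-CHAT",
        url: "https://api.openai.com/v1/chat/completions",
        headers: { Authorization: "Bearer {{OPENAI_API_KEY}}" },
    },
};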
@@ -219,8 +219,8 @@ const buildModels = (config) => {
     // Set default model name to the first model in the config in case no default is specified
     if (!config.get('defaultModelName')) {
         console.log('No default model specified, using first model as default.');
-        config.load({ defaultModelName: Object.keys(config.get('models'))[0] });
-    }
+        config.load({ defaultModelName: Object.keys(config.get('models'))[0] });
+    }
 
     return models;
 }
package/lib/request.js CHANGED
@@ -176,14 +176,23 @@ const postRequest = async ({ url, data, params, headers, cache }, model, request
         }
     } catch (error) {
         //console.error(`!!! [${requestId}] failed request with data ${JSON.stringify(data)}: ${error}`);
-        if (error.response?.status === 429) {
-            monitors[model].incrementError429Count();
-        }
-        console.log(`>>> [${requestId}] retrying request due to ${error.response?.status} response. Retry count: ${i + 1}`);
-        if (i < MAX_RETRY - 1) {
-            const backoffTime = 200 * Math.pow(2, i);
-            const jitter = backoffTime * 0.2 * Math.random();
-            await new Promise(r => setTimeout(r, backoffTime + jitter));
+        if (error.response) {
+            const status = error.response.status;
+            if ((status === 429) || (status >= 500 && status < 600)) {
+                if (status === 429) {
+                    monitors[model].incrementError429Count();
+                }
+                console.log(`>>> [${requestId}] retrying request due to ${status} response. Retry count: ${i + 1}`);
+                if (i < MAX_RETRY - 1) {
+                    const backoffTime = 200 * Math.pow(2, i);
+                    const jitter = backoffTime * 0.2 * Math.random();
+                    await new Promise(r => setTimeout(r, backoffTime + jitter));
+                } else {
+                    throw error;
+                }
+            } else {
+                throw error;
+            }
         } else {
             throw error;
         }
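The retry schedule above is plain exponential backoff with up to 20% jitter; as a quick illustration of the delays it produces (this helper just restates the logic, it is not part of the package):

// Delay before retry attempt i (0-based), mirroring the code above:
// 200ms base, doubling per attempt, plus up to 20% random jitter.
const backoffMs = (i) => {
    const backoffTime = 200 * Math.pow(2, i); // 200, 400, 800, 1600, ...
    return backoffTime + backoffTime * 0.2 * Math.random();
};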
@@ -192,17 +201,22 @@ const postRequest = async ({ url, data, params, headers, cache }, model, request
 };
 
 const request = async (params, model, requestId, pathway) => {
-    const response = await postRequest(params, model, requestId, pathway);
-    const { error, data, cached } = response;
-    if (cached) {
-        console.info(`<<< [${requestId}] served with cached response.`);
-    }
-    if (error && error.length > 0) {
-        const lastError = error[error.length - 1];
-        return { error: lastError.toJSON() ?? lastError ?? error };
-    }
-    //console.log("<<< [${requestId}] response: ", data.choices[0].delta || data.choices[0])
-    return data;
+    try {
+        const response = await postRequest(params, model, requestId, pathway);
+        const { error, data, cached } = response;
+        if (cached) {
+            console.info(`<<< [${requestId}] served with cached response.`);
+        }
+        if (error && error.length > 0) {
+            const lastError = error[error.length - 1];
+            return { error: lastError.toJSON() ?? lastError ?? error };
+        }
+        //console.log("<<< [${requestId}] response: ", data.choices[0].delta || data.choices[0])
+        return data;
+    } catch (error) {
+        console.error(`Error in request: ${error.message || error}`);
+        return { error: error };
+    }
 }
 
 export {
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex",
-  "version": "1.0.10",
+  "version": "1.0.12",
   "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
   "repository": {
     "type": "git",
@@ -21,5 +21,10 @@ export default {
     truncateFromFront: false, // true or false - if true, truncate from the front of the input instead of the back
     timeout: 120, // seconds, cancels the pathway after this many seconds
     duplicateRequestAfter: 10, // seconds, if the request is not completed after this many seconds, a backup request is sent
+    // override the default execution of the pathway
+    // callback signature: executePathway({args: object, runAllPrompts: function})
+    // args: the input arguments to the pathway
+    // runAllPrompts: a function that runs all prompts in the pathway and returns the result
+    executePathway: undefined,
 };
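This hook gives a pathway full control over execution while still exposing the default prompt run. A minimal sketch of a pathway using it (the prompt text and post-processing are hypothetical):

// Hypothetical pathway module overriding execution with executePathway.
export default {
    prompt: `Summarize the following text:\n\n{{text}}`,
    executePathway: async ({ args, runAllPrompts }) => {
        const result = await runAllPrompts(args); // the default prompt execution
        return typeof result === 'string' ? result.trim() : result;
    },
};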
@@ -21,7 +21,7 @@ export default {
     const originalTargetLength = args.targetLength;
 
     // If targetLength is not provided, execute the prompt once and return the result.
-    if (originalTargetLength === 0) {
+    if (originalTargetLength === 0 || originalTargetLength === null) {
         let pathwayResolver = new PathwayResolver({ config, pathway, args, requestState });
         return await pathwayResolver.resolve(args);
     }
package/server/chunker.js CHANGED
@@ -152,7 +152,7 @@ const getSemanticChunks = (text, chunkSize, inputFormat = 'text') => {
         }
     });
 
-    if (chunks.some(chunk => encode(chunk).length > chunkSize)) {
+    if (chunks.filter(c => determineTextFormat(c) === 'html').some(chunk => encode(chunk).length > chunkSize)) {
         throw new Error('The HTML contains elements that are larger than the chunk size. Please try again with HTML that has smaller elements.');
     }
 
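With this change, only chunks that are themselves HTML are measured against the chunk size, so oversized plain-text chunks no longer trip the HTML-specific error. A hedged usage sketch (the input and chunk size are illustrative):

// Chunk an HTML document into pieces of at most 500 tokens; the error
// above now fires only if an HTML element itself exceeds the budget.
const chunks = getSemanticChunks('<h1>Title</h1><p>Body text</p>', 500, 'html');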
package/server/graphql.js CHANGED
@@ -20,6 +20,7 @@ import { cancelRequestResolver } from './resolver.js';
 import { buildPathways, buildModels } from '../config.js';
 import { requestState } from './requestState.js';
 import { buildRestEndpoints } from './rest.js';
+import { startTestServer } from '../tests/server.js'
 
 // Utility functions
 // Server plugins
@@ -222,7 +223,7 @@ const build = async (config) => {
     });
 };
 
-    return { server, startServer, cache, plugins, typeDefs, resolvers }
+    return { server, startServer, startTestServer, cache, plugins, typeDefs, resolvers }
 }
 
@@ -3,6 +3,7 @@ import OpenAIChatPlugin from './plugins/openAiChatPlugin.js';
 import OpenAICompletionPlugin from './plugins/openAiCompletionPlugin.js';
 import AzureTranslatePlugin from './plugins/azureTranslatePlugin.js';
 import OpenAIWhisperPlugin from './plugins/openAiWhisperPlugin.js';
+import OpenAIChatExtensionPlugin from './plugins/openAiChatExtensionPlugin.js';
 import LocalModelPlugin from './plugins/localModelPlugin.js';
 import PalmChatPlugin from './plugins/palmChatPlugin.js';
 import PalmCompletionPlugin from './plugins/palmCompletionPlugin.js';
@@ -19,6 +20,9 @@ class PathwayPrompter {
         case 'OPENAI-CHAT':
             plugin = new OpenAIChatPlugin(config, pathway, modelName, model);
             break;
+        case 'OPENAI-CHAT-EXTENSION':
+            plugin = new OpenAIChatExtensionPlugin(config, pathway, modelName, model);
+            break;
         case 'AZURE-TRANSLATE':
             plugin = new AzureTranslatePlugin(config, pathway, modelName, model);
             break;
@@ -9,7 +9,6 @@ import { Prompt } from './prompt.js';
 import { getv, setv } from '../lib/keyValueStorageClient.js';
 import { requestState } from './requestState.js';
 import { callPathway } from '../lib/pathwayTools.js';
-import { response } from 'express';
 
 class PathwayResolver {
     constructor({ config, pathway, args }) {
@@ -26,7 +25,7 @@ class PathwayResolver {
         args?.model,
         pathway.inputParameters?.model,
         config.get('defaultModelName')
-    ].find(modelName => modelName && config.get('models').hasOwnProperty(modelName));
+    ].find(modelName => modelName && Object.prototype.hasOwnProperty.call(config.get('models'), modelName));
     this.model = config.get('models')[this.modelName];
 
     if (!this.model) {
@@ -74,7 +73,7 @@
     let streamErrorOccurred = false;
 
     while (attempt < MAX_RETRY_COUNT) {
-        const responseData = await this.promptAndParse(args);
+        const responseData = await this.executePathway(args);
 
         if (args.async || typeof responseData === 'string') {
             const { completedCount, totalCount } = requestState[this.requestId];
@@ -140,18 +139,20 @@
                 } catch (error) {
                     console.error('Could not publish the stream message', message, error);
                 }
-            };
-        };
+            }
+        }
         } catch (error) {
             console.error('Could not process stream data', error);
         }
     };
 
-    await new Promise((resolve, reject) => {
-        incomingMessage.on('data', processData);
-        incomingMessage.on('end', resolve);
-        incomingMessage.on('error', reject);
-    });
+    if (incomingMessage) {
+        await new Promise((resolve, reject) => {
+            incomingMessage.on('data', processData);
+            incomingMessage.on('end', resolve);
+            incomingMessage.on('error', reject);
+        });
+    }
 
     } catch (error) {
         console.error('Could not subscribe to stream', error);
@@ -187,6 +188,15 @@
     }
     else {
         // Synchronously process the request
+        return await this.executePathway(args);
+    }
+}
+
+async executePathway(args) {
+    if (this.pathway.executePathway && typeof this.pathway.executePathway === 'function') {
+        return await this.pathway.executePathway({ args, runAllPrompts: this.promptAndParse.bind(this) });
+    }
+    else {
         return await this.promptAndParse(args);
     }
 }
@@ -55,7 +55,7 @@ class AzureTranslatePlugin extends ModelPlugin {
         console.log(`\x1b[36m${modelInput}\x1b[0m`);
         console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
 
-        prompt && prompt.debugInfo && (prompt.debugInfo += `${separator}${JSON.stringify(data)}`);
+        prompt && prompt.debugInfo && (prompt.debugInfo += `\n${JSON.stringify(data)}`);
     }
 }
 
@@ -22,11 +22,11 @@ class ModelPlugin {
 
     // Make all of the parameters defined on the pathway itself available to the prompt
     for (const [k, v] of Object.entries(pathway)) {
-        this.promptParameters[k] = v.default ?? v;
+        this.promptParameters[k] = v?.default ?? v;
     }
     if (pathway.inputParameters) {
         for (const [k, v] of Object.entries(pathway.inputParameters)) {
-            this.promptParameters[k] = v.default ?? v;
+            this.promptParameters[k] = v?.default ?? v;
         }
     }
 
@@ -206,17 +206,17 @@
     }
 
     // Default response parsing
-    parseResponse(data) { return data; };
+    parseResponse(data) { return data; }
 
     // Default simple logging
-    logRequestStart(url, data) {
+    logRequestStart(url, _data) {
         this.requestCount++;
         this.lastRequestStartTime = new Date();
         const logMessage = `>>> [${this.requestId}: ${this.pathwayName}.${this.requestCount}] request`;
         const header = '>'.repeat(logMessage.length);
         console.log(`\n${header}\n${logMessage}`);
         console.log(`>>> Making API request to ${url}`);
-    };
+    }
 
     logAIRequestFinished() {
         const currentTime = new Date();
@@ -224,7 +224,7 @@
         const logMessage = `<<< [${this.requestId}: ${this.pathwayName}] response - complete in ${timeElapsed}s - data:`;
         const header = '<'.repeat(logMessage.length);
         console.log(`\n${header}\n${logMessage}\n`);
-    };
+    }
 
     logRequestData(data, responseData, prompt) {
         this.logAIRequestFinished();
@@ -236,21 +236,26 @@
 
         console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
 
-        prompt && prompt.debugInfo && (prompt.debugInfo += `${separator}${JSON.stringify(data)}`);
+        prompt && prompt.debugInfo && (prompt.debugInfo += `\n${JSON.stringify(data)}`);
     }
 
     async executeRequest(url, data, params, headers, prompt, requestId, pathway) {
-        this.aiRequestStartTime = new Date();
-        this.requestId = requestId;
-        this.logRequestStart(url, data);
-        const responseData = await request({ url, data, params, headers, cache: this.shouldCache }, this.modelName, this.requestId, pathway);
+        try {
+            this.aiRequestStartTime = new Date();
+            this.requestId = requestId;
+            this.logRequestStart(url, data);
+            const responseData = await request({ url, data, params, headers, cache: this.shouldCache }, this.modelName, this.requestId, pathway);
+
+            if (responseData.error) {
+                throw new Error(`An error was returned from the server: ${JSON.stringify(responseData.error)}`);
+            }
 
-        if (responseData.error) {
-            throw new Error(`An error was returned from the server: ${JSON.stringify(responseData.error)}`);
+            this.logRequestData(data, responseData, prompt);
+            return this.parseResponse(responseData);
+        } catch (error) {
+            // Log the error and continue
+            console.error(error);
         }
-
-        this.logRequestData(data, responseData, prompt);
-        return this.parseResponse(responseData);
     }
 
 }
package/server/plugins/openAiChatExtensionPlugin.js ADDED
@@ -0,0 +1,58 @@
+// OpenAIChatPlugin.js
+import OpenAIChatPlugin from './openAiChatPlugin.js';
+
+class OpenAIChatExtensionPlugin extends OpenAIChatPlugin {
+    constructor(config, pathway, modelName, model) {
+        super(config, pathway, modelName, model);
+        this.tool = '';
+    }
+
+    // Parse the response from the OpenAI Extension API
+    parseResponse(data) {
+        const { choices } = data;
+        if (!choices || !choices.length) {
+            return data;
+        }
+
+        // if we got a choices array back with more than one choice, return the whole array
+        if (choices.length > 1) {
+            return choices;
+        }
+
+        // otherwise, return the first choice messages based on role
+        const messageResult = [];
+        for (const message of choices[0].messages) {
+            if (message.role === "tool") {
+                this.tool = message.content;
+            } else {
+                messageResult.push(message.content);
+            }
+        }
+        return messageResult.join('\n\n') ?? null;
+    }
+
+    // Set up parameters specific to the OpenAI Chat API
+    getRequestParameters(text, parameters, prompt) {
+        const reqParams = super.getRequestParameters(text, parameters, prompt);
+        reqParams.dataSources = this.model.dataSources || reqParams.dataSources || []; // add dataSources to the request parameters
+        const { roleInformation, indexName, semanticConfiguration } = parameters; // add roleInformation and indexName to the dataSource if given
+        for (const dataSource of reqParams.dataSources) {
+            if (!dataSource) continue;
+            if (!dataSource.parameters) dataSource.parameters = {};
+            roleInformation && (dataSource.parameters.roleInformation = roleInformation);
+            indexName && (dataSource.parameters.indexName = indexName);
+            semanticConfiguration && (dataSource.parameters.semanticConfiguration = semanticConfiguration);
+            dataSource.parameters.queryType = semanticConfiguration ? 'semantic' : 'simple';
+        }
+        return reqParams;
+    }
+
+    async execute(text, parameters, prompt, pathwayResolver) {
+        const result = await super.execute(text, parameters, prompt, pathwayResolver);
+        pathwayResolver.tool = this.tool; // add tool info back
+        return result;
+    }
+
+}
+
+export default OpenAIChatExtensionPlugin;
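A model entry of type OPENAI-CHAT-EXTENSION would carry the dataSources this plugin injects into each request. A hedged sketch targeting the Azure OpenAI extensions chat-completions endpoint (the resource names, API version, key names, and index below are illustrative assumptions, not taken from the package):

// Hypothetical model entry; the plugin copies model.dataSources into the
// request and fills indexName/semanticConfiguration/roleInformation from
// the pathway parameters when they are provided.
"oai-gpt35turbo-extension": {
    "type": "OPENAI-CHAT-EXTENSION",
    "url": "https://{{AZURE_OAI_RESOURCE}}.openai.azure.com/openai/deployments/{{DEPLOYMENT}}/extensions/chat/completions?api-version=2023-08-01-preview",
    "headers": { "api-key": "{{AZURE_OAI_KEY}}" },
    "dataSources": [{
        "type": "AzureCognitiveSearch",
        "parameters": {
            "endpoint": "https://{{SEARCH_RESOURCE}}.search.windows.net",
            "key": "{{SEARCH_KEY}}",
            "indexName": "my-index"
        }
    }]
}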
@@ -127,7 +127,7 @@ class OpenAIChatPlugin extends ModelPlugin {
             console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
         }
 
-        prompt && prompt.debugInfo && (prompt.debugInfo += `${separator}${JSON.stringify(data)}`);
+        prompt && prompt.debugInfo && (prompt.debugInfo += `\n${JSON.stringify(data)}`);
     }
 }
 
@@ -120,7 +120,7 @@ class OpenAICompletionPlugin extends ModelPlugin {
             console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
         }
 
-        prompt && prompt.debugInfo && (prompt.debugInfo += `${separator}${JSON.stringify(data)}`);
+        prompt && prompt.debugInfo && (prompt.debugInfo += `\n${JSON.stringify(data)}`);
     }
 }
 
@@ -219,7 +219,7 @@ class PalmChatPlugin extends ModelPlugin {
         }
 
         if (prompt && prompt.debugInfo) {
-            prompt.debugInfo += `${separator}${JSON.stringify(data)}`;
+            prompt.debugInfo += `\n${JSON.stringify(data)}`;
         }
     }
 }
@@ -124,7 +124,7 @@ class PalmCompletionPlugin extends ModelPlugin {
         }
 
         if (prompt && prompt.debugInfo) {
-            prompt.debugInfo += `${separator}${JSON.stringify(data)}`;
+            prompt.debugInfo += `\n${JSON.stringify(data)}`;
        }
    }
 }
@@ -17,12 +17,12 @@ const rootResolver = async (parent, args, contextValue, info) => {
 
     // Execute the request with timeout
     const result = await fulfillWithTimeout(pathway.resolver(parent, args, contextValue, info), pathway.timeout);
-    const { warnings, previousResult, savedContextId } = pathwayResolver;
+    const { warnings, previousResult, savedContextId, tool } = pathwayResolver;
 
     // Add request parameters back as debug
     const debug = pathwayResolver.prompts.map(prompt => prompt.debugInfo || '').join('\n').trim();
 
-    return { debug, result, warnings, previousResult, contextId: savedContextId }
+    return { debug, result, warnings, previousResult, tool, contextId: savedContextId }
 }
 
 // This resolver is used by the root resolver to process the request
@@ -13,9 +13,8 @@ const subscriptions = {
     (_, args, __, _info) => {
         const { requestIds } = args;
         for (const requestId of requestIds) {
-            if (!requestState[requestId]) {
-                console.error(`Subscription requestId: ${requestId} not found`);
-            } else {
+            if (requestState[requestId] && !requestState[requestId].started) {
+                requestState[requestId].started = true;
                 console.log(`Subscription starting async requestProgress, requestId: ${requestId}`);
                 const { resolver, args } = requestState[requestId];
                 resolver(args);
package/server/typeDef.js CHANGED
@@ -47,6 +47,7 @@ const typeDef = (pathway) => {
         previousResult: String
         warnings: [String]
         contextId: String
+        tool: String
     }`;
 
     const params = { ...defaultInputParameters, ...inputParameters };
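With tool now part of every generated result type (and returned by the root resolver earlier in this diff), clients can select it like any other field. A sketch of such a query as a JavaScript string (the summary pathway and its text parameter are illustrative assumptions):

// Hypothetical client query; "summary" stands in for any Cortex pathway.
const query = `
    query {
        summary(text: "Cortex is a GraphQL API for AI.") {
            result
            tool
            contextId
            warnings
        }
    }`;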
package/tests/server.js ADDED
@@ -0,0 +1,23 @@
+import 'dotenv/config'
+import { ApolloServer } from '@apollo/server';
+import { config } from '../config.js';
+import typeDefsresolversFactory from '../index.js';
+
+let typeDefs;
+let resolvers;
+
+const initTypeDefsResolvers = async () => {
+    const result = await typeDefsresolversFactory();
+    typeDefs = result.typeDefs;
+    resolvers = result.resolvers;
+};
+
+export const startTestServer = async () => {
+    await initTypeDefsResolvers();
+
+    return new ApolloServer({
+        typeDefs,
+        resolvers,
+        context: () => ({ config, requestState: {} }),
+    });
+};
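Putting the new test server to work: a minimal sketch assuming Apollo Server 4's executeOperation API (the query and pathway name are illustrative):

// startTestServer builds typeDefs/resolvers via the factory and returns
// an ApolloServer instance that can execute operations in-process.
import { startTestServer } from './tests/server.js';

const server = await startTestServer();
const response = await server.executeOperation({
    query: 'query { summary(text: "hello world") { result tool } }',
});
console.log(JSON.stringify(response.body));
await server.stop();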