@aj-archipelago/cortex 1.0.9 → 1.0.11

package/config.js CHANGED
@@ -9,42 +9,42 @@ const __dirname = path.dirname(fileURLToPath(import.meta.url));
 
 // Schema for config
 var config = convict({
-    pathwaysPath: {
+    basePathwayPath: {
         format: String,
-        default: path.join(process.cwd(), '/pathways'),
-        env: 'CORTEX_PATHWAYS_PATH'
+        default: path.join(__dirname, 'pathways', 'basePathway.js'),
+        env: 'CORTEX_BASE_PATHWAY_PATH'
     },
     corePathwaysPath: {
         format: String,
         default: path.join(__dirname, 'pathways'),
         env: 'CORTEX_CORE_PATHWAYS_PATH'
     },
-    basePathwayPath: {
+    cortexApiKey: {
         format: String,
-        default: path.join(__dirname, 'pathways', 'basePathway.js'),
-        env: 'CORTEX_BASE_PATHWAY_PATH'
-    },
-    storageConnectionString: {
-        doc: 'Connection string used for access to Storage',
-        format: '*',
-        default: '',
-        sensitive: true,
-        env: 'STORAGE_CONNECTION_STRING'
+        default: null,
+        env: 'CORTEX_API_KEY',
+        sensitive: true
     },
-    PORT: {
-        format: 'port',
-        default: 4000,
-        env: 'CORTEX_PORT'
+    cortexConfigFile: {
+        format: String,
+        default: null,
+        env: 'CORTEX_CONFIG_FILE'
     },
-    pathways: {
-        format: Object,
-        default: {}
+    defaultModelName: {
+        format: String,
+        default: null,
+        env: 'DEFAULT_MODEL_NAME'
     },
     enableCache: {
         format: Boolean,
         default: true,
         env: 'CORTEX_ENABLE_CACHE'
     },
+    enableDuplicateRequests: {
+        format: Boolean,
+        default: true,
+        env: 'CORTEX_ENABLE_DUPLICATE_REQUESTS'
+    },
     enableGraphqlCache: {
         format: Boolean,
         default: false,
@@ -55,17 +55,12 @@ var config = convict({
         default: false,
         env: 'CORTEX_ENABLE_REST'
     },
-    cortexApiKey: {
+    gcpServiceAccountKey: {
         format: String,
         default: null,
-        env: 'CORTEX_API_KEY',
+        env: 'GCP_SERVICE_ACCOUNT_KEY',
         sensitive: true
     },
-    defaultModelName: {
-        format: String,
-        default: null,
-        env: 'DEFAULT_MODEL_NAME'
-    },
     models: {
         format: Object,
         default: {
@@ -80,7 +75,8 @@ var config = convict({
                 "model": "gpt-3.5-turbo"
             },
             "requestsPerSecond": 10,
-            "maxTokenLength": 8192
+            "maxTokenLength": 8192,
+            "supportsStreaming": true,
         },
         "oai-whisper": {
             "type": "OPENAI-WHISPER",
@@ -95,11 +91,6 @@ var config = convict({
         },
         env: 'CORTEX_MODELS'
     },
-    openaiDefaultModel: {
-        format: String,
-        default: 'gpt-3.5-turbo',
-        env: 'OPENAI_DEFAULT_MODEL'
-    },
     openaiApiKey: {
         format: String,
         default: null,
@@ -111,10 +102,31 @@ var config = convict({
         default: 'https://api.openai.com/v1/completions',
         env: 'OPENAI_API_URL'
     },
-    cortexConfigFile: {
+    openaiDefaultModel: {
         format: String,
-        default: null,
-        env: 'CORTEX_CONFIG_FILE'
+        default: 'gpt-3.5-turbo',
+        env: 'OPENAI_DEFAULT_MODEL'
+    },
+    pathways: {
+        format: Object,
+        default: {}
+    },
+    pathwaysPath: {
+        format: String,
+        default: path.join(process.cwd(), '/pathways'),
+        env: 'CORTEX_PATHWAYS_PATH'
+    },
+    PORT: {
+        format: 'port',
+        default: 4000,
+        env: 'CORTEX_PORT'
+    },
+    storageConnectionString: {
+        doc: 'Connection string used for access to Storage',
+        format: '*',
+        default: '',
+        sensitive: true,
+        env: 'STORAGE_CONNECTION_STRING'
     },
     whisperMediaApiUrl: {
         format: String,
@@ -126,12 +138,6 @@ var config = convict({
         default: 'null',
         env: 'WHISPER_TS_API_URL'
     },
-    gcpServiceAccountKey: {
-        format: String,
-        default: null,
-        env: 'GCP_SERVICE_ACCOUNT_KEY',
-        sensitive: true
-    },
 });
 
 // Read in environment variables and set up service configuration
@@ -145,7 +151,7 @@ if (configFile && fs.existsSync(configFile)) {
     const openaiApiKey = config.get('openaiApiKey');
     if (!openaiApiKey) {
         throw console.log('No config file or api key specified. Please set the OPENAI_API_KEY to use OAI or use CORTEX_CONFIG_FILE environment variable to point at the Cortex configuration for your project.');
-    }else {
+    } else {
         console.log(`Using default model with OPENAI_API_KEY environment variable`)
     }
 }
@@ -196,7 +202,7 @@ const buildPathways = async (config) => {
 const buildModels = (config) => {
     const { models } = config.getProperties();
 
-    for (const [key, model] of Object.entries(models)) {
+    for (const [key, model] of Object.entries(models)) {
         // Compile handlebars templates for models
         models[key] = JSON.parse(HandleBars.compile(JSON.stringify(model))({ ...config.getEnv(), ...config.getProperties() }))
     }
@@ -213,8 +219,8 @@ const buildModels = (config) => {
     // Set default model name to the first model in the config in case no default is specified
     if (!config.get('defaultModelName')) {
        console.log('No default model specified, using first model as default.');
-       config.load({ defaultModelName: Object.keys(config.get('models'))[0] });
-    }
+        config.load({ defaultModelName: Object.keys(config.get('models'))[0] });
+    }
 
     return models;
 }
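
The schema above is a standard convict schema: each value resolves from its environment variable if set, otherwise from a loaded config file, otherwise from the declared default. A minimal sketch of that behavior (not taken from the package; the schema entry is copied from the new enableDuplicateRequests block above):

    import convict from 'convict';

    // Same shape as the new schema entry above.
    const config = convict({
        enableDuplicateRequests: {
            format: Boolean,
            default: true,
            env: 'CORTEX_ENABLE_DUPLICATE_REQUESTS'
        }
    });

    // With CORTEX_ENABLE_DUPLICATE_REQUESTS=false exported in the shell,
    // the environment variable overrides the declared default:
    console.log(config.get('enableDuplicateRequests')); // false

Note also that buildModels() (last hunk above) runs each model definition through Handlebars against the merged environment and config properties, so model entries loaded from a config file can reference other settings as {{placeholder}} templates.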
@@ -18,7 +18,7 @@
         "ioredis": "^5.3.1",
         "public-ip": "^6.0.1",
         "uuid": "^9.0.0",
-        "ytdl-core": "github:khlevon/node-ytdl-core#v4.11.4-patch.2"
+        "ytdl-core": "^4.11.5"
       }
     },
     "node_modules/@azure/abort-controller": {
@@ -1740,9 +1740,9 @@
       }
     },
     "node_modules/ytdl-core": {
-      "version": "0.0.0-development",
-      "resolved": "git+ssh://git@github.com/khlevon/node-ytdl-core.git#87450450caabb91f81afa6e66758bf2f629664a1",
-      "license": "MIT",
+      "version": "4.11.5",
+      "resolved": "https://registry.npmjs.org/ytdl-core/-/ytdl-core-4.11.5.tgz",
+      "integrity": "sha512-27LwsW4n4nyNviRCO1hmr8Wr5J1wLLMawHCQvH8Fk0hiRqrxuIu028WzbJetiYH28K8XDbeinYW4/wcHQD1EXA==",
       "dependencies": {
         "m3u8stream": "^0.8.6",
         "miniget": "^4.2.2",
@@ -2962,8 +2962,9 @@
       "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA=="
     },
     "ytdl-core": {
-      "version": "git+ssh://git@github.com/khlevon/node-ytdl-core.git#87450450caabb91f81afa6e66758bf2f629664a1",
-      "from": "ytdl-core@github:khlevon/node-ytdl-core#v4.11.4-patch.2",
+      "version": "4.11.5",
+      "resolved": "https://registry.npmjs.org/ytdl-core/-/ytdl-core-4.11.5.tgz",
+      "integrity": "sha512-27LwsW4n4nyNviRCO1hmr8Wr5J1wLLMawHCQvH8Fk0hiRqrxuIu028WzbJetiYH28K8XDbeinYW4/wcHQD1EXA==",
       "requires": {
         "m3u8stream": "^0.8.6",
         "miniget": "^4.2.2",
@@ -18,6 +18,6 @@
     "ioredis": "^5.3.1",
     "public-ip": "^6.0.1",
     "uuid": "^9.0.0",
-    "ytdl-core": "github:khlevon/node-ytdl-core#v4.11.4-patch.2"
+    "ytdl-core": "^4.11.5"
   }
 }
package/lib/request.js CHANGED
@@ -62,48 +62,163 @@ const postWithMonitor = async (model, url, data, axiosConfigObj) => {
     return cortexAxios.post(url, data, axiosConfigObj);
 }
 
-const MAX_RETRY = 10;
-const postRequest = async ({ url, data, params, headers, cache }, model) => {
-    const errors = []
+const MAX_RETRY = 10; // retries for error handling
+const MAX_DUPLICATE_REQUESTS = 3; // duplicate requests to manage latency spikes
+const DUPLICATE_REQUEST_AFTER = 10; // 10 seconds
+
+const postRequest = async ({ url, data, params, headers, cache }, model, requestId, pathway) => {
+    let promises = [];
     for (let i = 0; i < MAX_RETRY; i++) {
+        const modelProperties = config.get('models')[model];
+        const enableDuplicateRequests = pathway.enableDuplicateRequests !== undefined ? pathway.enableDuplicateRequests : config.get('enableDuplicateRequests');
+        let maxDuplicateRequests = enableDuplicateRequests ? MAX_DUPLICATE_REQUESTS : 1;
+        let duplicateRequestAfter = (pathway.duplicateRequestAfter || DUPLICATE_REQUEST_AFTER) * 1000;
+
+        if (enableDuplicateRequests) {
+            //console.log(`>>> [${requestId}] Duplicate requests enabled after ${duplicateRequestAfter / 1000} seconds`);
+        }
+
+        const axiosConfigObj = { params, headers, cache };
+        const streamRequested = (params.stream || data.stream);
+        if (streamRequested && modelProperties.supportsStreaming) {
+            axiosConfigObj.responseType = 'stream';
+            promises.push(limiters[model].schedule(() => postWithMonitor(model, url, data, axiosConfigObj)));
+        } else {
+            if (streamRequested) {
+                console.log(`>>> [${requestId}] ${model} does not support streaming - sending non-streaming request`);
+                axiosConfigObj.params.stream = false;
+                data.stream = false;
+            }
+            const controllers = Array.from({ length: maxDuplicateRequests }, () => new AbortController());
+            promises = controllers.map((controller, index) =>
+                new Promise((resolve, reject) => {
+                    const duplicateRequestTime = duplicateRequestAfter * Math.pow(2, index) - duplicateRequestAfter;
+                    const jitter = duplicateRequestTime * 0.2 * Math.random();
+                    const duplicateRequestTimeout = Math.max(0, duplicateRequestTime + jitter);
+                    setTimeout(async () => {
+                        try {
+                            if (!limiters[model]) {
+                                throw new Error(`No limiter for model ${model}!`);
+                            }
+                            const axiosConfigObj = { params, headers, cache };
+
+                            let response = null;
+
+                            if (!controller.signal?.aborted) {
+
+                                axiosConfigObj.signal = controller.signal;
+                                axiosConfigObj.headers['X-Cortex-Request-Index'] = index;
+
+                                if (index === 0) {
+                                    //console.log(`>>> [${requestId}] sending request to ${model} API ${axiosConfigObj.responseType === 'stream' ? 'with streaming' : ''}`);
+                                } else {
+                                    if (modelProperties.supportsStreaming) {
+                                        axiosConfigObj.responseType = 'stream';
+                                        axiosConfigObj.cache = false;
+                                    }
+                                    const logMessage = `>>> [${requestId}] taking too long - sending duplicate request ${index} to ${model} API ${axiosConfigObj.responseType === 'stream' ? 'with streaming' : ''}`;
+                                    const header = '>'.repeat(logMessage.length);
+                                    console.log(`\n${header}\n${logMessage}`);
+                                }
+
+                                response = await limiters[model].schedule(() => postWithMonitor(model, url, data, axiosConfigObj));
+
+                                if (!controller.signal?.aborted) {
+
+                                    //console.log(`<<< [${requestId}] received response for request ${index}`);
+
+                                    if (axiosConfigObj.responseType === 'stream') {
+                                        // Buffering and collecting the stream data
+                                        console.log(`<<< [${requestId}] buffering streaming response for request ${index}`);
+                                        response = await new Promise((resolve, reject) => {
+                                            let responseData = '';
+                                            response.data.on('data', (chunk) => {
+                                                responseData += chunk;
+                                                //console.log(`<<< [${requestId}] received chunk for request ${index}`);
+                                            });
+                                            response.data.on('end', () => {
+                                                response.data = JSON.parse(responseData);
+                                                resolve(response);
+                                            });
+                                            response.data.on('error', (error) => {
+                                                reject(error);
+                                            });
+                                        });
+                                    }
+                                }
+                            }
+
+                            resolve(response);
+
+                        } catch (error) {
+                            if (error.name === 'AbortError' || error.name === 'CanceledError') {
+                                //console.log(`XXX [${requestId}] request ${index} was cancelled`);
+                                reject(error);
+                            } else {
+                                console.log(`!!! [${requestId}] request ${index} failed with error: ${error?.response?.data?.error?.message || error}`);
+                                reject(error);
+                            }
+                        } finally {
+                            controllers.forEach(controller => controller.abort());
+                        }
+                    }, duplicateRequestTimeout);
+                })
+            );
+        }
+
         try {
-            if (i > 0) {
-                console.log(`Retrying request #retry ${i}: ${JSON.stringify(data)}...`);
-                await new Promise(r => setTimeout(r, 200 * Math.pow(2, i))); // exponential backoff
-            }
-            if (!limiters[model]) {
-                throw new Error(`No limiter for model ${model}!`);
+            const response = await Promise.race(promises);
+
+            if (response.status === 200) {
+                return response;
+            } else {
+                throw new Error(`Received error response: ${response.status}`);
             }
-            const axiosConfigObj = { params, headers, cache };
-            if (params.stream || data.stream) {
-                axiosConfigObj.responseType = 'stream';
-            }
-            return await limiters[model].schedule(() => postWithMonitor(model, url, data, axiosConfigObj));
-        } catch (e) {
-            console.error(`Failed request with data ${JSON.stringify(data)}: ${e} - ${e.response?.data?.error?.type || 'error'}: ${e.response?.data?.error?.message}`);
-            if (e.response?.status && e.response?.status === 429) {
-                monitors[model].incrementError429Count();
+        } catch (error) {
+            //console.error(`!!! [${requestId}] failed request with data ${JSON.stringify(data)}: ${error}`);
+            if (error.response) {
+                const status = error.response.status;
+                if ((status === 429) || (status >= 500 && status < 600)) {
+                    if (status === 429) {
+                        monitors[model].incrementError429Count();
+                    }
+                    console.log(`>>> [${requestId}] retrying request due to ${status} response. Retry count: ${i + 1}`);
+                    if (i < MAX_RETRY - 1) {
+                        const backoffTime = 200 * Math.pow(2, i);
+                        const jitter = backoffTime * 0.2 * Math.random();
+                        await new Promise(r => setTimeout(r, backoffTime + jitter));
+                    } else {
+                        throw error;
+                    }
+                } else {
+                    throw error;
+                }
+            } else {
+                throw error;
             }
-            errors.push(e);
         }
     }
-    return { error: errors };
-}
-
-const request = async (params, model) => {
-    const response = await postRequest(params, model);
-    const { error, data, cached } = response;
-    if (cached) {
-        console.info('=== Request served with cached response. ===');
-    }
-    if (error && error.length > 0) {
-        const lastError = error[error.length - 1];
-        return { error: lastError.toJSON() ?? lastError ?? error };
-    }
+};
 
-    return data;
+const request = async (params, model, requestId, pathway) => {
+    try {
+        const response = await postRequest(params, model, requestId, pathway);
+        const { error, data, cached } = response;
+        if (cached) {
+            console.info(`<<< [${requestId}] served with cached response.`);
+        }
+        if (error && error.length > 0) {
+            const lastError = error[error.length - 1];
+            return { error: lastError.toJSON() ?? lastError ?? error };
+        }
+        //console.log("<<< [${requestId}] response: ", data.choices[0].delta || data.choices[0])
+        return data;
+    } catch (error) {
+        console.error(`Error in request: ${error.message || error}`);
+        return { error: error };
+    }
 }
 
 export {
-    axios,request, postRequest, buildLimiters
+    axios, request, postRequest, buildLimiters
 };
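
The rewritten postRequest hedges against latency spikes: unless the call streams, it arms up to MAX_DUPLICATE_REQUESTS attempts per try, delays attempt N by duplicateRequestAfter * 2^N - duplicateRequestAfter plus up to 20% jitter, races them with Promise.race, and aborts the losers; 429 and 5xx responses then feed the outer retry loop, which backs off 200 * 2^i ms with the same jitter. A condensed, self-contained sketch of the hedging core (simplified names; the real code above additionally schedules through per-model rate limiters and buffers streaming bodies before racing):

    // Sketch only: `send` stands in for
    // limiters[model].schedule(() => postWithMonitor(...)) in the real code.
    const hedgedRequest = (send, { copies = 3, afterMs = 10000 } = {}) => {
        const controllers = Array.from({ length: copies }, () => new AbortController());
        const attempts = controllers.map((controller, index) => {
            // 0 ms for the first attempt, then afterMs, 3 * afterMs, ...
            // (afterMs * 2^index - afterMs), plus up to 20% jitter.
            const base = afterMs * Math.pow(2, index) - afterMs;
            const delay = base + base * 0.2 * Math.random();
            return new Promise((resolve, reject) => {
                setTimeout(async () => {
                    if (controller.signal.aborted) return; // a sibling already settled the race
                    try {
                        resolve(await send(controller.signal, index));
                    } catch (err) {
                        reject(err);
                    } finally {
                        // Whichever attempt settles first cancels the rest.
                        controllers.forEach(c => c.abort());
                    }
                }, delay);
            });
        });
        return Promise.race(attempts);
    };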
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex",
-  "version": "1.0.9",
+  "version": "1.0.11",
   "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
   "repository": {
     "type": "git",
@@ -14,11 +14,17 @@ export default {
     typeDef,
     rootResolver,
     resolver,
-    inputFormat: 'text',
-    useInputChunking: true,
-    useParallelChunkProcessing: false,
-    useInputSummarization: false,
-    truncateFromFront: false,
-    timeout: 120, // in seconds
+    inputFormat: 'text', // text or html - changes the behavior of the input chunking
+    useInputChunking: true, // true or false - enables input to be split into multiple chunks to meet context window size
+    useParallelChunkProcessing: false, // true or false - enables parallel processing of chunks
+    useInputSummarization: false, // true or false - instead of chunking, summarize the input and act on the summary
+    truncateFromFront: false, // true or false - if true, truncate from the front of the input instead of the back
+    timeout: 120, // seconds, cancels the pathway after this many seconds
+    duplicateRequestAfter: 10, // seconds, if the request is not completed after this many seconds, a backup request is sent
+    // override the default execution of the pathway
+    // callback signature: excuteOverride({args: object, runAllPrompts: function})
+    // args: the input arguments to the pathway
+    // runAllPrompts: a function that runs all prompts in the pathway and returns the result
+    executePathway: undefined,
 };
 
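
Every pathway inherits these defaults and may override them field by field. A hypothetical pathway module illustrating the new knobs (the prompt text and override body are invented; only the field names and the executePathway signature come from the defaults above):

    // Hypothetical pathway: each field overrides a default from the base pathway.
    export default {
        prompt: `Condense the following text:\n\n{{text}}`, // invented example prompt
        inputFormat: 'html',       // HTML-aware input chunking
        timeout: 300,              // cancel the pathway after 300 seconds
        duplicateRequestAfter: 20, // wait 20 s before sending a backup request
        executePathway: async ({ args, runAllPrompts }) => {
            // args: the pathway's input arguments; runAllPrompts: runs all prompts
            // in the pathway and returns the result (per the comment above).
            const result = await runAllPrompts();
            return result;
        },
    };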
package/pathways/index.js CHANGED
@@ -8,6 +8,7 @@ import sentiment from './sentiment.js';
 import summary from './summary.js';
 import sys_openai_chat from './sys_openai_chat.js';
 import sys_openai_completion from './sys_openai_completion.js';
+import test_cohere_summarize from './test_cohere_summarize.js';
 import test_langchain from './test_langchain.mjs';
 import test_palm_chat from './test_palm_chat.js';
 import transcribe from './transcribe.js';
@@ -24,6 +25,7 @@ export {
     summary,
     sys_openai_chat,
     sys_openai_completion,
+    test_cohere_summarize,
     test_langchain,
     test_palm_chat,
     transcribe,
@@ -21,7 +21,7 @@ export default {
         const originalTargetLength = args.targetLength;
 
         // If targetLength is not provided, execute the prompt once and return the result.
-        if (originalTargetLength === 0) {
+        if (originalTargetLength === 0 || originalTargetLength === null) {
             let pathwayResolver = new PathwayResolver({ config, pathway, args, requestState });
             return await pathwayResolver.resolve(args);
         }
@@ -0,0 +1,10 @@
+// test_cohere_summarize.js
+// Summarize text with the Cohere model
+
+export default {
+    // Uncomment the following line to enable caching for this prompt, if desired.
+    // enableCache: true,
+
+    prompt: `{{text}}`,
+    model: 'cohere-summarize'
+};
@@ -8,6 +8,7 @@ export default {
         wordTimestamped: false,
     },
     timeout: 3600, // in seconds
+    enableDuplicateRequests: false,
 };
 
 
@@ -16,5 +16,6 @@ export default {
     // Set the timeout for the translation process, in seconds.
     timeout: 400,
     inputChunkSize: 500,
+    enableDuplicateRequests: false,
 };
 
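
Both of these long-running pathways opt out of hedging, since re-issuing a multi-minute transcription or translation request would mostly burn quota. Per the resolution logic in lib/request.js above, the pathway-level flag takes precedence over the global setting:

    // Effective hedging setting, mirroring the check in lib/request.js:
    const enableDuplicateRequests =
        pathway.enableDuplicateRequests !== undefined
            ? pathway.enableDuplicateRequests        // transcribe/translate: false
            : config.get('enableDuplicateRequests'); // other pathways: global default (true)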
package/server/chunker.js CHANGED
@@ -152,7 +152,7 @@ const getSemanticChunks = (text, chunkSize, inputFormat = 'text') => {
         }
     });
 
-    if (chunks.some(chunk => encode(chunk).length > chunkSize)) {
+    if (chunks.filter(c => determineTextFormat(c) === 'html').some(chunk => encode(chunk).length > chunkSize)) {
         throw new Error('The HTML contains elements that are larger than the chunk size. Please try again with HTML that has smaller elements.');
     }
 
package/server/graphql.js CHANGED
@@ -20,6 +20,7 @@ import { cancelRequestResolver } from './resolver.js';
 import { buildPathways, buildModels } from '../config.js';
 import { requestState } from './requestState.js';
 import { buildRestEndpoints } from './rest.js';
+import { startTestServer } from '../tests/server.js'
 
 // Utility functions
 // Server plugins
@@ -222,7 +223,7 @@ const build = async (config) => {
         });
     };
 
-    return { server, startServer, cache, plugins, typeDefs, resolvers }
+    return { server, startServer, startTestServer, cache, plugins, typeDefs, resolvers }
 }
 
 
package/server/parser.js CHANGED
@@ -37,9 +37,21 @@ const parseCommaSeparatedList = (str) => {
     return str.split(',').map(s => s.trim()).filter(s => s.length);
 }
 
+const isCommaSeparatedList = (data) => {
+    const commaSeparatedPattern = /^([^,\n]+,)+[^,\n]+$/;
+    return commaSeparatedPattern.test(data.trim());
+}
+
+const isNumberedList = (data) => {
+    const numberedListPattern = /^\s*[\[\{\(]*\d+[\s.=\-:,;\]\)\}]/gm;
+    return numberedListPattern.test(data.trim());
+}
+
 export {
     regexParser,
     parseNumberedList,
     parseNumberedObjectList,
     parseCommaSeparatedList,
+    isCommaSeparatedList,
+    isNumberedList,
 };
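
The two new detectors classify raw model output before a parser is chosen; the behavior below follows directly from the regexes above (import path shown for illustration):

    import { isCommaSeparatedList, isNumberedList } from './parser.js';

    isCommaSeparatedList('red, green, blue');    // true - one line, two or more items
    isCommaSeparatedList('no commas here');      // false
    isCommaSeparatedList('a, b\nc, d');          // false - the pattern rejects newlines

    isNumberedList('1. First point\n2. Second'); // true
    isNumberedList('(1) First point');           // true - leading brackets are allowed
    isNumberedList('Step one, then two');        // false - no leading number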
@@ -3,10 +3,13 @@ import OpenAIChatPlugin from './plugins/openAiChatPlugin.js';
 import OpenAICompletionPlugin from './plugins/openAiCompletionPlugin.js';
 import AzureTranslatePlugin from './plugins/azureTranslatePlugin.js';
 import OpenAIWhisperPlugin from './plugins/openAiWhisperPlugin.js';
+import OpenAIChatExtensionPlugin from './plugins/openAiChatExtensionPlugin.js';
 import LocalModelPlugin from './plugins/localModelPlugin.js';
 import PalmChatPlugin from './plugins/palmChatPlugin.js';
 import PalmCompletionPlugin from './plugins/palmCompletionPlugin.js';
 import PalmCodeCompletionPlugin from './plugins/palmCodeCompletionPlugin.js';
+import CohereGeneratePlugin from './plugins/cohereGeneratePlugin.js';
+import CohereSummarizePlugin from './plugins/cohereSummarizePlugin.js';
 
 class PathwayPrompter {
     constructor(config, pathway, modelName, model) {
@@ -17,6 +20,9 @@ class PathwayPrompter {
             case 'OPENAI-CHAT':
                 plugin = new OpenAIChatPlugin(config, pathway, modelName, model);
                 break;
+            case 'OPENAI-CHAT-EXTENSION':
+                plugin = new OpenAIChatExtensionPlugin(config, pathway, modelName, model);
+                break;
             case 'AZURE-TRANSLATE':
                 plugin = new AzureTranslatePlugin(config, pathway, modelName, model);
                 break;
@@ -38,6 +44,12 @@ class PathwayPrompter {
             case 'PALM-CODE-COMPLETION':
                 plugin = new PalmCodeCompletionPlugin(config, pathway, modelName, model);
                 break;
+            case 'COHERE-GENERATE':
+                plugin = new CohereGeneratePlugin(config, pathway, modelName, model);
+                break;
+            case 'COHERE-SUMMARIZE':
+                plugin = new CohereSummarizePlugin(config, pathway, modelName, model);
+                break;
             default:
                 throw new Error(`Unsupported model type: ${model.type}`);
         }
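
PathwayPrompter dispatches on model.type, so the new Cohere summarize support is activated by a models entry whose type matches the new case, paired with a pathway that names it (as test_cohere_summarize above does with model: 'cohere-summarize'). A hypothetical models entry for a Cortex config file (the url is an assumption, not taken from the package; requestsPerSecond and maxTokenLength follow the shape of the default models in config.js):

    "cohere-summarize": {
        "type": "COHERE-SUMMARIZE",
        "url": "https://api.cohere.ai/v1/summarize",
        "requestsPerSecond": 10,
        "maxTokenLength": 4096
    }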