@aj-archipelago/cortex 0.0.10 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/README.md +192 -28
  2. package/config.js +17 -11
  3. package/graphql/chunker.js +3 -3
  4. package/graphql/graphql.js +19 -22
  5. package/graphql/parser.js +1 -1
  6. package/graphql/pathwayPrompter.js +8 -9
  7. package/graphql/pathwayResolver.js +11 -13
  8. package/graphql/pathwayResponseParser.js +2 -2
  9. package/graphql/plugins/azureTranslatePlugin.js +2 -2
  10. package/graphql/plugins/modelPlugin.js +7 -6
  11. package/graphql/plugins/openAiChatPlugin.js +2 -2
  12. package/graphql/plugins/openAiCompletionPlugin.js +4 -3
  13. package/graphql/plugins/openAiWhisperPlugin.js +7 -6
  14. package/graphql/prompt.js +1 -1
  15. package/graphql/pubsub.js +2 -2
  16. package/graphql/requestState.js +1 -1
  17. package/graphql/resolver.js +4 -4
  18. package/graphql/subscriptions.js +5 -4
  19. package/graphql/typeDef.js +53 -53
  20. package/index.js +5 -5
  21. package/lib/fileChunker.js +15 -11
  22. package/lib/keyValueStorageClient.js +5 -5
  23. package/lib/promiser.js +2 -2
  24. package/lib/request.js +11 -9
  25. package/lib/requestMonitor.js +2 -2
  26. package/package.json +4 -2
  27. package/pathways/basePathway.js +5 -4
  28. package/pathways/bias.js +2 -2
  29. package/pathways/chat.js +3 -2
  30. package/pathways/complete.js +4 -2
  31. package/pathways/edit.js +3 -2
  32. package/pathways/entities.js +3 -2
  33. package/pathways/index.js +25 -12
  34. package/pathways/lc_test.mjs +99 -0
  35. package/pathways/paraphrase.js +3 -2
  36. package/pathways/sentiment.js +3 -2
  37. package/pathways/summary.js +6 -4
  38. package/pathways/transcribe.js +4 -2
  39. package/pathways/translate.js +3 -2
  40. package/start.js +5 -2
  41. package/tests/chunkfunction.test.js +3 -4
  42. package/tests/chunking.test.js +8 -3
  43. package/tests/main.test.js +28 -14
  44. package/tests/translate.test.js +8 -4
package/README.md CHANGED
@@ -5,7 +5,7 @@ Modern AI models are transformational, but a number of complexities emerge when
5
5
  ## Features
6
6
 
7
7
  * Simple architecture to build custom functional endpoints (called `pathways`) that implement common NL AI tasks. Default pathways include chat, summarization, translation, paraphrasing, completion, spelling and grammar correction, entity extraction, sentiment analysis, and bias analysis.
8
- * Allows for building multi-model, multi-vendor, and model-agnostic pathways (choose the right model or combination of models for the job, implement redundancy) with built-in support for OpenAI GPT-3, GPT-3.5 (chatGPT), and GPT-4 models - both from OpenAI directly and through Azure OpenAI, OpenAI Whisper, Azure Translator, and more.
8
+ * Allows for building multi-model, multi-tool, multi-vendor, and model-agnostic pathways (choose the right model or combination of models and tools for the job, implement redundancy) with built-in support for OpenAI GPT-3, GPT-3.5 (chatGPT), and GPT-4 models - both from OpenAI directly and through Azure OpenAI, OpenAI Whisper, Azure Translator, LangChain.js and more.
9
9
  * Easy, templatized prompt definition with flexible support for most prompt engineering techniques and strategies ranging from simple single prompts to complex custom prompt chains with context continuity.
10
10
  * Built in support for long-running, asynchronous operations with progress updates or streaming responses
11
11
  * Integrated context persistence: have your pathways "remember" whatever you want and use it on the next request to the model
@@ -15,7 +15,7 @@ Modern AI models are transformational, but a number of complexities emerge when
15
15
  * Caching of repeated queries to provide instant results and avoid excess requests to the underlying model in repetitive use cases (chat bots, unit tests, etc.)
16
16
 
17
17
  ## Installation
18
- In order to use Cortex, you must first have a working Node.js environment. The version of Node.js should be at least 14 or higher. After verifying that you have the correct version of Node.js installed, you can get the simplest form up and running with a couple of commands.
18
+ In order to use Cortex, you must first have a working Node.js environment. The version of Node.js should be 18 or higher (lower versions are supported with some reduction in features). After verifying that you have the correct version of Node.js installed, you can get the simplest form up and running with a couple of commands.
19
19
  ## Quick Start
20
20
  ```sh
21
21
  git clone git@github.com:aj-archipelago/cortex.git
@@ -55,18 +55,20 @@ apolloClient.query({
55
55
  ## Cortex Pathways: Supercharged Prompts
56
56
  Pathways are a core concept in Cortex. Each pathway is a single JavaScript file that encapsulates the data and logic needed to define a functional API endpoint. When the client makes a request via the API, one or more pathways are executed and the result is sent back to the client. Pathways can be very simple:
57
57
  ```js
58
- module.exports = {
59
- prompt: `{{text}}\n\nRewrite the above using British English spelling:`
58
+ export default {
59
+ prompt: `{{text}}\n\nRewrite the above using British English spelling:`
60
60
  }
61
61
  ```
62
62
  The real power of Cortex starts to show as the pathways get more complex. This pathway, for example, uses a three-part sequential prompt to ensure that specific people and place names are correctly translated:
63
63
  ```js
64
- prompt:
65
- [
66
- `{{{text}}}\nCopy the names of all people and places exactly from this document in the language above:\n`,
67
- `Original Language:\n{{{previousResult}}}\n\n{{to}}:\n`,
68
- `Entities in the document:\n\n{{{previousResult}}}\n\nDocument:\n{{{text}}}\nRewrite the document in {{to}}. If the document is already in {{to}}, copy it exactly below:\n`
69
- ]
64
+ export default {
65
+ prompt:
66
+ [
67
+ `{{{text}}}\nCopy the names of all people and places exactly from this document in the language above:\n`,
68
+ `Original Language:\n{{{previousResult}}}\n\n{{to}}:\n`,
69
+ `Entities in the document:\n\n{{{previousResult}}}\n\nDocument:\n{{{text}}}\nRewrite the document in {{to}}. If the document is already in {{to}}, copy it exactly below:\n`
70
+ ]
71
+ }
70
72
  ```
71
73
  Cortex pathway prompt enhancements include:
72
74
  * **Templatized prompt definition**: Pathways allow for easy and flexible prompt definition using Handlebars templating. This makes it simple to create and modify prompts using variables and context from the application as well as extensible internal functions provided by Cortex.
@@ -109,7 +111,7 @@ If you look closely at the examples above, you'll notice embedded parameters lik
109
111
  ### Parameters
110
112
  Pathways support an arbitrary number of input parameters. These are defined in the pathway like this:
111
113
  ```js
112
- module.exports = {
114
+ export default {
113
115
  prompt:
114
116
  [
115
117
  `{{{chatContext}}}\n\n{{{text}}}\n\nGiven the information above, create a short summary of the conversation to date making sure to include all of the personal details about the user that you encounter:\n\n`,
@@ -161,7 +163,7 @@ A core function of Cortex is dealing with token limited interfaces. To this end,
161
163
 
162
164
  Cortex provides built in functions to turn loosely formatted text output from the model API calls into structured objects for return to the application. Specifically, Cortex provides parsers for numbered lists of strings and numbered lists of objects. These are used in pathways like this:
163
165
  ```js
164
- module.exports = {
166
+ export default {
165
167
  temperature: 0,
166
168
  prompt: `{{text}}\n\nList the top {{count}} entities and their definitions for the above in the format {{format}}:`,
167
169
  format: `(name: definition)`,
@@ -179,44 +181,128 @@ The resolver property defines the function that processes the input and returns
179
181
 
180
182
  The core pathway `summary.js` below is implemented using custom pathway logic and a custom resolver to effectively target a specific summary length:
181
183
  ```js
182
- const { semanticTruncate } = require('../graphql/chunker');
183
- const { PathwayResolver } = require('../graphql/pathwayResolver');
184
- module.exports = {
185
- prompt: `{{{text}}}\n\nWrite a summary of the above text:\n\n`,
184
+ // summary.js
185
+ // Text summarization module with custom resolver
186
+ // This module exports a prompt that takes an input text and generates a summary using a custom resolver.
187
+
188
+ // Import required modules
189
+ import { semanticTruncate } from '../graphql/chunker.js';
190
+ import { PathwayResolver } from '../graphql/pathwayResolver.js';
191
+
192
+ export default {
193
+ // The main prompt function that takes the input text and asks to generate a summary.
194
+ prompt: `{{{text}}}\n\nWrite a summary of the above text. If the text is in a language other than english, make sure the summary is written in the same language:\n\n`,
195
+
196
+ // Define input parameters for the prompt, such as the target length of the summary.
186
197
  inputParameters: {
187
- targetLength: 500,
198
+ targetLength: 0,
188
199
  },
200
+
201
+ // Custom resolver to generate summaries by reprompting if they are too long or too short.
189
202
  resolver: async (parent, args, contextValue, info) => {
190
203
  const { config, pathway, requestState } = contextValue;
191
204
  const originalTargetLength = args.targetLength;
192
- const errorMargin = 0.2;
205
+
206
+ // If targetLength is not provided, execute the prompt once and return the result.
207
+ if (originalTargetLength === 0) {
208
+ let pathwayResolver = new PathwayResolver({ config, pathway, args, requestState });
209
+ return await pathwayResolver.resolve(args);
210
+ }
211
+
212
+ const errorMargin = 0.1;
193
213
  const lowTargetLength = originalTargetLength * (1 - errorMargin);
194
214
  const targetWords = Math.round(originalTargetLength / 6.6);
195
- // if the text is shorter than the summary length, just return the text
215
+
216
+ // If the text is shorter than the summary length, just return the text.
196
217
  if (args.text.length <= originalTargetLength) {
197
218
  return args.text;
198
219
  }
220
+
199
221
  const MAX_ITERATIONS = 5;
200
222
  let summary = '';
201
- let bestSummary = '';
202
- let pathwayResolver = new PathwayResolver({ config, pathway, requestState });
203
- // modify the prompt to be words-based instead of characters-based
204
- pathwayResolver.pathwayPrompt = `{{{text}}}\n\nWrite a summary of the above text in exactly ${targetWords} words:\n\n`
223
+ let pathwayResolver = new PathwayResolver({ config, pathway, args, requestState });
224
+
225
+ // Modify the prompt to be words-based instead of characters-based.
226
+ pathwayResolver.pathwayPrompt = `Write a summary of all of the text below. If the text is in a language other than english, make sure the summary is written in the same language. Your summary should be ${targetWords} words in length.\n\nText:\n\n{{{text}}}\n\nSummary:\n\n`
227
+
205
228
  let i = 0;
206
- // reprompt if summary is too long or too short
207
- while (((summary.length > originalTargetLength) || (summary.length < lowTargetLength)) && i < MAX_ITERATIONS) {
229
+ // Make sure it's long enough to start
230
+ while ((summary.length < lowTargetLength) && i < MAX_ITERATIONS) {
208
231
  summary = await pathwayResolver.resolve(args);
209
232
  i++;
210
233
  }
211
- // if the summary is still too long, truncate it
234
+
235
+ // If it's too long, it could be because the input text was chunked
236
+ // and now we have all the chunks together. We can summarize that
237
+ // to get a comprehensive summary.
238
+ if (summary.length > originalTargetLength) {
239
+ pathwayResolver.pathwayPrompt = `Write a summary of all of the text below. If the text is in a language other than english, make sure the summary is written in the same language. Your summary should be ${targetWords} words in length.\n\nText:\n\n${summary}\n\nSummary:\n\n`
240
+ summary = await pathwayResolver.resolve(args);
241
+ i++;
242
+
243
+ // Now make sure it's not too long
244
+ while ((summary.length > originalTargetLength) && i < MAX_ITERATIONS) {
245
+ pathwayResolver.pathwayPrompt = `${summary}\n\nIs that less than ${targetWords} words long? If not, try again using a length of no more than ${targetWords} words.\n\n`;
246
+ summary = await pathwayResolver.resolve(args);
247
+ i++;
248
+ }
249
+ }
250
+
251
+ // If the summary is still too long, truncate it.
212
252
  if (summary.length > originalTargetLength) {
213
253
  return semanticTruncate(summary, originalTargetLength);
214
254
  } else {
215
255
  return summary;
216
256
  }
217
257
  }
218
- }
258
+ };
219
259
  ```
260
+ ### LangChain.js Support
261
+ The ability to define a custom resolver function in Cortex pathways gives Cortex the flexibility to cleanly incorporate alternate pipelines and technology stacks into the execution of a pathway. LangChain.js (https://github.com/hwchase17/langchainjs) is a very popular and well-supported mechanism for wiring together models, tools, and logic to achieve some amazing results. We have developed specific functionality to support LangChain in the Cortex prompt execution framework and will continue to build features to fully integrate it with Cortex prompt execution contexts.
262
+
263
+ Below is an example pathway integrating with one of the example agents from the LangChain docs. You can see the seamless integration of Cortex's configuration and GraphQL / REST interface logic.
264
+ ```js
265
+ // lc_test.js
266
+ // LangChain Cortex integration test
267
+
268
+ // Import required modules
269
+ import { OpenAI } from "langchain/llms";
270
+ import { initializeAgentExecutor } from "langchain/agents";
271
+ import { SerpAPI, Calculator } from "langchain/tools";
272
+
273
+ export default {
274
+
275
+ // Implement custom logic and interaction with Cortex
276
+ // in custom resolver.
277
+
278
+ resolver: async (parent, args, contextValue, info) => {
279
+
280
+ const { config } = contextValue;
281
+ const openAIApiKey = config.get('openaiApiKey');
282
+ const serpApiKey = config.get('serpApiKey');
283
+
284
+ const model = new OpenAI({ openAIApiKey: openAIApiKey, temperature: 0 });
285
+ const tools = [new SerpAPI( serpApiKey ), new Calculator()];
286
+
287
+ const executor = await initializeAgentExecutor(
288
+ tools,
289
+ model,
290
+ "zero-shot-react-description"
291
+ );
292
+
293
+ console.log(`====================`);
294
+ console.log("Loaded langchain agent.");
295
+ const input = args.text;
296
+ console.log(`Executing with input "${input}"...`);
297
+ const result = await executor.call({ input });
298
+ console.log(`Got output ${result.output}`);
299
+ console.log(`====================`);
300
+
301
+ return result?.output;
302
+ },
303
+ };
304
+ ```
305
+
220
306
  ### Building and Loading Pathways
221
307
 
222
308
  Pathways are loaded from modules in the `pathways` directory. The pathways are built and loaded into the `config` object using the `buildPathways` function. The `buildPathways` function loads the base pathway, the core pathways, and any custom pathways. It then creates a new object that contains all the pathways and adds it to the `pathways` property of the config object. The order of loading means that custom pathways will always override any core pathways that Cortex provides. While pathways are designed to be self-contained, you can override some pathway properties - including whether they're even available at all - in the `pathways` section of the config file.
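
For illustration, a minimal custom pathway that overrides one of the core pathways might look like the sketch below (the file path and prompt text are hypothetical, not taken from this release):

```js
// pathways/paraphrase.js (hypothetical custom pathway in your project)
// Because it shares its name with the core paraphrase pathway, buildPathways
// loads it after the core pathways, so this definition overrides the built-in one.
export default {
    prompt: `{{{text}}}\n\nRewrite the text above in simpler language, keeping the meaning the same:\n\n`,
};
```
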
@@ -236,7 +322,84 @@ Below are the default pathways provided with Cortex. These can be used as is, ov
236
322
  - `translate`: Translates text from one language to another
237
323
  ## Extensibility
238
324
 
239
- Cortex is designed to be highly extensible. This allows you to customize the API to fit your needs. You can add new features, modify existing features, and even add integrations with other APIs and models.
325
+ Cortex is designed to be highly extensible. This allows you to customize the API to fit your needs. You can add new features, modify existing features, and even add integrations with other APIs and models. Here's an example of what an extended project might look like:
326
+
327
+ ### Cortex Internal Implementation
328
+
329
+ - **config**
330
+ - default.json
331
+ - package-lock.json
332
+ - package.json
333
+ - **pathways**
334
+ - chat_code.js
335
+ - chat_context.js
336
+ - chat_persist.js
337
+ - expand_story.js
338
+ - ...whole bunch of custom pathways
339
+ - translate_gpt4.js
340
+ - translate_turbo.js
341
+ - start.js
342
+
343
+ Where `default.json` holds all of your specific configuration:
344
+ ```js
345
+ {
346
+ "defaultModelName": "oai-gpturbo",
347
+ "models": {
348
+ "oai-td3": {
349
+ "type": "OPENAI-COMPLETION",
350
+ "url": "https://api.openai.com/v1/completions",
351
+ "headers": {
352
+ "Authorization": "Bearer {{OPENAI_API_KEY}}",
353
+ "Content-Type": "application/json"
354
+ },
355
+ "params": {
356
+ "model": "text-davinci-003"
357
+ },
358
+ "requestsPerSecond": 10,
359
+ "maxTokenLength": 4096
360
+ },
361
+ "oai-gpturbo": {
362
+ "type": "OPENAI-CHAT",
363
+ "url": "https://api.openai.com/v1/chat/completions",
364
+ "headers": {
365
+ "Authorization": "Bearer {{OPENAI_API_KEY}}",
366
+ "Content-Type": "application/json"
367
+ },
368
+ "params": {
369
+ "model": "gpt-3.5-turbo"
370
+ },
371
+ "requestsPerSecond": 10,
372
+ "maxTokenLength": 8192
373
+ },
374
+ "oai-gpt4": {
375
+ "type": "OPENAI-CHAT",
376
+ "url": "https://api.openai.com/v1/chat/completions",
377
+ "headers": {
378
+ "Authorization": "Bearer {{OPENAI_API_KEY}}",
379
+ "Content-Type": "application/json"
380
+ },
381
+ "params": {
382
+ "model": "gpt-4"
383
+ },
384
+ "requestsPerSecond": 10,
385
+ "maxTokenLength": 8192
386
+ }
387
+ },
388
+ "enableCache": false,
389
+ "enableRestEndpoints": false
390
+ }
391
+ ```
392
+
393
+ ...and `start.js` is really simple:
394
+ ```js
395
+ import cortex from '@aj-archipelago/cortex';
396
+
397
+ (async () => {
398
+ const { startServer } = await cortex();
399
+ startServer && startServer();
400
+ })();
401
+ ```
402
+
240
403
  ## Configuration
241
404
  Configuration of Cortex is done via a [convict](https://github.com/mozilla/node-convict/tree/master) object called `config`. The `config` object is built by combining the default values and any values specified in a configuration file or environment variables. The environment variables take precedence over the values in the configuration file. Below are the configurable properties and their defaults:
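
As a minimal sketch of the pattern behind these properties, the snippet below builds a standalone convict schema using two keys that appear elsewhere in this diff (`openaiApiKey`/`OPENAI_API_KEY` and `serpApiKey`/`SERPAPI_API_KEY`); it is illustrative only and not Cortex's actual schema file:

```js
import convict from 'convict';

// Illustrative schema only: Cortex's real schema in config.js has many more
// properties, but each follows this same format / default / env pattern.
const config = convict({
    openaiApiKey: {
        format: String,
        default: null,
        env: 'OPENAI_API_KEY',
        sensitive: true
    },
    serpApiKey: {
        format: String,
        default: null,
        env: 'SERPAPI_API_KEY',
        sensitive: true
    }
});

// Environment variables take precedence over defaults (and over values loaded
// from a config file), and pathway code reads the merged result via config.get().
console.log(Boolean(config.get('openaiApiKey')));
```
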
242
405
 
@@ -280,5 +443,6 @@ Detailed documentation on Cortex's API can be found in the /graphql endpoint of
280
443
  ## Roadmap
281
444
  Cortex is a constantly evolving project, and the following features are coming soon:
282
445
 
446
+ * Prompt execution context preservation between calls (to enable interactive, multi-call integrations with LangChain and other technologies)
283
447
  * Model-specific cache key optimizations to increase hit rate and reduce cache size
284
448
  * Structured analytics and reporting on AI API call frequency, cost, cache hit rate, etc.
package/config.js CHANGED
@@ -1,7 +1,8 @@
1
- const path = require('path');
2
- const convict = require('convict');
3
- const handlebars = require("handlebars");
4
- const fs = require('fs');
1
+ import path from 'path';
2
+ const __dirname = path.dirname(new URL(import.meta.url).pathname);
3
+ import convict from 'convict';
4
+ import handlebars from 'handlebars';
5
+ import fs from 'fs';
5
6
 
6
7
  // Schema for config
7
8
  var config = convict({
@@ -108,7 +109,13 @@ var config = convict({
108
109
  format: String,
109
110
  default: null,
110
111
  env: 'CORTEX_CONFIG_FILE'
111
- }
112
+ },
113
+ serpApiKey: {
114
+ format: String,
115
+ default: null,
116
+ env: 'SERPAPI_API_KEY',
117
+ sensitive: true
118
+ },
112
119
  });
113
120
 
114
121
  // Read in environment variables and set up service configuration
@@ -127,22 +134,21 @@ if (configFile && fs.existsSync(configFile)) {
127
134
  }
128
135
  }
129
136
 
130
-
131
137
  // Build and load pathways to config
132
- const buildPathways = (config) => {
138
+ const buildPathways = async (config) => {
133
139
  const { pathwaysPath, corePathwaysPath, basePathwayPath } = config.getProperties();
134
140
 
135
141
  // Load cortex base pathway
136
- const basePathway = require(basePathwayPath);
142
+ const basePathway = await import(basePathwayPath).then(module => module.default);
137
143
 
138
144
  // Load core pathways, default from the Cortex package
139
145
  console.log('Loading core pathways from', corePathwaysPath)
140
- let loadedPathways = require(corePathwaysPath);
146
+ let loadedPathways = await import(`${corePathwaysPath}/index.js`).then(module => module);
141
147
 
142
148
  // Load custom pathways and override core pathways if same
143
149
  if (pathwaysPath && fs.existsSync(pathwaysPath)) {
144
150
  console.log('Loading custom pathways from', pathwaysPath)
145
- const customPathways = require(pathwaysPath);
151
+ const customPathways = await import(`${pathwaysPath}/index.js`).then(module => module);
146
152
  loadedPathways = { ...loadedPathways, ...customPathways };
147
153
  }
148
154
 
@@ -191,4 +197,4 @@ const buildModels = (config) => {
191
197
  // TODO: Perform validation
192
198
  // config.validate({ allowed: 'strict' });
193
199
 
194
- module.exports = { config, buildPathways, buildModels };
200
+ export { config, buildPathways, buildModels };
package/graphql/chunker.js CHANGED
@@ -1,4 +1,4 @@
1
- const { encode, decode } = require('gpt-3-encoder')
1
+ import { encode, decode } from 'gpt-3-encoder';
2
2
 
3
3
  const getLastNToken = (text, maxTokenLen) => {
4
4
  const encoded = encode(text);
@@ -132,6 +132,6 @@ const semanticTruncate = (text, maxLength) => {
132
132
  : truncatedText + "...";
133
133
  };
134
134
 
135
- module.exports = {
135
+ export {
136
136
  getSemanticChunks, semanticTruncate, getLastNToken, getFirstNToken
137
- }
137
+ };
package/graphql/graphql.js CHANGED
@@ -1,23 +1,21 @@
1
- const { createServer } = require('http');
2
- const {
1
+ import { createServer } from 'http';
2
+ import {
3
3
  ApolloServerPluginDrainHttpServer,
4
4
  ApolloServerPluginLandingPageLocalDefault,
5
- } = require("apollo-server-core");
6
- const { makeExecutableSchema } = require('@graphql-tools/schema');
7
- const { WebSocketServer } = require('ws');
8
- const { useServer } = require('graphql-ws/lib/use/ws');
9
- const express = require('express');
10
-
11
- /// Create apollo graphql server
12
- const Keyv = require("keyv");
13
- const { KeyvAdapter } = require("@apollo/utils.keyvadapter");
14
- const responseCachePlugin = require('apollo-server-plugin-response-cache').default
15
-
16
- const subscriptions = require('./subscriptions');
17
- const { buildLimiters } = require('../lib/request');
18
- const { cancelRequestResolver } = require('./resolver');
19
- const { buildPathways, buildModels } = require('../config');
20
- const { requestState } = require('./requestState');
5
+ } from 'apollo-server-core';
6
+ import { makeExecutableSchema } from '@graphql-tools/schema';
7
+ import { WebSocketServer } from 'ws';
8
+ import { useServer } from 'graphql-ws/lib/use/ws';
9
+ import express from 'express';
10
+ import { ApolloServer } from 'apollo-server-express';
11
+ import Keyv from 'keyv';
12
+ import { KeyvAdapter } from '@apollo/utils.keyvadapter';
13
+ import responseCachePlugin from 'apollo-server-plugin-response-cache';
14
+ import subscriptions from './subscriptions.js';
15
+ import { buildLimiters } from '../lib/request.js';
16
+ import { cancelRequestResolver } from './resolver.js';
17
+ import { buildPathways, buildModels } from '../config.js';
18
+ import { requestState } from './requestState.js';
21
19
 
22
20
  const getPlugins = (config) => {
23
21
  // server plugins
@@ -134,9 +132,9 @@ const getResolvers = (config, pathways) => {
134
132
  }
135
133
 
136
134
  //graphql api build factory method
137
- const build = (config) => {
135
+ const build = async (config) => {
138
136
  // First perform config build
139
- buildPathways(config);
137
+ await buildPathways(config);
140
138
  buildModels(config);
141
139
 
142
140
  // build api limiters
@@ -152,7 +150,6 @@ const build = (config) => {
152
150
 
153
151
  const { plugins, cache } = getPlugins(config);
154
152
 
155
- const { ApolloServer, gql } = require('apollo-server-express');
156
153
  const app = express()
157
154
 
158
155
  const httpServer = createServer(app);
@@ -221,6 +218,6 @@ const build = (config) => {
221
218
  }
222
219
 
223
220
 
224
- module.exports = {
221
+ export {
225
222
  build
226
223
  };
package/graphql/parser.js CHANGED
@@ -31,7 +31,7 @@ const parseNumberedObjectList = (text, format) => {
31
31
  return result;
32
32
  }
33
33
 
34
- module.exports = {
34
+ export {
35
35
  regexParser,
36
36
  parseNumberedList,
37
37
  parseNumberedObjectList,
package/graphql/pathwayPrompter.js CHANGED
@@ -1,10 +1,9 @@
1
1
  // PathwayPrompter.js
2
- const OpenAIChatPlugin = require('./plugins/openAIChatPlugin');
3
- const OpenAICompletionPlugin = require('./plugins/openAICompletionPlugin');
4
- const AzureTranslatePlugin = require('./plugins/azureTranslatePlugin');
5
- const OpenAIWhisperPlugin = require('./plugins/openAiWhisperPlugin');
6
- const handlebars = require("handlebars");
7
- const { Exception } = require("handlebars");
2
+ import OpenAIChatPlugin from './plugins/openAIChatPlugin.js';
3
+ import OpenAICompletionPlugin from './plugins/openAICompletionPlugin.js';
4
+ import AzureTranslatePlugin from './plugins/azureTranslatePlugin.js';
5
+ import OpenAIWhisperPlugin from './plugins/openAiWhisperPlugin.js';
6
+ import handlebars from 'handlebars';
8
7
 
9
8
  // register functions that can be called directly in the prompt markdown
10
9
  handlebars.registerHelper('stripHTML', function (value) {
@@ -27,7 +26,7 @@ class PathwayPrompter {
27
26
  const model = config.get('models')[modelName];
28
27
 
29
28
  if (!model) {
30
- throw new Exception(`Model ${modelName} not found in config`);
29
+ throw new handlebars.Exception(`Model ${modelName} not found in config`);
31
30
  }
32
31
 
33
32
  let plugin;
@@ -46,7 +45,7 @@ class PathwayPrompter {
46
45
  plugin = new OpenAIWhisperPlugin(config, pathway);
47
46
  break;
48
47
  default:
49
- throw new Exception(`Unsupported model type: ${model.type}`);
48
+ throw new handlebars.Exception(`Unsupported model type: ${model.type}`);
50
49
  }
51
50
 
52
51
  this.plugin = plugin;
@@ -57,6 +56,6 @@ class PathwayPrompter {
57
56
  }
58
57
  }
59
58
 
60
- module.exports = {
59
+ export {
61
60
  PathwayPrompter
62
61
  };
package/graphql/pathwayResolver.js CHANGED
@@ -1,14 +1,12 @@
1
- const { PathwayPrompter } = require('./pathwayPrompter');
2
- const {
3
- v4: uuidv4,
4
- } = require('uuid');
5
- const pubsub = require('./pubsub');
6
- const { encode } = require('gpt-3-encoder')
7
- const { getFirstNToken, getLastNToken, getSemanticChunks } = require('./chunker');
8
- const { PathwayResponseParser } = require('./pathwayResponseParser');
9
- const { Prompt } = require('./prompt');
10
- const { getv, setv } = require('../lib/keyValueStorageClient');
11
- const { requestState } = require('./requestState');
1
+ import { PathwayPrompter } from './pathwayPrompter.js';
2
+ import { v4 as uuidv4 } from 'uuid';
3
+ import pubsub from './pubsub.js';
4
+ import { encode } from 'gpt-3-encoder';
5
+ import { getFirstNToken, getLastNToken, getSemanticChunks } from './chunker.js';
6
+ import { PathwayResponseParser } from './pathwayResponseParser.js';
7
+ import { Prompt } from './prompt.js';
8
+ import { getv, setv } from '../lib/keyValueStorageClient.js';
9
+ import { requestState } from './requestState.js';
12
10
 
13
11
  const MAX_PREVIOUS_RESULT_TOKEN_LENGTH = 1000;
14
12
 
@@ -125,7 +123,7 @@ class PathwayResolver {
125
123
  // Get saved context from contextId or change contextId if needed
126
124
  const { contextId } = args;
127
125
  this.savedContextId = contextId ? contextId : null;
128
- this.savedContext = contextId ? (getv && await getv(contextId) || {}) : {};
126
+ this.savedContext = contextId ? (getv && (await getv(contextId)) || {}) : {};
129
127
 
130
128
  // Save the context before processing the request
131
129
  const savedContextStr = JSON.stringify(this.savedContext);
@@ -312,4 +310,4 @@ class PathwayResolver {
312
310
  }
313
311
  }
314
312
 
315
- module.exports = { PathwayResolver };
313
+ export { PathwayResolver };
package/graphql/pathwayResponseParser.js CHANGED
@@ -1,4 +1,4 @@
1
- const { parseNumberedList, parseNumberedObjectList } = require('./parser')
1
+ import { parseNumberedList, parseNumberedObjectList } from './parser.js';
2
2
 
3
3
  class PathwayResponseParser {
4
4
  constructor(pathway) {
@@ -21,4 +21,4 @@ class PathwayResponseParser {
21
21
  }
22
22
  }
23
23
 
24
- module.exports = { PathwayResponseParser };
24
+ export { PathwayResponseParser };
package/graphql/plugins/azureTranslatePlugin.js CHANGED
@@ -1,5 +1,5 @@
1
1
  // AzureTranslatePlugin.js
2
- const ModelPlugin = require('./modelPlugin');
2
+ import ModelPlugin from './modelPlugin.js';
3
3
 
4
4
  class AzureTranslatePlugin extends ModelPlugin {
5
5
  constructor(config, pathway) {
@@ -37,4 +37,4 @@ class AzureTranslatePlugin extends ModelPlugin {
37
37
  }
38
38
  }
39
39
 
40
- module.exports = AzureTranslatePlugin;
40
+ export default AzureTranslatePlugin;
package/graphql/plugins/modelPlugin.js CHANGED
@@ -1,8 +1,9 @@
1
1
  // ModelPlugin.js
2
- const handlebars = require('handlebars');
3
- const { request } = require("../../lib/request");
4
- const { encode } = require("gpt-3-encoder");
5
- const { getFirstNToken } = require("../chunker");
2
+ import handlebars from 'handlebars';
3
+
4
+ import { request } from '../../lib/request.js';
5
+ import { encode } from 'gpt-3-encoder';
6
+ import { getFirstNToken } from '../chunker.js';
6
7
 
7
8
  const DEFAULT_MAX_TOKENS = 4096;
8
9
  const DEFAULT_PROMPT_TOKEN_RATIO = 0.5;
@@ -237,7 +238,7 @@ class ModelPlugin {
237
238
  const responseData = await request({ url, data, params, headers, cache: this.shouldCache }, this.modelName);
238
239
 
239
240
  if (responseData.error) {
240
- throw new Exception(`An error was returned from the server: ${JSON.stringify(responseData.error)}`);
241
+ throw new Error(`An error was returned from the server: ${JSON.stringify(responseData.error)}`);
241
242
  }
242
243
 
243
244
  this.logRequestData(data, responseData, prompt);
@@ -246,6 +247,6 @@ class ModelPlugin {
246
247
 
247
248
  }
248
249
 
249
- module.exports = ModelPlugin;
250
+ export default ModelPlugin;
250
251
 
251
252
 
package/graphql/plugins/openAiChatPlugin.js CHANGED
@@ -1,5 +1,5 @@
1
1
  // OpenAIChatPlugin.js
2
- const ModelPlugin = require('./modelPlugin');
2
+ import ModelPlugin from './modelPlugin.js';
3
3
 
4
4
  class OpenAIChatPlugin extends ModelPlugin {
5
5
  constructor(config, pathway) {
@@ -43,4 +43,4 @@ class OpenAIChatPlugin extends ModelPlugin {
43
43
  }
44
44
  }
45
45
 
46
- module.exports = OpenAIChatPlugin;
46
+ export default OpenAIChatPlugin;
package/graphql/plugins/openAiCompletionPlugin.js CHANGED
@@ -1,6 +1,7 @@
1
1
  // OpenAICompletionPlugin.js
2
- const ModelPlugin = require('./modelPlugin');
3
- const { encode } = require("gpt-3-encoder");
2
+ import ModelPlugin from './modelPlugin.js';
3
+
4
+ import { encode } from 'gpt-3-encoder';
4
5
 
5
6
  class OpenAICompletionPlugin extends ModelPlugin {
6
7
  constructor(config, pathway) {
@@ -66,5 +67,5 @@ class OpenAICompletionPlugin extends ModelPlugin {
66
67
  }
67
68
  }
68
69
 
69
- module.exports = OpenAICompletionPlugin;
70
+ export default OpenAICompletionPlugin;
70
71
 
package/graphql/plugins/openAiWhisperPlugin.js CHANGED
@@ -1,9 +1,10 @@
1
1
  // OpenAICompletionPlugin.js
2
- const ModelPlugin = require('./modelPlugin');
3
- const FormData = require('form-data');
4
- const fs = require('fs');
5
- const { splitMediaFile, isValidYoutubeUrl, processYoutubeUrl, deleteTempPath } = require('../../lib/fileChunker');
6
- const pubsub = require('../pubsub');
2
+ import ModelPlugin from './modelPlugin.js';
3
+
4
+ import FormData from 'form-data';
5
+ import fs from 'fs';
6
+ import { splitMediaFile, isValidYoutubeUrl, processYoutubeUrl, deleteTempPath } from '../../lib/fileChunker.js';
7
+ import pubsub from '../pubsub.js';
7
8
 
8
9
  class OpenAIWhisperPlugin extends ModelPlugin {
9
10
  constructor(config, pathway) {
@@ -87,5 +88,5 @@ class OpenAIWhisperPlugin extends ModelPlugin {
87
88
  }
88
89
  }
89
90
 
90
- module.exports = OpenAIWhisperPlugin;
91
+ export default OpenAIWhisperPlugin;
91
92