@aj-archipelago/cortex 0.0.10 → 0.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/config.js +17 -11
  2. package/graphql/chunker.js +3 -3
  3. package/graphql/graphql.js +19 -22
  4. package/graphql/parser.js +1 -1
  5. package/graphql/pathwayPrompter.js +8 -9
  6. package/graphql/pathwayResolver.js +11 -13
  7. package/graphql/pathwayResponseParser.js +2 -2
  8. package/graphql/plugins/azureTranslatePlugin.js +2 -2
  9. package/graphql/plugins/modelPlugin.js +7 -6
  10. package/graphql/plugins/openAiChatPlugin.js +2 -2
  11. package/graphql/plugins/openAiCompletionPlugin.js +4 -3
  12. package/graphql/plugins/openAiWhisperPlugin.js +7 -6
  13. package/graphql/prompt.js +1 -1
  14. package/graphql/pubsub.js +2 -2
  15. package/graphql/requestState.js +1 -1
  16. package/graphql/resolver.js +4 -4
  17. package/graphql/subscriptions.js +5 -4
  18. package/graphql/typeDef.js +53 -53
  19. package/index.js +5 -5
  20. package/lib/fileChunker.js +15 -11
  21. package/lib/keyValueStorageClient.js +5 -5
  22. package/lib/promiser.js +2 -2
  23. package/lib/request.js +11 -9
  24. package/lib/requestMonitor.js +2 -2
  25. package/package.json +4 -2
  26. package/pathways/basePathway.js +5 -4
  27. package/pathways/bias.js +2 -2
  28. package/pathways/chat.js +3 -2
  29. package/pathways/complete.js +4 -2
  30. package/pathways/edit.js +3 -2
  31. package/pathways/entities.js +3 -2
  32. package/pathways/index.js +25 -12
  33. package/pathways/lc_test.mjs +99 -0
  34. package/pathways/paraphrase.js +3 -2
  35. package/pathways/sentiment.js +3 -2
  36. package/pathways/summary.js +6 -4
  37. package/pathways/transcribe.js +4 -2
  38. package/pathways/translate.js +3 -2
  39. package/start.js +5 -2
  40. package/tests/chunkfunction.test.js +3 -4
  41. package/tests/chunking.test.js +8 -3
  42. package/tests/main.test.js +28 -14
  43. package/tests/translate.test.js +8 -4
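Most of this release is a CommonJS-to-ES-modules conversion: package.json gains "type": "module", every require/module.exports pair becomes import/export, and the package's default export in index.js becomes an async factory that must be awaited. A minimal consumer sketch, assuming the package is entered through index.js as the diffs below show; the cortex variable name is illustrative, and the { startServer } shape mirrors what the updated start.js destructures:

    // Hypothetical consumer of @aj-archipelago/cortex 0.0.11 (ESM).
    // The default export is now an async factory, so it must be awaited before use.
    import cortex from '@aj-archipelago/cortex';

    (async () => {
        const { startServer } = await cortex({ /* optional convict config overrides */ });
        startServer && startServer();
    })();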
package/config.js CHANGED
@@ -1,7 +1,8 @@
- const path = require('path');
- const convict = require('convict');
- const handlebars = require("handlebars");
- const fs = require('fs');
+ import path from 'path';
+ const __dirname = path.dirname(new URL(import.meta.url).pathname);
+ import convict from 'convict';
+ import handlebars from 'handlebars';
+ import fs from 'fs';
 
  // Schema for config
  var config = convict({
@@ -108,7 +109,13 @@ var config = convict({
  format: String,
  default: null,
  env: 'CORTEX_CONFIG_FILE'
- }
+ },
+ serpApiKey: {
+ format: String,
+ default: null,
+ env: 'SERPAPI_API_KEY',
+ sensitive: true
+ },
  });
 
  // Read in environment variables and set up service configuration
@@ -127,22 +134,21 @@ if (configFile && fs.existsSync(configFile)) {
  }
  }
 
-
  // Build and load pathways to config
- const buildPathways = (config) => {
+ const buildPathways = async (config) => {
  const { pathwaysPath, corePathwaysPath, basePathwayPath } = config.getProperties();
 
  // Load cortex base pathway
- const basePathway = require(basePathwayPath);
+ const basePathway = await import(basePathwayPath).then(module => module.default);
 
  // Load core pathways, default from the Cortex package
  console.log('Loading core pathways from', corePathwaysPath)
- let loadedPathways = require(corePathwaysPath);
+ let loadedPathways = await import(`${corePathwaysPath}/index.js`).then(module => module);
 
  // Load custom pathways and override core pathways if same
  if (pathwaysPath && fs.existsSync(pathwaysPath)) {
  console.log('Loading custom pathways from', pathwaysPath)
- const customPathways = require(pathwaysPath);
+ const customPathways = await import(`${pathwaysPath}/index.js`).then(module => module);
  loadedPathways = { ...loadedPathways, ...customPathways };
  }
 
@@ -191,4 +197,4 @@ const buildModels = (config) => {
  // TODO: Perform validation
  // config.validate({ allowed: 'strict' });
 
- module.exports = { config, buildPathways, buildModels };
+ export { config, buildPathways, buildModels };
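The only schema addition in config.js is serpApiKey, read from the SERPAPI_API_KEY environment variable and flagged sensitive so convict masks it when the config is printed. A minimal sketch of reading it from a custom pathway, assuming the same contextValue shape ({ config, requestState }) that the tests and the new lc_test.mjs pathway below rely on; the pathway itself is hypothetical:

    // Hypothetical pathway: checks that the new serpApiKey setting is present.
    // `config` here is the convict instance passed through contextValue.
    export default {
        resolver: async (parent, args, contextValue, info) => {
            const { config } = contextValue;
            const serpApiKey = config.get('serpApiKey'); // backed by SERPAPI_API_KEY
            return serpApiKey ? 'serpApiKey is configured' : 'serpApiKey is not set';
        },
    };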
package/graphql/chunker.js CHANGED
@@ -1,4 +1,4 @@
- const { encode, decode } = require('gpt-3-encoder')
+ import { encode, decode } from 'gpt-3-encoder';
 
  const getLastNToken = (text, maxTokenLen) => {
  const encoded = encode(text);
@@ -132,6 +132,6 @@ const semanticTruncate = (text, maxLength) => {
  : truncatedText + "...";
  };
 
- module.exports = {
+ export {
  getSemanticChunks, semanticTruncate, getLastNToken, getFirstNToken
- }
+ };
package/graphql/graphql.js CHANGED
@@ -1,23 +1,21 @@
- const { createServer } = require('http');
- const {
+ import { createServer } from 'http';
+ import {
  ApolloServerPluginDrainHttpServer,
  ApolloServerPluginLandingPageLocalDefault,
- } = require("apollo-server-core");
- const { makeExecutableSchema } = require('@graphql-tools/schema');
- const { WebSocketServer } = require('ws');
- const { useServer } = require('graphql-ws/lib/use/ws');
- const express = require('express');
-
- /// Create apollo graphql server
- const Keyv = require("keyv");
- const { KeyvAdapter } = require("@apollo/utils.keyvadapter");
- const responseCachePlugin = require('apollo-server-plugin-response-cache').default
-
- const subscriptions = require('./subscriptions');
- const { buildLimiters } = require('../lib/request');
- const { cancelRequestResolver } = require('./resolver');
- const { buildPathways, buildModels } = require('../config');
- const { requestState } = require('./requestState');
+ } from 'apollo-server-core';
+ import { makeExecutableSchema } from '@graphql-tools/schema';
+ import { WebSocketServer } from 'ws';
+ import { useServer } from 'graphql-ws/lib/use/ws';
+ import express from 'express';
+ import { ApolloServer } from 'apollo-server-express';
+ import Keyv from 'keyv';
+ import { KeyvAdapter } from '@apollo/utils.keyvadapter';
+ import responseCachePlugin from 'apollo-server-plugin-response-cache';
+ import subscriptions from './subscriptions.js';
+ import { buildLimiters } from '../lib/request.js';
+ import { cancelRequestResolver } from './resolver.js';
+ import { buildPathways, buildModels } from '../config.js';
+ import { requestState } from './requestState.js';
 
  const getPlugins = (config) => {
  // server plugins
@@ -134,9 +132,9 @@ const getResolvers = (config, pathways) => {
  }
 
  //graphql api build factory method
- const build = (config) => {
+ const build = async (config) => {
  // First perform config build
- buildPathways(config);
+ await buildPathways(config);
  buildModels(config);
 
  // build api limiters
@@ -152,7 +150,6 @@ const build = (config) => {
 
  const { plugins, cache } = getPlugins(config);
 
- const { ApolloServer, gql } = require('apollo-server-express');
  const app = express()
 
  const httpServer = createServer(app);
@@ -221,6 +218,6 @@ const build = (config) => {
  }
 
 
- module.exports = {
+ export {
  build
  };
package/graphql/parser.js CHANGED
@@ -31,7 +31,7 @@ const parseNumberedObjectList = (text, format) => {
  return result;
  }
 
- module.exports = {
+ export {
  regexParser,
  parseNumberedList,
  parseNumberedObjectList,
package/graphql/pathwayPrompter.js CHANGED
@@ -1,10 +1,9 @@
  // PathwayPrompter.js
- const OpenAIChatPlugin = require('./plugins/openAIChatPlugin');
- const OpenAICompletionPlugin = require('./plugins/openAICompletionPlugin');
- const AzureTranslatePlugin = require('./plugins/azureTranslatePlugin');
- const OpenAIWhisperPlugin = require('./plugins/openAiWhisperPlugin');
- const handlebars = require("handlebars");
- const { Exception } = require("handlebars");
+ import OpenAIChatPlugin from './plugins/openAIChatPlugin.js';
+ import OpenAICompletionPlugin from './plugins/openAICompletionPlugin.js';
+ import AzureTranslatePlugin from './plugins/azureTranslatePlugin.js';
+ import OpenAIWhisperPlugin from './plugins/openAiWhisperPlugin.js';
+ import handlebars from 'handlebars';
 
  // register functions that can be called directly in the prompt markdown
  handlebars.registerHelper('stripHTML', function (value) {
@@ -27,7 +26,7 @@ class PathwayPrompter {
  const model = config.get('models')[modelName];
 
  if (!model) {
- throw new Exception(`Model ${modelName} not found in config`);
+ throw new handlebars.Exception(`Model ${modelName} not found in config`);
  }
 
  let plugin;
@@ -46,7 +45,7 @@ class PathwayPrompter {
  plugin = new OpenAIWhisperPlugin(config, pathway);
  break;
  default:
- throw new Exception(`Unsupported model type: ${model.type}`);
+ throw new handlebars.Exception(`Unsupported model type: ${model.type}`);
  }
 
  this.plugin = plugin;
@@ -57,6 +56,6 @@ class PathwayPrompter {
  }
  }
 
- module.exports = {
+ export {
  PathwayPrompter
  };
package/graphql/pathwayResolver.js CHANGED
@@ -1,14 +1,12 @@
- const { PathwayPrompter } = require('./pathwayPrompter');
- const {
- v4: uuidv4,
- } = require('uuid');
- const pubsub = require('./pubsub');
- const { encode } = require('gpt-3-encoder')
- const { getFirstNToken, getLastNToken, getSemanticChunks } = require('./chunker');
- const { PathwayResponseParser } = require('./pathwayResponseParser');
- const { Prompt } = require('./prompt');
- const { getv, setv } = require('../lib/keyValueStorageClient');
- const { requestState } = require('./requestState');
+ import { PathwayPrompter } from './pathwayPrompter.js';
+ import { v4 as uuidv4 } from 'uuid';
+ import pubsub from './pubsub.js';
+ import { encode } from 'gpt-3-encoder';
+ import { getFirstNToken, getLastNToken, getSemanticChunks } from './chunker.js';
+ import { PathwayResponseParser } from './pathwayResponseParser.js';
+ import { Prompt } from './prompt.js';
+ import { getv, setv } from '../lib/keyValueStorageClient.js';
+ import { requestState } from './requestState.js';
 
  const MAX_PREVIOUS_RESULT_TOKEN_LENGTH = 1000;
 
@@ -125,7 +123,7 @@ class PathwayResolver {
  // Get saved context from contextId or change contextId if needed
  const { contextId } = args;
  this.savedContextId = contextId ? contextId : null;
- this.savedContext = contextId ? (getv && await getv(contextId) || {}) : {};
+ this.savedContext = contextId ? (getv && (await getv(contextId)) || {}) : {};
 
  // Save the context before processing the request
  const savedContextStr = JSON.stringify(this.savedContext);
@@ -312,4 +310,4 @@ class PathwayResolver {
  }
  }
 
- module.exports = { PathwayResolver };
+ export { PathwayResolver };
package/graphql/pathwayResponseParser.js CHANGED
@@ -1,4 +1,4 @@
- const { parseNumberedList, parseNumberedObjectList } = require('./parser')
+ import { parseNumberedList, parseNumberedObjectList } from './parser.js';
 
  class PathwayResponseParser {
  constructor(pathway) {
@@ -21,4 +21,4 @@ class PathwayResponseParser {
  }
  }
 
- module.exports = { PathwayResponseParser };
+ export { PathwayResponseParser };
package/graphql/plugins/azureTranslatePlugin.js CHANGED
@@ -1,5 +1,5 @@
  // AzureTranslatePlugin.js
- const ModelPlugin = require('./modelPlugin');
+ import ModelPlugin from './modelPlugin.js';
 
  class AzureTranslatePlugin extends ModelPlugin {
  constructor(config, pathway) {
@@ -37,4 +37,4 @@ class AzureTranslatePlugin extends ModelPlugin {
  }
  }
 
- module.exports = AzureTranslatePlugin;
+ export default AzureTranslatePlugin;
package/graphql/plugins/modelPlugin.js CHANGED
@@ -1,8 +1,9 @@
  // ModelPlugin.js
- const handlebars = require('handlebars');
- const { request } = require("../../lib/request");
- const { encode } = require("gpt-3-encoder");
- const { getFirstNToken } = require("../chunker");
+ import handlebars from 'handlebars';
+
+ import { request } from '../../lib/request.js';
+ import { encode } from 'gpt-3-encoder';
+ import { getFirstNToken } from '../chunker.js';
 
  const DEFAULT_MAX_TOKENS = 4096;
  const DEFAULT_PROMPT_TOKEN_RATIO = 0.5;
@@ -237,7 +238,7 @@ class ModelPlugin {
  const responseData = await request({ url, data, params, headers, cache: this.shouldCache }, this.modelName);
 
  if (responseData.error) {
- throw new Exception(`An error was returned from the server: ${JSON.stringify(responseData.error)}`);
+ throw new Error(`An error was returned from the server: ${JSON.stringify(responseData.error)}`);
  }
 
  this.logRequestData(data, responseData, prompt);
@@ -246,6 +247,6 @@ class ModelPlugin {
 
  }
 
- module.exports = ModelPlugin;
+ export default ModelPlugin;
 
 
package/graphql/plugins/openAiChatPlugin.js CHANGED
@@ -1,5 +1,5 @@
  // OpenAIChatPlugin.js
- const ModelPlugin = require('./modelPlugin');
+ import ModelPlugin from './modelPlugin.js';
 
  class OpenAIChatPlugin extends ModelPlugin {
  constructor(config, pathway) {
@@ -43,4 +43,4 @@ class OpenAIChatPlugin extends ModelPlugin {
  }
  }
 
- module.exports = OpenAIChatPlugin;
+ export default OpenAIChatPlugin;
package/graphql/plugins/openAiCompletionPlugin.js CHANGED
@@ -1,6 +1,7 @@
  // OpenAICompletionPlugin.js
- const ModelPlugin = require('./modelPlugin');
- const { encode } = require("gpt-3-encoder");
+ import ModelPlugin from './modelPlugin.js';
+
+ import { encode } from 'gpt-3-encoder';
 
  class OpenAICompletionPlugin extends ModelPlugin {
  constructor(config, pathway) {
@@ -66,5 +67,5 @@ class OpenAICompletionPlugin extends ModelPlugin {
  }
  }
 
- module.exports = OpenAICompletionPlugin;
+ export default OpenAICompletionPlugin;
 
package/graphql/plugins/openAiWhisperPlugin.js CHANGED
@@ -1,9 +1,10 @@
  // OpenAICompletionPlugin.js
- const ModelPlugin = require('./modelPlugin');
- const FormData = require('form-data');
- const fs = require('fs');
- const { splitMediaFile, isValidYoutubeUrl, processYoutubeUrl, deleteTempPath } = require('../../lib/fileChunker');
- const pubsub = require('../pubsub');
+ import ModelPlugin from './modelPlugin.js';
+
+ import FormData from 'form-data';
+ import fs from 'fs';
+ import { splitMediaFile, isValidYoutubeUrl, processYoutubeUrl, deleteTempPath } from '../../lib/fileChunker.js';
+ import pubsub from '../pubsub.js';
 
  class OpenAIWhisperPlugin extends ModelPlugin {
  constructor(config, pathway) {
@@ -87,5 +88,5 @@ class OpenAIWhisperPlugin extends ModelPlugin {
  }
  }
 
- module.exports = OpenAIWhisperPlugin;
+ export default OpenAIWhisperPlugin;
 
package/graphql/prompt.js CHANGED
@@ -43,4 +43,4 @@ function promptContains(variable, prompt) {
  return variables.includes(variable);
  }
 
- module.exports = { Prompt, promptContains };
+ export { Prompt, promptContains };
package/graphql/pubsub.js CHANGED
@@ -1,4 +1,4 @@
- const { PubSub } = require('graphql-subscriptions');
+ import { PubSub } from 'graphql-subscriptions';
  const pubsub = new PubSub();
 
- module.exports = pubsub;
+ export default pubsub;
package/graphql/requestState.js CHANGED
@@ -1,5 +1,5 @@
  const requestState = {}; // Stores the state of each request
 
- module.exports = {
+ export {
  requestState
  };
package/graphql/resolver.js CHANGED
@@ -1,5 +1,5 @@
- const { fulfillWithTimeout } = require("../lib/promiser");
- const { PathwayResolver } = require("./pathwayResolver");
+ import { fulfillWithTimeout } from '../lib/promiser.js';
+ import { PathwayResolver } from './pathwayResolver.js';
 
  // This resolver uses standard parameters required by Apollo server:
  // (parent, args, contextValue, info)
@@ -38,6 +38,6 @@ const cancelRequestResolver = (parent, args, contextValue, info) => {
  return true
  }
 
- module.exports = {
+ export {
  resolver, rootResolver, cancelRequestResolver
- }
+ };
package/graphql/subscriptions.js CHANGED
@@ -2,9 +2,10 @@
  // multi-server instance
  // See https://www.apollographql.com/docs/apollo-server/v3/data/subscriptions/#resolving-a-subscription
 
- const pubsub = require("./pubsub");
- const { withFilter } = require("graphql-subscriptions");
- const { requestState } = require("./requestState");
+ import pubsub from './pubsub.js';
+
+ import { withFilter } from 'graphql-subscriptions';
+ import { requestState } from './requestState.js';
 
  const subscriptions = {
  requestProgress: {
@@ -31,4 +32,4 @@ const subscriptions = {
  },
  };
 
- module.exports = subscriptions;
+ export default subscriptions;
package/graphql/typeDef.js CHANGED
@@ -3,59 +3,59 @@ const GRAPHQL_TYPE_MAP = {
  string: 'String',
  number: 'Int',
  };
-
- const typeDef = (pathway) => {
- const { name, objName, defaultInputParameters, inputParameters, format } = pathway;
-
- const fields = format ? format.match(/\b(\w+)\b/g) : null;
- const fieldsStr = !fields ? `` : fields.map((f) => `${f}: String`).join('\n ');
-
- const typeName = fields ? `${objName}Result` : `String`;
- const messageType = `input Message { role: String, content: String }`;
-
- const type = fields ? `type ${typeName} {
- ${fieldsStr}
- }` : ``;
-
- const resultStr = pathway.list ? `[${typeName}]` : typeName;
-
- const responseType = `type ${objName} {
- debug: String
- result: ${resultStr}
- previousResult: String
- warnings: [String]
- contextId: String
- }`;
-
- const params = { ...defaultInputParameters, ...inputParameters };
-
- const paramsStr = Object.entries(params)
- .map(([key, value]) => {
- if (typeof value === 'object' && Array.isArray(value)) {
- return `${key}: [Message] = []`;
- } else {
- return `${key}: ${GRAPHQL_TYPE_MAP[typeof value]} = ${
- typeof value === 'string' ? `"${value}"` : value
- }`;
- }
- })
- .join('\n');
-
- const restDefinition = Object.entries(params).map(([key, value]) => {
- return {
- name: key,
- type: `${GRAPHQL_TYPE_MAP[typeof value]}${typeof value === 'object' && Array.isArray(value) ? '[]' : ''}`,
- };
- });
-
- const gqlDefinition = `${messageType}\n\n${type}\n\n${responseType}\n\nextend type Query {${name}(${paramsStr}): ${objName}}`;
-
+
+ const typeDef = (pathway) => {
+ const { name, objName, defaultInputParameters, inputParameters, format } = pathway;
+
+ const fields = format ? format.match(/\b(\w+)\b/g) : null;
+ const fieldsStr = !fields ? `` : fields.map((f) => `${f}: String`).join('\n ');
+
+ const typeName = fields ? `${objName}Result` : `String`;
+ const messageType = `input Message { role: String, content: String }`;
+
+ const type = fields ? `type ${typeName} {
+ ${fieldsStr}
+ }` : ``;
+
+ const resultStr = pathway.list ? `[${typeName}]` : typeName;
+
+ const responseType = `type ${objName} {
+ debug: String
+ result: ${resultStr}
+ previousResult: String
+ warnings: [String]
+ contextId: String
+ }`;
+
+ const params = { ...defaultInputParameters, ...inputParameters };
+
+ const paramsStr = Object.entries(params)
+ .map(([key, value]) => {
+ if (typeof value === 'object' && Array.isArray(value)) {
+ return `${key}: [Message] = []`;
+ } else {
+ return `${key}: ${GRAPHQL_TYPE_MAP[typeof value]} = ${
+ typeof value === 'string' ? `"${value}"` : value
+ }`;
+ }
+ })
+ .join('\n');
+
+ const restDefinition = Object.entries(params).map(([key, value]) => {
  return {
- gqlDefinition,
- restDefinition,
+ name: key,
+ type: `${GRAPHQL_TYPE_MAP[typeof value]}${typeof value === 'object' && Array.isArray(value) ? '[]' : ''}`,
  };
+ });
+
+ const gqlDefinition = `${messageType}\n\n${type}\n\n${responseType}\n\nextend type Query {${name}(${paramsStr}): ${objName}}`;
+
+ return {
+ gqlDefinition,
+ restDefinition,
  };
-
- module.exports = {
- typeDef,
- };
+ };
+
+ export {
+ typeDef,
+ };
package/index.js CHANGED
@@ -1,7 +1,7 @@
- const { config } = require('./config');
- const { build } = require('./graphql/graphql');
+ import { config } from './config.js';
+ import { build } from './graphql/graphql.js';
 
- module.exports = (configParams) => {
+ export default async (configParams) => {
  configParams && config.load(configParams);
- return build(config);
- }
+ return await build(config);
+ };
package/lib/fileChunker.js CHANGED
@@ -1,14 +1,18 @@
- const fs = require('fs');
- const ffmpegPath = require('@ffmpeg-installer/ffmpeg').path;
- const ffmpeg = require('fluent-ffmpeg');
+ import fs from 'fs';
+ import { path as ffmpegPath } from '@ffmpeg-installer/ffmpeg';
+ import ffmpeg from 'fluent-ffmpeg';
  ffmpeg.setFfmpegPath(ffmpegPath);
- const path = require('path');
- const { v4: uuidv4 } = require('uuid');
- const os = require('os');
- const util = require('util');
+ import path from 'path';
+ import { v4 as uuidv4 } from 'uuid';
+ import os from 'os';
+ import util from 'util';
+ import { pipeline } from 'stream';
+
  const ffmpegProbe = util.promisify(ffmpeg.ffprobe);
- const pipeline = util.promisify(require('stream').pipeline);
- const ytdl = require('ytdl-core');
+
+ const cPipeline = util.promisify(pipeline);
+
+ import ytdl from 'ytdl-core';
 
 
  async function processChunk(inputPath, outputFileName, start, duration) {
@@ -117,7 +121,7 @@ function convertYoutubeToMp3Stream(video) {
 
  async function pipeStreamToFile(stream, filePath) {
  try {
- await pipeline(stream, fs.createWriteStream(filePath));
+ await cPipeline(stream, fs.createWriteStream(filePath));
  console.log('Stream piped to file successfully.');
  } catch (error) {
  console.error(`Error piping stream to file: ${error.message}`);
@@ -151,6 +155,6 @@ function deleteFile(filePath) {
  }
  }
 
- module.exports = {
+ export {
  splitMediaFile, deleteTempPath, processYoutubeUrl, isValidYoutubeUrl
  };
package/lib/keyValueStorageClient.js CHANGED
@@ -1,5 +1,5 @@
- const Keyv = require('keyv');
- const { config } = require('../config');
+ import Keyv from 'keyv';
+ import { config } from '../config.js';
 
  const storageConnectionString = config.get('storageConnectionString');
 
@@ -18,15 +18,15 @@ const keyValueStorageClient = new Keyv(storageConnectionString, {
 
  // Set values to keyv
  async function setv(key, value) {
- return (keyValueStorageClient && await keyValueStorageClient.set(key, value));
+ return keyValueStorageClient && (await keyValueStorageClient.set(key, value));
  }
 
  // Get values from keyv
  async function getv(key) {
- return (keyValueStorageClient && await keyValueStorageClient.get(key));
+ return keyValueStorageClient && (await keyValueStorageClient.get(key));
  }
 
- module.exports = {
+ export {
  keyValueStorageClient,
  setv,
  getv
package/lib/promiser.js CHANGED
@@ -19,6 +19,6 @@ const fulfillWithTimeout = (promise, timeout) => {
  };
 
 
- module.exports = {
+ export {
  fulfillWithTimeout
- }
+ };
package/lib/request.js CHANGED
@@ -1,12 +1,14 @@
- const Bottleneck = require("bottleneck/es5");
- const RequestMonitor = require('./requestMonitor');
- const { config } = require('../config');
- let axios = require('axios');
+ import Bottleneck from 'bottleneck/es5.js';
+ import RequestMonitor from './requestMonitor.js';
+ import { config } from '../config.js';
+ import axios from 'axios';
+ import { setupCache } from 'axios-cache-interceptor';
+
+ let cortexAxios = axios;
 
  if (config.get('enableCache')) {
  // Setup cache
- const { setupCache } = require('axios-cache-interceptor');
- axios = setupCache(axios, {
+ cortexAxios = setupCache(axios, {
  // enable cache for all requests by default
  methods: ['get', 'post', 'put', 'delete', 'patch'],
  interpretHeader: false,
@@ -57,7 +59,7 @@ setInterval(() => {
  const postWithMonitor = async (model, url, data, axiosConfigObj) => {
  const monitor = monitors[model];
  monitor.incrementCallCount();
- return axios.post(url, data, axiosConfigObj);
+ return cortexAxios.post(url, data, axiosConfigObj);
  }
 
  const MAX_RETRY = 10;
@@ -103,6 +105,6 @@ const request = async (params, model) => {
  return data;
  }
 
- module.exports = {
+ export {
  request, postRequest, buildLimiters
- }
+ };
package/lib/requestMonitor.js CHANGED
@@ -38,6 +38,6 @@ class RequestMonitor {
  this.startTime = new Date();
  }
  }
-
- module.exports = RequestMonitor;
+
+ export default RequestMonitor;
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@aj-archipelago/cortex",
- "version": "0.0.10",
+ "version": "0.0.11",
  "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
  "repository": {
  "type": "git",
@@ -25,6 +25,7 @@
  },
  "author": "",
  "license": "MIT",
+ "type": "module",
  "homepage": "https://github.com/aj-archipelago/cortex#readme",
  "dependencies": {
  "@apollo/utils.keyvadapter": "^1.1.2",
@@ -49,6 +50,7 @@
  "graphql-ws": "^5.11.2",
  "handlebars": "^4.7.7",
  "keyv": "^4.5.2",
+ "langchain": "^0.0.47",
  "ws": "^8.12.0",
  "ytdl-core": "^4.11.2"
  },
@@ -57,7 +59,7 @@
  "dotenv": "^16.0.3"
  },
  "publishConfig": {
- "access": "private"
+ "access": "restricted"
  },
  "ava": {
  "files": [
package/pathways/basePathway.js CHANGED
@@ -1,8 +1,8 @@
- const { rootResolver, resolver } = require("../graphql/resolver");
- const { typeDef } = require('../graphql/typeDef')
+ import { rootResolver, resolver } from '../graphql/resolver.js';
+ import { typeDef } from '../graphql/typeDef.js';
 
  // all default definitions of a single pathway
- module.exports = {
+ export default {
  prompt: `{{text}}`,
  defaultInputParameters: {
  text: ``,
@@ -19,4 +19,5 @@ module.exports = {
  useInputSummarization: false,
  truncateFromFront: false,
  timeout: 120, // in seconds
- }
+ };
+
package/pathways/bias.js CHANGED
@@ -2,9 +2,9 @@
  // Objectivity analysis of text
  // This module exports a prompt that analyzes the given text and determines if it's written objectively. It also provides a detailed explanation of the decision.
 
- module.exports = {
+ export default {
  // Uncomment the following line to enable caching for this prompt, if desired.
  // enableCache: true,
 
  prompt: `{{text}}\n\nIs the above text written objectively? Why or why not, explain with details:\n`
- }
+ };
package/pathways/chat.js CHANGED
@@ -2,7 +2,7 @@
  // Simple context-aware chat bot
  // This is a two prompt implementation of a context aware chat bot. The first prompt generates content that will be stored in the previousResult variable and will be returned to the client. In the optimum implementation, the client will then update their chatContext variable for the next call. The second prompt actually responds to the user. The second prompt *could* use previousResult instead of chatContext, but in this situation previousResult will also include the current turn of the conversation to which it is responding. That can get a little confusing as it tends to overemphasize the current turn in the response.
 
- module.exports = {
+ export default {
  prompt:
  [
  `{{{chatContext}}}\n\n{{{text}}}\n\nGiven the information above, create a short summary of the conversation to date making sure to include all of the personal details about the user that you encounter:\n\n`,
@@ -12,4 +12,5 @@ module.exports = {
  chatContext: `User: Starting conversation.`,
  },
  useInputChunking: false,
- }
+ };
+
package/pathways/complete.js CHANGED
@@ -2,6 +2,8 @@
  // Text completion module
  // This module exports a prompt that takes an input text and completes it by generating a continuation of the given text.
 
- module.exports = {
+ export default {
  prompt: `Continue and complete the following:\n\n{{text}}`
- }
+ };
+
+
package/pathways/edit.js CHANGED
@@ -2,9 +2,10 @@
  // Grammar and spelling correction module
  // This module exports a prompt that takes an input text and corrects all spelling and grammar errors found within the text.
 
- module.exports = {
+ export default {
  // Set the temperature to 0 to favor more deterministic output when generating corrections.
  temperature: 0,
 
  prompt: `Correct all spelling and grammar errors in the input text.\n\nInput:\n{{text}}\n\nOutput:\n`
- }
+ };
+
package/pathways/entities.js CHANGED
@@ -2,7 +2,7 @@
  // Entity extraction module
  // This module exports a prompt that takes an input text and extracts the top entities and their definitions as specified by the count parameter.
 
- module.exports = {
+ export default {
  // Set the temperature to 0 to favor more deterministic output when generating entity extraction.
  temperature: 0,
 
@@ -18,4 +18,5 @@ module.exports = {
 
  // Set the list option to true as the prompt is expected to return a list of entities.
  list: true,
- }
+ };
+
package/pathways/index.js CHANGED
@@ -1,12 +1,25 @@
- module.exports = {
- "edit": require('./edit'),
- "chat": require('./chat'),
- "bias": require('./bias'),
- "complete": require('./complete'),
- "entities": require('./entities'),
- "paraphrase": require('./paraphrase'),
- "sentiment": require('./sentiment'),
- "summary": require('./summary'),
- "transcribe": require('./transcribe'),
- "translate": require('./translate'),
- }
+ import edit from './edit.js';
+ import chat from './chat.js';
+ import bias from './bias.js';
+ import complete from './complete.js';
+ import entities from './entities.js';
+ import lc_test from './lc_test.mjs';
+ import paraphrase from './paraphrase.js';
+ import sentiment from './sentiment.js';
+ import summary from './summary.js';
+ import transcribe from './transcribe.js';
+ import translate from './translate.js';
+
+ export {
+ edit,
+ chat,
+ bias,
+ complete,
+ entities,
+ lc_test,
+ paraphrase,
+ sentiment,
+ summary,
+ transcribe,
+ translate
+ };
package/pathways/lc_test.mjs ADDED
@@ -0,0 +1,99 @@
+ // lc_test.js
+ // LangChain Cortex integration test
+
+ // Import required modules
+ import { OpenAI } from "langchain/llms";
+ import { PromptTemplate } from "langchain/prompts";
+ import { LLMChain, ConversationChain } from "langchain/chains";
+ import { initializeAgentExecutor } from "langchain/agents";
+ import { SerpAPI, Calculator } from "langchain/tools";
+ import { BufferMemory } from "langchain/memory";
+
+ export default {
+
+ // Agent test case
+ resolver: async (parent, args, contextValue, info) => {
+
+ const { config } = contextValue;
+ const openAIApiKey = config.get('openaiApiKey');
+ const serpApiKey = config.get('serpApiKey');
+
+ const model = new OpenAI({ openAIApiKey: openAIApiKey, temperature: 0 });
+ const tools = [new SerpAPI( serpApiKey ), new Calculator()];
+
+ const executor = await initializeAgentExecutor(
+ tools,
+ model,
+ "zero-shot-react-description"
+ );
+
+ console.log(`====================`);
+ console.log("Loaded langchain agent.");
+ const input = args.text;
+ console.log(`Executing with input "${input}"...`);
+ const result = await executor.call({ input });
+ console.log(`Got output ${result.output}`);
+ console.log(`====================`);
+
+ return result?.output;
+ },
+
+ /*
+ // Agent test case
+ resolver: async (parent, args, contextValue, info) => {
+
+ const { config } = contextValue;
+ const openAIApiKey = config.get('openaiApiKey');
+ const serpApiKey = config.get('serpApiKey');
+
+ const model = new OpenAI({ openAIApiKey: openAIApiKey, temperature: 0 });
+ const tools = [new SerpAPI( serpApiKey ), new Calculator()];
+
+ const executor = await initializeAgentExecutor(
+ tools,
+ model,
+ "zero-shot-react-description"
+ );
+
+ console.log(`====================`);
+ console.log("Loaded langchain agent.");
+ const input = args.text;
+ console.log(`Executing with input "${input}"...`);
+ const result = await executor.call({ input });
+ console.log(`Got output ${result.output}`);
+ console.log(`====================`);
+
+ return result?.output;
+ },
+ */
+ // Simplest test case
+ /*
+ resolver: async (parent, args, contextValue, info) => {
+
+ const { config } = contextValue;
+ const openAIApiKey = config.get('openaiApiKey');
+
+ const model = new OpenAI({ openAIApiKey: openAIApiKey, temperature: 0.9 });
+
+ const template = "What is a good name for a company that makes {product}?";
+
+ const prompt = new PromptTemplate({
+ template: template,
+ inputVariables: ["product"],
+ });
+
+ const chain = new LLMChain({ llm: model, prompt: prompt });
+
+ console.log(`====================`);
+ console.log(`Calling langchain with prompt: ${prompt?.template}`);
+ console.log(`Input text: ${args.text}`);
+ const res = await chain.call({ product: args.text });
+ console.log(`Result: ${res?.text}`);
+ console.log(`====================`);
+
+ return res?.text?.trim();
+ },
+ */
+ };
+
+
package/pathways/paraphrase.js CHANGED
@@ -2,6 +2,7 @@
  // Paraphrasing module
  // This module exports a prompt that takes an input text and rewrites it in a different way while maintaining the original meaning.
 
- module.exports = {
+ export default {
  prompt: `Rewrite the following:\n\n{{{text}}}`
- }
+ };
+
package/pathways/sentiment.js CHANGED
@@ -2,6 +2,7 @@
  // Sentiment detection module
  // This module exports a prompt that takes an input text and asks how it makes the AI feel.
 
- module.exports = {
+ export default {
  prompt: `How does the text below make you feel?\n\n{{text}}`,
- }
+ };
+
package/pathways/summary.js CHANGED
@@ -3,10 +3,10 @@
  // This module exports a prompt that takes an input text and generates a summary using a custom resolver.
 
  // Import required modules
- const { semanticTruncate } = require('../graphql/chunker');
- const { PathwayResolver } = require('../graphql/pathwayResolver');
+ import { semanticTruncate } from '../graphql/chunker.js';
+ import { PathwayResolver } from '../graphql/pathwayResolver.js';
 
- module.exports = {
+ export default {
  // The main prompt function that takes the input text and asks to generate a summary.
  prompt: `{{{text}}}\n\nWrite a summary of the above text. If the text is in a language other than english, make sure the summary is written in the same language:\n\n`,
 
@@ -72,4 +72,6 @@ module.exports = {
  return summary;
  }
  }
- }
+ };
+
+
package/pathways/transcribe.js CHANGED
@@ -1,8 +1,10 @@
- module.exports = {
+ export default {
  prompt: `{{text}}`,
  model: `oai-whisper`,
  inputParameters: {
  file: ``,
  },
  timeout: 600, // in seconds
- }
+ };
+
+
package/pathways/translate.js CHANGED
@@ -2,7 +2,7 @@
  // Translation module
  // This module exports a prompt that takes an input text and translates it from one language to another.
 
- module.exports = {
+ export default {
  // Set the temperature to 0 to favor more deterministic output when generating translations.
  temperature: 0,
 
@@ -15,4 +15,5 @@ module.exports = {
 
  // Set the timeout for the translation process, in seconds.
  timeout: 300,
- }
+ };
+
package/start.js CHANGED
@@ -1,3 +1,6 @@
- const { startServer } = require('./index')();
+ import startServerFactory from './index.js';
 
- startServer && startServer();
+ (async () => {
+ const { startServer } = await startServerFactory();
+ startServer && startServer();
+ })();
package/tests/chunkfunction.test.js CHANGED
@@ -1,7 +1,6 @@
- const test = require('ava');
-
- const { getSemanticChunks } = require('../graphql/chunker');
- const { encode } = require('gpt-3-encoder')
+ import test from 'ava';
+ import { getSemanticChunks } from '../graphql/chunker.js';
+ import { encode } from 'gpt-3-encoder';
 
  const testText = `Lorem ipsum dolor sit amet, consectetur adipiscing elit. In id erat sem. Phasellus ac dapibus purus, in fermentum nunc. Mauris quis rutrum magna. Quisque rutrum, augue vel blandit posuere, augue magna convallis turpis, nec elementum augue mauris sit amet nunc. Aenean sit amet leo est. Nunc ante ex, blandit et felis ut, iaculis lacinia est. Phasellus dictum orci id libero ullamcorper tempor.
 
package/tests/chunking.test.js CHANGED
@@ -1,7 +1,12 @@
- const test = require('ava');
- const { getTestServer } = require('./main.test');
+ import test from 'ava';
+ import { getTestServer, initTypeDefsResolvers } from './main.test.js';
 
- const testServer = getTestServer();
+ let testServer;
+
+ test.before(async () => {
+ await initTypeDefsResolvers();
+ testServer = getTestServer();
+ });
 
  test.after.always(async () => {
  await testServer.stop();
package/tests/main.test.js CHANGED
@@ -1,22 +1,35 @@
- const test = require('ava');
+ import test from 'ava';
+ import { ApolloServer } from 'apollo-server';
+ import { config } from '../config.js';
+ import typeDefsresolversFactory from '../index.js';
 
- const { ApolloServer } = require('apollo-server');
- const { config } = require('../config');
- const { typeDefs, resolvers } = require('../index')();
+ let typeDefs;
+ let resolvers;
 
- const getTestServer = () => {
- return new ApolloServer({
- typeDefs,
- resolvers,
- context: () => ({ config, requestState: {} }),
- });
- }
+ const initTypeDefsResolvers = async () => {
+ const result = await typeDefsresolversFactory();
+ typeDefs = result.typeDefs;
+ resolvers = result.resolvers;
+ };
 
- const testServer = getTestServer();
+ const getTestServer = () => {
+ return new ApolloServer({
+ typeDefs,
+ resolvers,
+ context: () => ({ config, requestState: {} }),
+ });
+ };
+
+ let testServer;
+
+ test.before(async () => {
+ await initTypeDefsResolvers();
+ testServer = getTestServer();
+ });
 
  //stop server after all tests
  test.after.always('cleanup', async () => {
- await testServer.stop();
+ await testServer.stop();
  });
 
  test('validates bias endpoint', async (t) => {
@@ -93,6 +106,7 @@ test('validates summary endpoint', async (t) => {
  t.truthy(response.data?.summary.result);
  });
 
- module.exports = {
+ export {
+ initTypeDefsResolvers,
  getTestServer,
  };
package/tests/translate.test.js CHANGED
@@ -1,9 +1,13 @@
- const test = require('ava');
- const { getTestServer } = require('./main.test');
+ import test from 'ava';
+ import { getTestServer, initTypeDefsResolvers } from './main.test.js';
 
- const testServer = getTestServer();
+ let testServer;
+
+ test.before(async () => {
+ await initTypeDefsResolvers();
+ testServer = getTestServer();
+ });
 
- // stop server after all tests
  test.after.always(async () => {
  await testServer.stop();
  });