@aj-archipelago/cortex 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,183 @@
1
+ const { createServer } = require('http');
2
+ const {
3
+ ApolloServerPluginDrainHttpServer,
4
+ ApolloServerPluginLandingPageLocalDefault,
5
+ } = require("apollo-server-core");
6
+ const { makeExecutableSchema } = require('@graphql-tools/schema');
7
+ const { WebSocketServer } = require('ws');
8
+ const { useServer } = require('graphql-ws/lib/use/ws');
9
+ const express = require('express');
10
+
11
+ /// Create apollo graphql server
12
+ const Keyv = require("keyv");
13
+ const { KeyvAdapter } = require("@apollo/utils.keyvadapter");
14
+ const responseCachePlugin = require('apollo-server-plugin-response-cache').default
15
+
16
+ const subscriptions = require('./subscriptions');
17
+ const { buildLimiters } = require('../lib/request');
18
+ const { cancelRequestResolver } = require('./resolver');
19
+ const { buildPathways, buildModels } = require('../config');
20
+
21
+ const requestState = {}; // Stores the state of each request
22
+
23
// Assemble the Apollo Server plugin list and (optionally) a Keyv-backed
// response cache when caching is enabled and a storage connection exists.
const getPlugins = (config) => {
    // Base plugin set: local landing page for development.
    const plugins = [
        ApolloServerPluginLandingPageLocalDefault({ embed: true }),
    ];

    // If cache is enabled and Redis/storage is available, use it.
    let cache;
    const cacheConfigured = config.get('enableCache') && config.get('storageConnectionString');
    if (cacheConfigured) {
        const store = new Keyv(config.get('storageConnectionString'), {
            ssl: true,
            abortConnect: false,
        });
        cache = new KeyvAdapter(store);
        // caching similar strings, embedding hashing, ... #delta similarity
        // TODO: custom cache key:
        // https://www.apollographql.com/docs/apollo-server/performance/cache-backends#implementing-your-own-cache-backend
        plugins.push(responseCachePlugin({ cache }));
    }

    return { plugins, cache };
};
44
+
45
//typeDefs
// Assemble the schema SDL: the shared base types plus the typeDef of every
// enabled pathway, joined into one document.
const getTypedefs = (pathways) => {
    const defaultTypeDefs = `#graphql
    enum CacheControlScope {
        PUBLIC
        PRIVATE
    }

    directive @cacheControl(
        maxAge: Int
        scope: CacheControlScope
        inheritMaxAge: Boolean
    ) on FIELD_DEFINITION | OBJECT | INTERFACE | UNION

    type Query {
        _ : Boolean
    }

    type Mutation {
        cancelRequest(requestId: String!): Boolean
    }

    type RequestSubscription {
        requestId: String
        progress: Float
        data: String
    }

    type Subscription {
        requestProgress(requestId: String!): RequestSubscription
    }
    `;

    // Collect type definitions from each enabled pathway.
    const pathwayTypeDefs = [];
    for (const pathway of Object.values(pathways)) {
        if (pathway.disabled) continue;
        pathwayTypeDefs.push(pathway.typeDef(pathway));
    }

    return [defaultTypeDefs, ...pathwayTypeDefs].join('\n');
};
82
+
83
// Build the resolver map: one Query resolver per enabled pathway (each one
// injects its pathway into the shared context before delegating to the
// pathway's rootResolver), plus the fixed Mutation/Subscription resolvers.
const getResolvers = (config, pathways) => {
    const queryResolvers = Object.fromEntries(
        Object.entries(pathways)
            .filter(([, pathway]) => !pathway.disabled) // skip disabled pathways
            .map(([name, pathway]) => [
                name,
                (parent, args, contextValue, info) => {
                    // add shared state to contextValue
                    contextValue.pathway = pathway;
                    return pathway.rootResolver(parent, args, contextValue, info);
                },
            ])
    );

    return {
        Query: queryResolvers,
        Mutation: { 'cancelRequest': cancelRequestResolver },
        Subscription: subscriptions,
    };
};
102
+
103
//graphql api build factory method
// Wires config-driven pathways into a complete Apollo GraphQL stack:
// express app, HTTP server, WebSocket subscription server, plugins/cache,
// and an optional API-key gate. Returns the server pieces plus a
// startServer() helper that binds the configured port.
const build = (config) => {
    // First perform config build
    buildPathways(config);
    buildModels(config);

    // build api limiters
    buildLimiters(config);

    // build api
    const pathways = config.get('pathways');

    const typeDefs = getTypedefs(pathways);
    const resolvers = getResolvers(config, pathways);

    const schema = makeExecutableSchema({ typeDefs, resolvers });

    const { plugins, cache } = getPlugins(config);

    // loaded lazily; `gql` was previously destructured here but never used
    const { ApolloServer } = require('apollo-server-express');
    const app = express();
    const httpServer = createServer(app);

    // Creating the WebSocket server on the same HTTP server.
    const wsServer = new WebSocketServer({
        server: httpServer,
        // Pass a different path here if your ApolloServer serves at
        // a different path.
        path: '/graphql',
    });

    // Hand in the schema we just created and have the
    // WebSocketServer start listening.
    const serverCleanup = useServer({ schema }, wsServer);

    const server = new ApolloServer({
        schema,
        csrfPrevention: true,
        plugins: plugins.concat([
            // Proper shutdown for the HTTP server.
            ApolloServerPluginDrainHttpServer({ httpServer }),

            // Proper shutdown for the WebSocket server.
            {
                async serverWillStart() {
                    return {
                        async drainServer() {
                            await serverCleanup.dispose();
                        },
                    };
                },
            }]),
        context: ({ req, res }) => ({ req, res, config, requestState }),
    });

    // Optional API-key gate: when API_KEY is set, require it via the
    // `api_key` header or query parameter. Registered at build time, before
    // startServer applies the Apollo middleware, so unauthorized requests
    // never reach GraphQL.
    // BUG FIX: the original called next() even after sending the 401
    // response, so rejected requests continued into downstream handlers.
    app.use((req, res, next) => {
        if (process.env.API_KEY && req.headers.api_key !== process.env.API_KEY && req.query.api_key !== process.env.API_KEY) {
            return res.status(401).send('Unauthorized');
        }
        next();
    });

    // if local start server
    const startServer = async () => {
        await server.start();
        server.applyMiddleware({ app });

        // Now that our HTTP server is fully set up, we can listen to it.
        httpServer.listen(config.get('PORT'), () => {
            console.log(`🚀 Server is now running at http://localhost:${config.get('PORT')}${server.graphqlPath}`);
        });
    };

    return { server, startServer, cache, plugins, typeDefs, resolvers };
};
179
+
180
+
181
// Public factory for building the GraphQL API.
module.exports = { build };
@@ -0,0 +1,58 @@
1
// Extract the simplest usable result from a model response: the raw choices
// array when there are several, otherwise a single trimmed string (chat
// message content preferred over completion text), or null when empty.
const getResponseResult = (data) => {
    const { choices } = data;
    if (!choices || !choices.length) {
        return; //TODO no choices case
    }

    // Multiple choices: hand the whole array back to the caller.
    if (choices.length > 1) {
        return choices;
    }

    // Single choice: prefer chat-style message content, fall back to
    // completion-style text, else null.
    const [choice] = choices;
    const textResult = choice.text?.trim();
    const messageResult = choice.message?.content?.trim();

    return messageResult || textResult || null;
};
19
+
20
//simply trim and parse with given regex
const regexParser = (text, regex) => {
    const pieces = text.trim().split(regex);
    return pieces.map((piece) => piece.trim()).filter((piece) => piece.length);
};

// parse numbered list text format into list
// this supports most common numbered list returns like "1.", "1)", "1-"
const parseNumberedList = (str) =>
    regexParser(str, /^\s*[\[\{\(]*\d+[\s.=\-:,;\]\)\}]/gm);

// parse a numbered object list text format into list of objects; `format`
// supplies the field names (e.g. "name - description"), and each list item
// is split once on the first ':' or '-' into those fields.
const parseNumberedObjectList = (text, format) => {
    const fields = format.match(/\b(\w+)\b/g);

    const result = [];
    for (const value of parseNumberedList(text)) {
        try {
            const parts = regexParser(value, /[:-](.*)/);
            const obj = {};
            fields.forEach((field, i) => {
                obj[field] = parts[i];
            });
            result.push(obj);
        } catch (e) {
            console.warn(`Failed to parse value in parseNumberedObjectList, value: ${value}, fields: ${fields}`);
        }
    }

    return result;
};
52
+
53
+ module.exports = {
54
+ getResponseResult,
55
+ regexParser,
56
+ parseNumberedList,
57
+ parseNumberedObjectList
58
+ };
@@ -0,0 +1,145 @@
1
+ const { request } = require("../lib/request");
2
+ const handlebars = require("handlebars");
3
+ const { getResponseResult } = require("./parser");
4
+ const { Exception } = require("handlebars");
5
+ const { encode } = require("gpt-3-encoder");
6
+
7
// Token-budget defaults used when neither the pathway nor the model
// specifies its own limits.
const DEFAULT_MAX_TOKENS = 4096;
const DEFAULT_PROMPT_TOKEN_RATIO = 0.5;

// register functions that can be called directly in the prompt markdown
// (neither helper uses `this`, so arrows are safe here)
handlebars.registerHelper('stripHTML', (value) => value.replace(/<[^>]*>/g, ''));
handlebars.registerHelper('now', () => new Date().toISOString());
18
+
19
// Binds a pathway to its configured model, compiles prompt templates with
// handlebars, and executes requests against the model endpoint.
class PathwayPrompter {
    /**
     * @param {object} opts
     * @param {object} opts.config - config store exposing get()/getEnv()
     * @param {object} opts.pathway - pathway definition (model, prompt, temperature, ...)
     * @throws {Exception} if the named model is not present in config
     */
    constructor({ config, pathway }) {
        // If the pathway specifies a model, use that, otherwise use the default
        this.modelName = pathway.model || config.get('defaultModelName');
        // Get the model from the config
        this.model = config.get('models')[this.modelName];
        // If the model doesn't exist, throw an exception
        if (!this.model) {
            throw new Exception(`Model ${this.modelName} not found in config`);
        }
        this.environmentVariables = config.getEnv();
        this.temperature = pathway.temperature;
        this.pathwayPrompt = pathway.prompt;
        this.pathwayName = pathway.name;
        this.promptParameters = {};
        // Make all of the parameters defined on the pathway itself available
        // to the prompt; parameter specs contribute their .default when present.
        // BUG FIX: use optional chaining so null-valued pathway entries don't
        // throw on `.default` access.
        for (const [k, v] of Object.entries(pathway)) {
            this.promptParameters[k] = v?.default ?? v;
        }
        if (pathway.inputParameters) {
            for (const [k, v] of Object.entries(pathway.inputParameters)) {
                this.promptParameters[k] = v?.default ?? v;
            }
        }
        // Per-instance counter used to label console traces of each request.
        this.requestCount = 1;
    }

    // Max token budget: pathway override, then model limit, then default.
    getModelMaxTokenLength() {
        return (this.promptParameters.maxTokenLength ?? this.model.maxTokenLength ?? DEFAULT_MAX_TOKENS);
    }

    // Fraction of the token budget reserved for the prompt (vs. the result).
    // BUG FIX: optional-chain inputParameters — pathways without an
    // inputParameters object used to throw a TypeError here.
    getPromptTokenRatio() {
        return this.promptParameters.inputParameters?.tokenRatio ?? this.promptParameters.tokenRatio ?? DEFAULT_PROMPT_TOKEN_RATIO;
    }

    // Expand the model's url template with model fields and environment vars.
    requestUrl() {
        const generateUrl = handlebars.compile(this.model.url);
        // NOTE(review): this.config is never assigned in this class, so the
        // spread below is currently a no-op ({...undefined} === {}) —
        // confirm whether config values were meant to reach url templates.
        return generateUrl({ ...this.model, ...this.environmentVariables, ...this.config });
    }

    // Build the request body for the model call. The prompt object will
    // either have a messages property or a prompt property, or it could be a
    // function that returns prompt text.
    requestParameters(text, parameters, prompt) {
        const combinedParameters = { ...this.promptParameters, ...parameters };

        // A messages prompt is inherently chat-style: compile each message's
        // content template and send the messages directly to the API.
        if (prompt.messages) {
            const compiledMessages = prompt.messages.map((message) => {
                const compileText = handlebars.compile(message.content);
                return {
                    role: message.role,
                    content: compileText({ ...combinedParameters, text }),
                };
            });

            return {
                messages: compiledMessages,
                temperature: this.temperature ?? 0.7,
            };
        }

        // otherwise, we need to get the prompt text
        let promptText;
        if (typeof (prompt) === 'function') {
            promptText = prompt(parameters);
        } else {
            promptText = prompt.prompt;
        }

        const interpolatePrompt = handlebars.compile(promptText);
        const constructedPrompt = interpolatePrompt({ ...combinedParameters, text });

        // Chat models take messages; completion models take a prompt plus an
        // explicit max_tokens budget (model limit minus the prompt's length).
        if (this.model.type === 'OPENAI_CHAT') {
            return {
                messages: [{ "role": "user", "content": constructedPrompt }],
                temperature: this.temperature ?? 0.7,
            };
        }

        return {
            prompt: constructedPrompt,
            max_tokens: this.getModelMaxTokenLength() - encode(constructedPrompt).length - 1,
            temperature: this.temperature ?? 0.7,
        };
    }

    /**
     * Execute the pathway prompt against the model endpoint.
     * @param {string} text - input text made available to prompt templates
     * @param {object} parameters - per-call prompt parameters
     * @param {object|Function} prompt - prompt spec (messages, template, or fn)
     * @returns {Promise<string|Array|null>} parsed model result
     * @throws {Exception} when the server response carries an error payload
     */
    async execute(text, parameters, prompt) {
        const requestParameters = this.requestParameters(text, parameters, prompt);

        // BUG FIX: requestUrl takes no arguments; the stray `text` argument
        // passed previously was silently ignored.
        const url = this.requestUrl();
        const params = { ...(this.model.params || {}), ...requestParameters };
        const headers = this.model.headers || {};
        const data = await request({ url, params, headers }, this.modelName);

        // BUG FIX: surface server errors before logging/parsing a result
        // (the original logged first and parsed the response twice).
        if (data.error) {
            throw new Exception(`An error was returned from the server: ${JSON.stringify(data.error)}`);
        }

        const result = getResponseResult(data);
        const modelInput = params.prompt || params.messages[0].content;
        console.log(`=== ${this.pathwayName}.${this.requestCount++} ===`);
        console.log(`\x1b[36m${modelInput}\x1b[0m`);
        console.log(`\x1b[34m> ${result}\x1b[0m`);

        return result;
    }
}
142
+
143
// Expose the prompter class.
module.exports = { PathwayPrompter };
@@ -0,0 +1,250 @@
1
+ const { PathwayPrompter } = require('./pathwayPrompter');
2
+ const {
3
+ v4: uuidv4,
4
+ } = require('uuid');
5
+ const pubsub = require('./pubsub');
6
+ const { encode } = require('gpt-3-encoder')
7
+ const { getFirstNToken, getLastNToken, getSemanticChunks } = require('./chunker');
8
+ const { PathwayResponseParser } = require('./pathwayResponseParser');
9
+ const { Prompt } = require('./prompt');
10
+ const { getv, setv } = require('../lib/keyValueStorageClient');
11
+
12
+ const MAX_PREVIOUS_RESULT_TOKEN_LENGTH = 1000;
13
+
14
// Resolve another pathway by name with a fresh resolver; used for
// pathway-to-pathway calls such as pre-summarization.
const callPathway = async (config, pathwayName, requestState, { text, ...parameters }) => {
    const pathway = config.get(`pathways.${pathwayName}`);
    const resolver = new PathwayResolver({ config, pathway, requestState });
    return await resolver.resolve({ text, ...parameters });
};
18
+
19
// Orchestrates a single pathway run: summarizing/chunking the input,
// applying the pathway's prompt sequence (serially or in parallel across
// chunks), tracking per-request state for cancellation and progress
// publishing, and persisting saved context between calls.
class PathwayResolver {
    constructor({ config, pathway, requestState }) {
        this.config = config;
        this.requestState = requestState;
        this.pathway = pathway;
        this.useInputChunking = pathway.useInputChunking;
        this.chunkMaxTokenLength = 0;
        this.warnings = [];
        this.requestId = uuidv4();
        this.responseParser = new PathwayResponseParser(pathway);
        this.pathwayPrompter = new PathwayPrompter({ config, pathway });
        this.previousResult = '';
        this.prompts = [];
        this._pathwayPrompt = '';

        // pathwayPrompt is a live property: assigning it normalizes the value
        // to an array, wraps raw entries in Prompt objects, and recomputes
        // the per-chunk token budget.
        Object.defineProperty(this, 'pathwayPrompt', {
            get() {
                return this._pathwayPrompt;
            },
            set(value) {
                this._pathwayPrompt = Array.isArray(value) ? value : [value];
                this.prompts = this._pathwayPrompt.map(p => (p instanceof Prompt) ? p : new Prompt({ prompt: p }));
                this.chunkMaxTokenLength = this.getChunkMaxTokenLength();
            }
        });

        this.pathwayPrompt = pathway.prompt;
    }

    // Resolve the pathway. With args.async, kick processing off in the
    // background and return the requestId immediately (results arrive via
    // the REQUEST_PROGRESS subscription); otherwise resolve synchronously.
    async resolve(args) {
        if (args.async) {
            // Asynchronously process the request
            this.promptAndParse(args).then((data) => {
                this.requestState[this.requestId].data = data;
                pubsub.publish('REQUEST_PROGRESS', {
                    requestProgress: {
                        requestId: this.requestId,
                        data: JSON.stringify(data)
                    }
                });
            }).catch((err) => {
                // BUG FIX: background failures used to surface as unhandled
                // promise rejections; at minimum log them with the requestId.
                console.error(`Async pathway request ${this.requestId} failed:`, err);
            });

            return this.requestId;
        }
        // Synchronously process the request
        return await this.promptAndParse(args);
    }

    // Run the pathway with saved-context handling: load context by
    // contextId, process the request, persist the context if it changed
    // (minting a new contextId when needed), then parse the result.
    async promptAndParse(args) {
        // Get saved context from contextId or change contextId if needed
        const { contextId } = args;
        this.savedContextId = contextId ? contextId : null;
        this.savedContext = contextId ? (getv && await getv(contextId) || {}) : {};

        // Snapshot the context so changes can be detected after processing
        const savedContextStr = JSON.stringify(this.savedContext);

        // Process the request
        const data = await this.processRequest(args);

        // Update saved context if it has changed, generating a new contextId if necessary
        if (savedContextStr !== JSON.stringify(this.savedContext)) {
            this.savedContextId = this.savedContextId || uuidv4();
            setv && setv(this.savedContextId, this.savedContext);
        }

        // Return the parsed result
        return this.responseParser.parse(data);
    }

    // Here we choose how to handle long input - either truncate or chunk
    processInputText(text) {
        let chunkMaxChunkTokenLength = 0;
        if (this.pathway.inputChunkSize) {
            chunkMaxChunkTokenLength = Math.min(this.pathway.inputChunkSize, this.chunkMaxTokenLength);
        } else {
            chunkMaxChunkTokenLength = this.chunkMaxTokenLength;
        }
        const encoded = encode(text);
        if (!this.useInputChunking || encoded.length <= chunkMaxChunkTokenLength) { // no chunking, return as is
            if (encoded.length >= chunkMaxChunkTokenLength) {
                const warnText = `Your input is possibly too long, truncating! Text length: ${text.length}`;
                this.warnings.push(warnText);
                console.warn(warnText);
                // BUG FIX: truncate is an instance method; the original bare
                // call `truncate(...)` threw a ReferenceError at runtime.
                text = this.truncate(text, chunkMaxChunkTokenLength);
            }
            return [text];
        }

        // chunk the text and return the chunks
        return getSemanticChunks({ text, maxChunkToken: chunkMaxChunkTokenLength });
    }

    // Keep at most n tokens of str, dropping from the front or back
    // according to the pathway's truncateFromFront setting.
    truncate(str, n) {
        if (this.pathwayPrompter.promptParameters.truncateFromFront) {
            return getFirstNToken(str, n);
        }
        return getLastNToken(str, n);
    }

    // Optionally pre-summarize long input via the 'summary' pathway.
    async summarizeIfEnabled({ text, ...parameters }) {
        if (this.pathway.useInputSummarization) {
            return await callPathway(this.config, 'summary', this.requestState, { text, targetLength: 1000, ...parameters });
        }
        return text;
    }

    // Calculate the maximum token length for a chunk
    getChunkMaxTokenLength() {
        // find the longest prompt (plain-text and chat-message forms)
        const maxPromptTokenLength = Math.max(...this.prompts.map(({ prompt }) => prompt ? encode(String(prompt)).length : 0));
        const maxMessagesTokenLength = Math.max(...this.prompts.map(({ messages }) => messages ? messages.reduce((acc, { role, content }) => {
            return acc + encode(role).length + encode(content).length;
        }, 0) : 0));

        const maxTokenLength = Math.max(maxPromptTokenLength, maxMessagesTokenLength);

        // find out if any prompts use both text input and previous result
        const hasBothProperties = this.prompts.some(prompt => prompt.usesInputText && prompt.usesPreviousResult);

        // the token ratio is the ratio of the total prompt to the result text - both have to be included
        // in computing the max token length
        const promptRatio = this.pathwayPrompter.getPromptTokenRatio();
        let maxChunkToken = promptRatio * this.pathwayPrompter.getModelMaxTokenLength() - maxTokenLength;

        // if we have to deal with prompts that have both text input
        // and previous result, we need to split the maxChunkToken in half
        maxChunkToken = hasBothProperties ? maxChunkToken / 2 : maxChunkToken;

        // detect if the longest prompt is too long to allow any chunk size.
        // BUG FIX: the old guard (`maxChunkToken && maxChunkToken <= 0`)
        // skipped the throw when the budget was exactly 0, and the message
        // referenced an undefined variable (maxPromptLength).
        if (maxChunkToken <= 0) {
            throw new Error(`Your prompt is too long! Split to multiple prompts or reduce length of your prompt, prompt length: ${maxTokenLength}`);
        }
        return maxChunkToken;
    }

    // Process the request and return the result
    async processRequest({ text, ...parameters }) {
        text = await this.summarizeIfEnabled({ text, ...parameters }); // summarize if flag enabled
        const chunks = this.processInputText(text);

        const anticipatedRequestCount = chunks.length * this.prompts.length;

        if ((this.requestState[this.requestId] || {}).canceled) {
            throw new Error('Request canceled');
        }

        // Store the request state
        this.requestState[this.requestId] = { totalCount: anticipatedRequestCount, completedCount: 0 };

        // If pre information is needed, apply current prompt with previous prompt info, only parallelize current call
        if (this.pathway.useParallelChunkProcessing) {
            // Apply each prompt across all chunks in parallel
            // this.previousResult is not available at the object level as it is different for each chunk
            this.previousResult = '';
            const data = await Promise.all(chunks.map(chunk =>
                this.applyPromptsSerially(chunk, parameters)));
            // Join the chunks with newlines
            return data.join("\n\n");
        }

        // Apply prompts one by one, serially, across all chunks
        // This is the default processing mode and will make previousResult available at the object level
        let previousResult = '';
        let result = '';

        for (let i = 0; i < this.prompts.length; i++) {
            // If the prompt doesn't contain {{text}} then we can skip the chunking, and also give that token space to the previous result
            if (!this.prompts[i].usesTextInput) {
                // Limit context to 2x the chunk token budget
                previousResult = this.truncate(previousResult, 2 * this.chunkMaxTokenLength);
                result = await this.applyPrompt(this.prompts[i], null, { ...parameters, previousResult });
            } else {
                // Limit context to the chunk token budget
                previousResult = this.truncate(previousResult, this.chunkMaxTokenLength);
                result = await Promise.all(chunks.map(chunk =>
                    this.applyPrompt(this.prompts[i], chunk, { ...parameters, previousResult })));
                result = result.join("\n\n");
            }

            // If this is any prompt other than the last, use the result as the previous context
            if (i < this.prompts.length - 1) {
                previousResult = result;
            }
        }
        // store the previous result in the PathwayResolver
        this.previousResult = previousResult;
        return result;
    }

    // Apply each prompt in turn to a single chunk, threading the running
    // result through as previousResult.
    async applyPromptsSerially(text, parameters) {
        let previousResult = '';
        let result = '';
        for (const prompt of this.prompts) {
            previousResult = result;
            result = await this.applyPrompt(prompt, text, { ...parameters, previousResult });
        }
        return result;
    }

    // Execute one prompt, update progress counters, publish progress, and
    // stash the result into saved context when the prompt requests it.
    async applyPrompt(prompt, text, parameters) {
        if (this.requestState[this.requestId].canceled) {
            return;
        }
        const result = await this.pathwayPrompter.execute(text, { ...parameters, ...this.savedContext }, prompt);
        this.requestState[this.requestId].completedCount++;

        const { completedCount, totalCount } = this.requestState[this.requestId];

        pubsub.publish('REQUEST_PROGRESS', {
            requestProgress: {
                requestId: this.requestId,
                progress: completedCount / totalCount,
            }
        });

        if (prompt.saveResultTo) {
            this.savedContext[prompt.saveResultTo] = result;
        }
        return result;
    }
}
249
+
250
// Expose the resolver class.
module.exports = { PathwayResolver };
@@ -0,0 +1,24 @@
1
+ const { parseNumberedList, parseNumberedObjectList } = require('./parser')
2
+
3
// Shapes raw model output according to the pathway's declaration: a custom
// parser when provided, a parsed (object) list when `list` is set,
// otherwise the data untouched.
class PathwayResponseParser {
    constructor(pathway) {
        this.pathway = pathway;
    }

    parse(data) {
        // A pathway-supplied parser always wins.
        if (this.pathway.parser) {
            return this.pathway.parser(data);
        }

        // List pathways: object-list when a format is declared, plain
        // numbered list otherwise.
        if (this.pathway.list) {
            return this.pathway.format
                ? parseNumberedObjectList(data, this.pathway.format)
                : parseNumberedList(data);
        }

        return data;
    }
}
23
+
24
// Expose the response parser class.
module.exports = { PathwayResponseParser };