@aj-archipelago/cortex 1.0.8 → 1.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/config.js CHANGED
@@ -9,42 +9,42 @@ const __dirname = path.dirname(fileURLToPath(import.meta.url));
 
 // Schema for config
 var config = convict({
-    pathwaysPath: {
+    basePathwayPath: {
         format: String,
-        default: path.join(process.cwd(), '/pathways'),
-        env: 'CORTEX_PATHWAYS_PATH'
+        default: path.join(__dirname, 'pathways', 'basePathway.js'),
+        env: 'CORTEX_BASE_PATHWAY_PATH'
     },
     corePathwaysPath: {
         format: String,
         default: path.join(__dirname, 'pathways'),
         env: 'CORTEX_CORE_PATHWAYS_PATH'
     },
-    basePathwayPath: {
+    cortexApiKey: {
         format: String,
-        default: path.join(__dirname, 'pathways', 'basePathway.js'),
-        env: 'CORTEX_BASE_PATHWAY_PATH'
-    },
-    storageConnectionString: {
-        doc: 'Connection string used for access to Storage',
-        format: '*',
-        default: '',
-        sensitive: true,
-        env: 'STORAGE_CONNECTION_STRING'
+        default: null,
+        env: 'CORTEX_API_KEY',
+        sensitive: true
     },
-    PORT: {
-        format: 'port',
-        default: 4000,
-        env: 'CORTEX_PORT'
+    cortexConfigFile: {
+        format: String,
+        default: null,
+        env: 'CORTEX_CONFIG_FILE'
     },
-    pathways: {
-        format: Object,
-        default: {}
+    defaultModelName: {
+        format: String,
+        default: null,
+        env: 'DEFAULT_MODEL_NAME'
     },
     enableCache: {
         format: Boolean,
         default: true,
         env: 'CORTEX_ENABLE_CACHE'
     },
+    enableDuplicateRequests: {
+        format: Boolean,
+        default: true,
+        env: 'CORTEX_ENABLE_DUPLICATE_REQUESTS'
+    },
     enableGraphqlCache: {
         format: Boolean,
         default: false,
@@ -55,17 +55,12 @@ var config = convict({
         default: false,
         env: 'CORTEX_ENABLE_REST'
     },
-    cortexApiKey: {
+    gcpServiceAccountKey: {
         format: String,
         default: null,
-        env: 'CORTEX_API_KEY',
+        env: 'GCP_SERVICE_ACCOUNT_KEY',
         sensitive: true
     },
-    defaultModelName: {
-        format: String,
-        default: null,
-        env: 'DEFAULT_MODEL_NAME'
-    },
     models: {
         format: Object,
         default: {
@@ -80,7 +75,8 @@ var config = convict({
                "model": "gpt-3.5-turbo"
            },
            "requestsPerSecond": 10,
-           "maxTokenLength": 8192
+           "maxTokenLength": 8192,
+           "supportsStreaming": true,
        },
        "oai-whisper": {
            "type": "OPENAI-WHISPER",
@@ -95,11 +91,6 @@ var config = convict({
        },
        env: 'CORTEX_MODELS'
    },
-    openaiDefaultModel: {
-        format: String,
-        default: 'gpt-3.5-turbo',
-        env: 'OPENAI_DEFAULT_MODEL'
-    },
    openaiApiKey: {
        format: String,
        default: null,
@@ -111,10 +102,31 @@ var config = convict({
        default: 'https://api.openai.com/v1/completions',
        env: 'OPENAI_API_URL'
    },
-    cortexConfigFile: {
+    openaiDefaultModel: {
        format: String,
-        default: null,
-        env: 'CORTEX_CONFIG_FILE'
+        default: 'gpt-3.5-turbo',
+        env: 'OPENAI_DEFAULT_MODEL'
+    },
+    pathways: {
+        format: Object,
+        default: {}
+    },
+    pathwaysPath: {
+        format: String,
+        default: path.join(process.cwd(), '/pathways'),
+        env: 'CORTEX_PATHWAYS_PATH'
+    },
+    PORT: {
+        format: 'port',
+        default: 4000,
+        env: 'CORTEX_PORT'
+    },
+    storageConnectionString: {
+        doc: 'Connection string used for access to Storage',
+        format: '*',
+        default: '',
+        sensitive: true,
+        env: 'STORAGE_CONNECTION_STRING'
    },
    whisperMediaApiUrl: {
        format: String,
@@ -126,12 +138,6 @@ var config = convict({
        default: 'null',
        env: 'WHISPER_TS_API_URL'
    },
-    gcpServiceAccountKey: {
-        format: String,
-        default: null,
-        env: 'GCP_SERVICE_ACCOUNT_KEY',
-        sensitive: true
-    },
 });
 
 // Read in environment variables and set up service configuration
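As a reading aid, here is a minimal convict sketch showing how the reorganized settings above behave; the two schema entries are copied from the diff, while the surrounding code is illustrative and not part of the package:

```js
import convict from 'convict';

// Two entries copied from the schema above; environment variables override defaults.
const config = convict({
    enableDuplicateRequests: {
        format: Boolean,
        default: true,
        env: 'CORTEX_ENABLE_DUPLICATE_REQUESTS'
    },
    PORT: {
        format: 'port',
        default: 4000,
        env: 'CORTEX_PORT'
    }
});

config.validate({ allowed: 'strict' });
// With CORTEX_PORT=5000 set in the environment:
console.log(config.get('PORT'));                    // 5000 (env wins)
console.log(config.get('enableDuplicateRequests')); // true (schema default)
```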
package/package-lock.json CHANGED
@@ -18,7 +18,7 @@
         "ioredis": "^5.3.1",
         "public-ip": "^6.0.1",
         "uuid": "^9.0.0",
-        "ytdl-core": "github:khlevon/node-ytdl-core#v4.11.4-patch.2"
+        "ytdl-core": "^4.11.5"
       }
     },
     "node_modules/@azure/abort-controller": {
@@ -1740,9 +1740,9 @@
       }
     },
     "node_modules/ytdl-core": {
-      "version": "0.0.0-development",
-      "resolved": "git+ssh://git@github.com/khlevon/node-ytdl-core.git#87450450caabb91f81afa6e66758bf2f629664a1",
-      "license": "MIT",
+      "version": "4.11.5",
+      "resolved": "https://registry.npmjs.org/ytdl-core/-/ytdl-core-4.11.5.tgz",
+      "integrity": "sha512-27LwsW4n4nyNviRCO1hmr8Wr5J1wLLMawHCQvH8Fk0hiRqrxuIu028WzbJetiYH28K8XDbeinYW4/wcHQD1EXA==",
       "dependencies": {
         "m3u8stream": "^0.8.6",
         "miniget": "^4.2.2",
@@ -2962,8 +2962,9 @@
       "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA=="
     },
     "ytdl-core": {
-      "version": "git+ssh://git@github.com/khlevon/node-ytdl-core.git#87450450caabb91f81afa6e66758bf2f629664a1",
-      "from": "ytdl-core@github:khlevon/node-ytdl-core#v4.11.4-patch.2",
+      "version": "4.11.5",
+      "resolved": "https://registry.npmjs.org/ytdl-core/-/ytdl-core-4.11.5.tgz",
+      "integrity": "sha512-27LwsW4n4nyNviRCO1hmr8Wr5J1wLLMawHCQvH8Fk0hiRqrxuIu028WzbJetiYH28K8XDbeinYW4/wcHQD1EXA==",
       "requires": {
         "m3u8stream": "^0.8.6",
         "miniget": "^4.2.2",
@@ -18,6 +18,6 @@
     "ioredis": "^5.3.1",
     "public-ip": "^6.0.1",
     "uuid": "^9.0.0",
-    "ytdl-core": "github:khlevon/node-ytdl-core#v4.11.4-patch.2"
+    "ytdl-core": "^4.11.5"
   }
 }
package/lib/request.js CHANGED
@@ -62,48 +62,149 @@ const postWithMonitor = async (model, url, data, axiosConfigObj) => {
     return cortexAxios.post(url, data, axiosConfigObj);
 }
 
-const MAX_RETRY = 10;
-const postRequest = async ({ url, data, params, headers, cache }, model) => {
-    const errors = []
+const MAX_RETRY = 10; // retries for error handling
+const MAX_DUPLICATE_REQUESTS = 3; // duplicate requests to manage latency spikes
+const DUPLICATE_REQUEST_AFTER = 10; // 10 seconds
+
+const postRequest = async ({ url, data, params, headers, cache }, model, requestId, pathway) => {
+    let promises = [];
     for (let i = 0; i < MAX_RETRY; i++) {
+        const modelProperties = config.get('models')[model];
+        const enableDuplicateRequests = pathway.enableDuplicateRequests !== undefined ? pathway.enableDuplicateRequests : config.get('enableDuplicateRequests');
+        let maxDuplicateRequests = enableDuplicateRequests ? MAX_DUPLICATE_REQUESTS : 1;
+        let duplicateRequestAfter = (pathway.duplicateRequestAfter || DUPLICATE_REQUEST_AFTER) * 1000;
+
+        if (enableDuplicateRequests) {
+            //console.log(`>>> [${requestId}] Duplicate requests enabled after ${duplicateRequestAfter / 1000} seconds`);
+        }
+
+        const axiosConfigObj = { params, headers, cache };
+        const streamRequested = (params.stream || data.stream);
+        if (streamRequested && modelProperties.supportsStreaming) {
+            axiosConfigObj.responseType = 'stream';
+            promises.push(limiters[model].schedule(() => postWithMonitor(model, url, data, axiosConfigObj)));
+        } else {
+            if (streamRequested) {
+                console.log(`>>> [${requestId}] ${model} does not support streaming - sending non-streaming request`);
+                axiosConfigObj.params.stream = false;
+                data.stream = false;
+            }
+            const controllers = Array.from({ length: maxDuplicateRequests }, () => new AbortController());
+            promises = controllers.map((controller, index) =>
+                new Promise((resolve, reject) => {
+                    const duplicateRequestTime = duplicateRequestAfter * Math.pow(2, index) - duplicateRequestAfter;
+                    const jitter = duplicateRequestTime * 0.2 * Math.random();
+                    const duplicateRequestTimeout = Math.max(0, duplicateRequestTime + jitter);
+                    setTimeout(async () => {
+                        try {
+                            if (!limiters[model]) {
+                                throw new Error(`No limiter for model ${model}!`);
+                            }
+                            const axiosConfigObj = { params, headers, cache };
+
+                            let response = null;
+
+                            if (!controller.signal?.aborted) {
+
+                                axiosConfigObj.signal = controller.signal;
+                                axiosConfigObj.headers['X-Cortex-Request-Index'] = index;
+
+                                if (index === 0) {
+                                    //console.log(`>>> [${requestId}] sending request to ${model} API ${axiosConfigObj.responseType === 'stream' ? 'with streaming' : ''}`);
+                                } else {
+                                    if (modelProperties.supportsStreaming) {
+                                        axiosConfigObj.responseType = 'stream';
+                                        axiosConfigObj.cache = false;
+                                    }
+                                    const logMessage = `>>> [${requestId}] taking too long - sending duplicate request ${index} to ${model} API ${axiosConfigObj.responseType === 'stream' ? 'with streaming' : ''}`;
+                                    const header = '>'.repeat(logMessage.length);
+                                    console.log(`\n${header}\n${logMessage}`);
+                                }
+
+                                response = await limiters[model].schedule(() => postWithMonitor(model, url, data, axiosConfigObj));
+
+                                if (!controller.signal?.aborted) {
+
+                                    //console.log(`<<< [${requestId}] received response for request ${index}`);
+
+                                    if (axiosConfigObj.responseType === 'stream') {
+                                        // Buffering and collecting the stream data
+                                        console.log(`<<< [${requestId}] buffering streaming response for request ${index}`);
+                                        response = await new Promise((resolve, reject) => {
+                                            let responseData = '';
+                                            response.data.on('data', (chunk) => {
+                                                responseData += chunk;
+                                                //console.log(`<<< [${requestId}] received chunk for request ${index}`);
+                                            });
+                                            response.data.on('end', () => {
+                                                response.data = JSON.parse(responseData);
+                                                resolve(response);
+                                            });
+                                            response.data.on('error', (error) => {
+                                                reject(error);
+                                            });
+                                        });
+                                    }
+                                }
+                            }
+
+                            resolve(response);
+
+                        } catch (error) {
+                            if (error.name === 'AbortError' || error.name === 'CanceledError') {
+                                //console.log(`XXX [${requestId}] request ${index} was cancelled`);
+                                reject(error);
+                            } else {
+                                console.log(`!!! [${requestId}] request ${index} failed with error: ${error?.response?.data?.error?.message || error}`);
+                                reject(error);
+                            }
+                        } finally {
+                            controllers.forEach(controller => controller.abort());
+                        }
+                    }, duplicateRequestTimeout);
+                })
+            );
+        }
+
         try {
-            if (i > 0) {
-                console.log(`Retrying request #retry ${i}: ${JSON.stringify(data)}...`);
-                await new Promise(r => setTimeout(r, 200 * Math.pow(2, i))); // exponential backoff
-            }
-            if (!limiters[model]) {
-                throw new Error(`No limiter for model ${model}!`);
+            const response = await Promise.race(promises);
+
+            if (response.status === 200) {
+                return response;
+            } else {
+                throw new Error(`Received error response: ${response.status}`);
             }
-            const axiosConfigObj = { params, headers, cache };
-            if (params.stream || data.stream) {
-                axiosConfigObj.responseType = 'stream';
-            }
-            return await limiters[model].schedule(() => postWithMonitor(model, url, data, axiosConfigObj));
-        } catch (e) {
-            console.error(`Failed request with data ${JSON.stringify(data)}: ${e} - ${e.response?.data?.error?.type || 'error'}: ${e.response?.data?.error?.message}`);
-            if (e.response?.status && e.response?.status === 429) {
+        } catch (error) {
+            //console.error(`!!! [${requestId}] failed request with data ${JSON.stringify(data)}: ${error}`);
+            if (error.response?.status === 429) {
                 monitors[model].incrementError429Count();
             }
-            errors.push(e);
+            console.log(`>>> [${requestId}] retrying request due to ${error.response?.status} response. Retry count: ${i + 1}`);
+            if (i < MAX_RETRY - 1) {
+                const backoffTime = 200 * Math.pow(2, i);
+                const jitter = backoffTime * 0.2 * Math.random();
+                await new Promise(r => setTimeout(r, backoffTime + jitter));
+            } else {
+                throw error;
+            }
         }
     }
-    return { error: errors };
-}
+};
 
-const request = async (params, model) => {
-    const response = await postRequest(params, model);
+const request = async (params, model, requestId, pathway) => {
+    const response = await postRequest(params, model, requestId, pathway);
     const { error, data, cached } = response;
     if (cached) {
-        console.info('=== Request served with cached response. ===');
+        console.info(`<<< [${requestId}] served with cached response.`);
     }
     if (error && error.length > 0) {
         const lastError = error[error.length - 1];
         return { error: lastError.toJSON() ?? lastError ?? error };
     }
-
+    //console.log("<<< [${requestId}] response: ", data.choices[0].delta || data.choices[0])
     return data;
 }
 
 export {
-    axios,request, postRequest, buildLimiters
+    axios, request, postRequest, buildLimiters
 };
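The heart of the new `postRequest` is request hedging: if the first attempt is still in flight after `duplicateRequestAfter` seconds, a backup request is fired, the first response to settle wins, and the losers are aborted. A standalone sketch of the same pattern, assuming Node 18+ `fetch`; the helper name and defaults are illustrative, not Cortex APIs:

```js
// Hedged-request sketch: start times are exponentially spaced with jitter,
// mirroring duplicateRequestAfter * 2^index - duplicateRequestAfter above.
async function hedgedFetch(url, { delayMs = 10000, maxAttempts = 3 } = {}) {
    const controllers = Array.from({ length: maxAttempts }, () => new AbortController());
    const attempts = controllers.map((controller, index) =>
        new Promise((resolve, reject) => {
            const start = delayMs * Math.pow(2, index) - delayMs; // 0, delay, 3*delay...
            const jitter = start * 0.2 * Math.random();
            setTimeout(async () => {
                try {
                    if (controller.signal.aborted) return resolve(null); // lost the race already
                    resolve(await fetch(url, { signal: controller.signal }));
                } catch (error) {
                    reject(error);
                } finally {
                    controllers.forEach(c => c.abort()); // first settled attempt cancels the rest
                }
            }, start + jitter);
        })
    );
    return Promise.race(attempts);
}
```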
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex",
-  "version": "1.0.8",
+  "version": "1.0.10",
   "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
   "repository": {
     "type": "git",
package/pathways/basePathway.js CHANGED
@@ -14,11 +14,12 @@ export default {
     typeDef,
     rootResolver,
     resolver,
-    inputFormat: 'text',
-    useInputChunking: true,
-    useParallelChunkProcessing: false,
-    useInputSummarization: false,
-    truncateFromFront: false,
-    timeout: 120, // in seconds
+    inputFormat: 'text', // text or html - changes the behavior of the input chunking
+    useInputChunking: true, // true or false - enables input to be split into multiple chunks to meet context window size
+    useParallelChunkProcessing: false, // true or false - enables parallel processing of chunks
+    useInputSummarization: false, // true or false - instead of chunking, summarize the input and act on the summary
+    truncateFromFront: false, // true or false - if true, truncate from the front of the input instead of the back
+    timeout: 120, // seconds, cancels the pathway after this many seconds
+    duplicateRequestAfter: 10, // seconds, if the request is not completed after this many seconds, a backup request is sent
 };
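Any pathway can override these defaults, as the transcribe and translate pathways later in this diff do with `enableDuplicateRequests: false`. A sketch of a custom pathway tuning the new knobs (the pathway itself is illustrative):

```js
// my_pathway.js - illustrative pathway overriding the new base defaults
export default {
    prompt: `Summarize the following text:\n\n{{text}}`,
    timeout: 300,                  // seconds before the pathway is cancelled
    enableDuplicateRequests: true, // opt in to hedged backup requests
    duplicateRequestAfter: 5,      // send the backup after 5s instead of 10s
};
```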
package/pathways/index.js CHANGED
@@ -8,6 +8,7 @@ import sentiment from './sentiment.js';
 import summary from './summary.js';
 import sys_openai_chat from './sys_openai_chat.js';
 import sys_openai_completion from './sys_openai_completion.js';
+import test_cohere_summarize from './test_cohere_summarize.js';
 import test_langchain from './test_langchain.mjs';
 import test_palm_chat from './test_palm_chat.js';
 import transcribe from './transcribe.js';
@@ -24,6 +25,7 @@ export {
     summary,
     sys_openai_chat,
     sys_openai_completion,
+    test_cohere_summarize,
     test_langchain,
     test_palm_chat,
     transcribe,
package/pathways/test_cohere_summarize.js ADDED
@@ -0,0 +1,10 @@
+// test_cohere_summarize.js
+// Summarize text with the Cohere model
+
+export default {
+    // Uncomment the following line to enable caching for this prompt, if desired.
+    // enableCache: true,
+
+    prompt: `{{text}}`,
+    model: 'cohere-summarize'
+};
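The pathway's `model: 'cohere-summarize'` must resolve to an entry in the `models` config whose `type` matches the new `COHERE-SUMMARIZE` case added to PathwayPrompter further down. A sketch of such an entry; the endpoint URL and auth header are assumptions, only the `type` value comes from this diff:

```js
// Illustrative models entry; url and headers are assumptions.
"cohere-summarize": {
    "type": "COHERE-SUMMARIZE",
    "url": "https://api.cohere.ai/v1/summarize",
    "headers": {
        "Authorization": "Bearer {{COHERE_API_KEY}}"
    }
}
```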
package/pathways/transcribe.js CHANGED
@@ -8,6 +8,7 @@ export default {
         wordTimestamped: false,
     },
     timeout: 3600, // in seconds
+    enableDuplicateRequests: false,
 };
 
 
package/pathways/translate.js CHANGED
@@ -16,5 +16,6 @@ export default {
     // Set the timeout for the translation process, in seconds.
     timeout: 400,
     inputChunkSize: 500,
+    enableDuplicateRequests: false,
 };
 
package/server/parser.js CHANGED
@@ -37,9 +37,21 @@ const parseCommaSeparatedList = (str) => {
     return str.split(',').map(s => s.trim()).filter(s => s.length);
 }
 
+const isCommaSeparatedList = (data) => {
+    const commaSeparatedPattern = /^([^,\n]+,)+[^,\n]+$/;
+    return commaSeparatedPattern.test(data.trim());
+}
+
+const isNumberedList = (data) => {
+    const numberedListPattern = /^\s*[\[\{\(]*\d+[\s.=\-:,;\]\)\}]/gm;
+    return numberedListPattern.test(data.trim());
+}
+
 export {
     regexParser,
     parseNumberedList,
     parseNumberedObjectList,
     parseCommaSeparatedList,
+    isCommaSeparatedList,
+    isNumberedList,
 };
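A quick look at how the two new detectors classify typical model output (inputs illustrative):

```js
import { isNumberedList, isCommaSeparatedList } from './parser.js';

isNumberedList('1. apples\n2. oranges\n3. pears'); // true
isNumberedList('(1) apples');                      // true - optional bracket, then a digit
isCommaSeparatedList('apples, oranges, pears');    // true
isCommaSeparatedList('1. apples, oranges');        // also true, which is why the response
                                                   // parser checks numbered lists first
```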
package/server/pathwayPrompter.js CHANGED
@@ -7,6 +7,8 @@ import LocalModelPlugin from './plugins/localModelPlugin.js';
 import PalmChatPlugin from './plugins/palmChatPlugin.js';
 import PalmCompletionPlugin from './plugins/palmCompletionPlugin.js';
 import PalmCodeCompletionPlugin from './plugins/palmCodeCompletionPlugin.js';
+import CohereGeneratePlugin from './plugins/cohereGeneratePlugin.js';
+import CohereSummarizePlugin from './plugins/cohereSummarizePlugin.js';
 
 class PathwayPrompter {
     constructor(config, pathway, modelName, model) {
@@ -38,6 +40,12 @@ class PathwayPrompter {
             case 'PALM-CODE-COMPLETION':
                 plugin = new PalmCodeCompletionPlugin(config, pathway, modelName, model);
                 break;
+            case 'COHERE-GENERATE':
+                plugin = new CohereGeneratePlugin(config, pathway, modelName, model);
+                break;
+            case 'COHERE-SUMMARIZE':
+                plugin = new CohereSummarizePlugin(config, pathway, modelName, model);
+                break;
             default:
                 throw new Error(`Unsupported model type: ${model.type}`);
         }
package/server/pathwayResolver.js CHANGED
@@ -9,6 +9,7 @@ import { Prompt } from './prompt.js';
 import { getv, setv } from '../lib/keyValueStorageClient.js';
 import { requestState } from './requestState.js';
 import { callPathway } from '../lib/pathwayTools.js';
+import { response } from 'express';
 
 class PathwayResolver {
     constructor({ config, pathway, args }) {
@@ -63,57 +64,116 @@ class PathwayResolver {
         this.pathwayPrompt = pathway.prompt;
     }
 
+    // This code handles async and streaming responses. In either case, we use
+    // the graphql subscription to send progress updates to the client. Most of
+    // the time the client will be an external client, but it could also be the
+    // Cortex REST api code.
     async asyncResolve(args) {
-        const responseData = await this.promptAndParse(args);
+        const MAX_RETRY_COUNT = 3;
+        let attempt = 0;
+        let streamErrorOccurred = false;
+
+        while (attempt < MAX_RETRY_COUNT) {
+            const responseData = await this.promptAndParse(args);
+
+            if (args.async || typeof responseData === 'string') {
+                const { completedCount, totalCount } = requestState[this.requestId];
+                requestState[this.requestId].data = responseData;
+                pubsub.publish('REQUEST_PROGRESS', {
+                    requestProgress: {
+                        requestId: this.requestId,
+                        progress: completedCount / totalCount,
+                        data: JSON.stringify(responseData),
+                    }
+                });
+            } else {
+                try {
+                    const incomingMessage = responseData;
 
-        // Either we're dealing with an async request or a stream
-        if(args.async || typeof responseData === 'string') {
-            const { completedCount, totalCount } = requestState[this.requestId];
-            requestState[this.requestId].data = responseData;
-            pubsub.publish('REQUEST_PROGRESS', {
-                requestProgress: {
-                    requestId: this.requestId,
-                    progress: completedCount / totalCount,
-                    data: JSON.stringify(responseData),
-                }
-            });
-        } else { // stream
-            try {
-                const incomingMessage = Array.isArray(responseData) && responseData.length > 0 ? responseData[0] : responseData;
-                incomingMessage.on('data', data => {
-                    const events = data.toString().split('\n');
-
-                    events.forEach(event => {
-                        if (event.trim() === '') return; // Skip empty lines
-
-                        const message = event.replace(/^data: /, '');
-
-                        //console.log(`====================================`);
-                        //console.log(`STREAM EVENT: ${event}`);
-                        //console.log(`MESSAGE: ${message}`);
-
-                        const requestProgress = {
-                            requestId: this.requestId,
-                            data: message,
-                        }
-
-                        if (message.trim() === '[DONE]') {
-                            requestProgress.progress = 1;
-                        }
-
+                    const processData = (data) => {
                         try {
-                            pubsub.publish('REQUEST_PROGRESS', {
-                                requestProgress: requestProgress
-                            });
+                            //console.log(`\n\nReceived stream data for requestId ${this.requestId}`, data.toString());
+                            let events = data.toString().split('\n');
+
+                            //events = "data: {\"id\":\"chatcmpl-20bf1895-2fa7-4ef9-abfe-4d142aba5817\",\"object\":\"chat.completion.chunk\",\"created\":1689303423723,\"model\":\"gpt-4\",\"choices\":[{\"delta\":{\"role\":\"assistant\",\"content\":{\"error\":{\"message\":\"The server had an error while processing your request. Sorry about that!\",\"type\":\"server_error\",\"param\":null,\"code\":null}}},\"finish_reason\":null}]}\n\n".split("\n");
+
+                            for (let event of events) {
+                                if (streamErrorOccurred) break;
+
+                                // skip empty events
+                                if (!(event.trim() === '')) {
+                                    //console.log(`Processing stream event for requestId ${this.requestId}`, event);
+
+                                    let message = event.replace(/^data: /, '');
+
+                                    const requestProgress = {
+                                        requestId: this.requestId,
+                                        data: message,
+                                    }
+
+                                    // check for end of stream or in-stream errors
+                                    if (message.trim() === '[DONE]') {
+                                        requestProgress.progress = 1;
+                                    } else {
+                                        let parsedMessage;
+                                        try {
+                                            parsedMessage = JSON.parse(message);
+                                        } catch (error) {
+                                            console.error('Could not JSON parse stream message', message, error);
+                                            return;
+                                        }
+
+                                        const streamError = parsedMessage.error || parsedMessage?.choices?.[0]?.delta?.content?.error || parsedMessage?.choices?.[0]?.text?.error;
+                                        if (streamError) {
+                                            streamErrorOccurred = true;
+                                            console.error(`Stream error: ${streamError.message}`);
+                                            incomingMessage.off('data', processData); // Stop listening to 'data'
+                                            return;
+                                        }
+                                    }
+
+                                    try {
+                                        //console.log(`Publishing stream message to requestId ${this.requestId}`, message);
+                                        pubsub.publish('REQUEST_PROGRESS', {
+                                            requestProgress: requestProgress
+                                        });
+                                    } catch (error) {
+                                        console.error('Could not publish the stream message', message, error);
+                                    }
+                                };
+                            };
                         } catch (error) {
-                            console.error('Could not JSON parse stream message', message, error);
+                            console.error('Could not process stream data', error);
                         }
+                    };
+
+                    await new Promise((resolve, reject) => {
+                        incomingMessage.on('data', processData);
+                        incomingMessage.on('end', resolve);
+                        incomingMessage.on('error', reject);
                     });
-                });
-            } catch (error) {
-                console.error('Could not subscribe to stream', error);
+
+                } catch (error) {
+                    console.error('Could not subscribe to stream', error);
+                }
+            }
+
+            if (streamErrorOccurred) {
+                attempt++;
+                console.error(`Stream attempt ${attempt} failed. Retrying...`);
+                streamErrorOccurred = false; // Reset the flag for the next attempt
+            } else {
+                return;
             }
         }
+        // if all retries failed, publish the stream end message
+        pubsub.publish('REQUEST_PROGRESS', {
+            requestProgress: {
+                requestId: this.requestId,
+                progress: 1,
+                data: '[DONE]',
+            }
+        });
     }
 
     async resolve(args) {
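The new `processData` handler is essentially a server-sent-events line parser with in-stream error detection. Reduced to its skeleton (names illustrative), the technique is:

```js
// Skeleton of the stream handling above: split a chunk into SSE lines,
// strip the "data: " prefix, detect [DONE] and embedded error payloads.
function handleChunk(chunk, publish) {
    for (const event of chunk.toString().split('\n')) {
        if (event.trim() === '') continue;            // skip blank lines between events
        const message = event.replace(/^data: /, ''); // strip the SSE field name
        if (message.trim() === '[DONE]') {
            publish({ progress: 1, data: message });  // end of stream
            continue;
        }
        let parsed;
        try {
            parsed = JSON.parse(message);
        } catch {
            continue; // unparseable event - logged and skipped above
        }
        // Errors can arrive at the top level or buried inside the first choice.
        const error = parsed.error || parsed?.choices?.[0]?.delta?.content?.error;
        if (error) throw new Error(error.message); // lets the caller retry, as asyncResolve does
        publish({ data: message });
    }
}
```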
@@ -167,7 +227,7 @@ class PathwayResolver {
         } else {
             chunkTokenLength = this.chunkMaxTokenLength;
         }
-        const encoded = encode(text);
+        const encoded = text ? encode(text) : [];
         if (!this.useInputChunking || encoded.length <= chunkTokenLength) { // no chunking, return as is
             if (encoded.length > 0 && encoded.length >= chunkTokenLength) {
                 const warnText = `Truncating long input text. Text length: ${text.length}`;
@@ -275,8 +335,11 @@ class PathwayResolver {
             previousResult = this.truncate(previousResult, this.chunkMaxTokenLength);
             result = await Promise.all(chunks.map(chunk =>
                 this.applyPrompt(this.prompts[i], chunk, currentParameters)));
-            if (!currentParameters.stream) {
-                result = result.join("\n\n")
+
+            if (result.length === 1) {
+                result = result[0];
+            } else if (!currentParameters.stream) {
+                result = result.join("\n\n");
             }
         }
 
package/server/pathwayResponseParser.js CHANGED
@@ -1,29 +1,25 @@
-import { parseNumberedList, parseNumberedObjectList, parseCommaSeparatedList } from './parser.js';
+import { parseNumberedList, parseNumberedObjectList, parseCommaSeparatedList, isCommaSeparatedList, isNumberedList } from './parser.js';
 
 class PathwayResponseParser {
     constructor(pathway) {
         this.pathway = pathway;
     }
 
-    isCommaSeparatedList(data) {
-        const commaSeparatedPattern = /^([^,\n]+,)+[^,\n]+$/;
-        return commaSeparatedPattern.test(data.trim());
-    }
-
     parse(data) {
         if (this.pathway.parser) {
             return this.pathway.parser(data);
         }
 
         if (this.pathway.list) {
-            if (this.isCommaSeparatedList(data)) {
-                return parseCommaSeparatedList(data);
-            } else {
+            if (isNumberedList(data)) {
                 if (this.pathway.format) {
                     return parseNumberedObjectList(data, this.pathway.format);
                 }
                 return parseNumberedList(data);
+            } else if (isCommaSeparatedList(data)) {
+                return parseCommaSeparatedList(data);
             }
+            return [data];
         }
 
         return data;
package/server/plugins/azureTranslatePlugin.js CHANGED
@@ -26,7 +26,7 @@ class AzureTranslatePlugin extends ModelPlugin {
     // Execute the request to the Azure Translate API
     async execute(text, parameters, prompt, pathwayResolver) {
         const requestParameters = this.getRequestParameters(text, parameters, prompt);
-        const requestId = pathwayResolver?.requestId;
+        const { requestId, pathway} = pathwayResolver;
 
         const url = this.requestUrl(text);
 
@@ -34,7 +34,7 @@ class AzureTranslatePlugin extends ModelPlugin {
         const params = requestParameters.params;
         const headers = this.model.headers || {};
 
-        return this.executeRequest(url, data, params, headers, prompt, requestId);
+        return this.executeRequest(url, data, params, headers, prompt, requestId, pathway);
     }
 
     // Parse the response from the Azure Translate API
package/server/plugins/cohereGeneratePlugin.js ADDED
@@ -0,0 +1,60 @@
+// CohereGeneratePlugin.js
+import ModelPlugin from './modelPlugin.js';
+
+class CohereGeneratePlugin extends ModelPlugin {
+    constructor(config, pathway, modelName, model) {
+        super(config, pathway, modelName, model);
+    }
+
+    // Set up parameters specific to the Cohere API
+    getRequestParameters(text, parameters, prompt) {
+        const { modelPromptText, tokenLength } = this.getCompiledPrompt(text, parameters, prompt);
+
+        // Define the model's max token length
+        const modelTargetTokenLength = this.getModelMaxTokenLength() * this.getPromptTokenRatio();
+
+        // Check if the token length exceeds the model's max token length
+        if (tokenLength > modelTargetTokenLength) {
+            // Truncate the prompt text to fit within the token length
+            modelPromptText = modelPromptText.substring(0, modelTargetTokenLength);
+        }
+
+        const requestParameters = {
+            model: "command",
+            prompt: modelPromptText,
+            max_tokens: this.getModelMaxReturnTokens(),
+            temperature: this.temperature ?? 0.7,
+            k: 0,
+            stop_sequences: parameters.stop_sequences || [],
+            return_likelihoods: parameters.return_likelihoods || "NONE"
+        };
+
+        return requestParameters;
+    }
+
+    // Execute the request to the Cohere API
+    async execute(text, parameters, prompt, pathwayResolver) {
+        const url = this.requestUrl();
+        const requestParameters = this.getRequestParameters(text, parameters, prompt);
+        const { requestId, pathway} = pathwayResolver;
+
+        const data = { ...(this.model.params || {}), ...requestParameters };
+        const params = {};
+        const headers = {
+            ...this.model.headers || {}
+        };
+        return this.executeRequest(url, data, params, headers, prompt, requestId, pathway);
+    }
+
+    // Parse the response from the Cohere API
+    parseResponse(data) {
+        const { generations } = data;
+        if (!generations || !generations.length) {
+            return data;
+        }
+        // Return the text of the first generation
+        return generations[0].text || null;
+    }
+}
+
+export default CohereGeneratePlugin;
package/server/plugins/cohereSummarizePlugin.js ADDED
@@ -0,0 +1,50 @@
+// CohereSummarizePlugin.js
+import ModelPlugin from './modelPlugin.js';
+
+class CohereSummarizePlugin extends ModelPlugin {
+    constructor(config, pathway, modelName, model) {
+        super(config, pathway, modelName, model);
+    }
+
+    // Set up parameters specific to the Cohere Summarize API
+    getRequestParameters(text, parameters, prompt) {
+        const { modelPromptText } = this.getCompiledPrompt(text, parameters, prompt);
+
+        const requestParameters = {
+            length: parameters.length || "medium",
+            format: parameters.format || "paragraph",
+            model: "summarize-xlarge",
+            extractiveness: parameters.extractiveness || "low",
+            temperature: this.temperature ?? 0.3,
+            text: modelPromptText
+        };
+
+        return requestParameters;
+    }
+
+    // Execute the request to the Cohere Summarize API
+    async execute(text, parameters, prompt, pathwayResolver) {
+        const url = this.requestUrl();
+        const requestParameters = this.getRequestParameters(text, parameters, prompt);
+        const { requestId, pathway} = pathwayResolver;
+
+        const data = { ...(this.model.params || {}), ...requestParameters };
+        const params = {};
+        const headers = {
+            ...this.model.headers || {}
+        };
+        return this.executeRequest(url, data, params, headers, prompt, requestId, pathway);
+    }
+
+    // Parse the response from the Cohere Summarize API
+    parseResponse(data) {
+        const { summary } = data;
+        if (!summary) {
+            return data;
+        }
+        // Return the summary
+        return summary;
+    }
+}
+
+export default CohereSummarizePlugin;
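Assuming Cortex's usual mapping of each pathway to a GraphQL query of the same name (that machinery is not part of this diff), the new summarize pathway could be exercised like this; the URL, port, and result field are assumptions:

```js
// Illustrative GraphQL call to the test_cohere_summarize pathway.
const res = await fetch('http://localhost:4000/graphql', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
        query: `query Summarize($text: String) {
            test_cohere_summarize(text: $text) { result }
        }`,
        variables: { text: 'Long article text to summarize...' }
    })
});
console.log((await res.json()).data.test_cohere_summarize.result);
```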
package/server/plugins/modelPlugin.js CHANGED
@@ -121,7 +121,16 @@ class ModelPlugin {
 
     // compile the Prompt
     getCompiledPrompt(text, parameters, prompt) {
-        const combinedParameters = { ...this.promptParameters, ...parameters };
+
+        const mergeParameters = (promptParameters, parameters) => {
+            let result = { ...promptParameters };
+            for (let key in parameters) {
+                if (parameters[key] !== null) result[key] = parameters[key];
+            }
+            return result;
+        }
+
+        const combinedParameters = mergeParameters(this.promptParameters, parameters);
         const modelPrompt = this.getModelPrompt(prompt, parameters);
         const modelPromptText = modelPrompt.prompt ? HandleBars.compile(modelPrompt.prompt)({ ...combinedParameters, text }) : '';
         const modelPromptMessages = this.getModelPromptMessages(modelPrompt, combinedParameters, text);
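The new `mergeParameters` helper changes the override semantics: an explicit `null` argument no longer clobbers a prompt-level default, where the old object spread did. Concretely:

```js
// Behavior of mergeParameters as defined above:
const promptParameters = { temperature: 0.7, format: 'paragraph' };
const parameters = { temperature: 0.2, format: null };

mergeParameters(promptParameters, parameters);
// => { temperature: 0.2, format: 'paragraph' }

// The old { ...promptParameters, ...parameters } would have produced
// { temperature: 0.2, format: null }
```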
@@ -202,6 +211,7 @@ class ModelPlugin {
     // Default simple logging
     logRequestStart(url, data) {
         this.requestCount++;
+        this.lastRequestStartTime = new Date();
         const logMessage = `>>> [${this.requestId}: ${this.pathwayName}.${this.requestCount}] request`;
         const header = '>'.repeat(logMessage.length);
         console.log(`\n${header}\n${logMessage}`);
@@ -211,7 +221,7 @@ class ModelPlugin {
     logAIRequestFinished() {
         const currentTime = new Date();
         const timeElapsed = (currentTime - this.lastRequestStartTime) / 1000;
-        const logMessage = `<<< [${this.requestId}: ${this.pathwayName}.${this.requestCount}] response - complete in ${timeElapsed}s - data:`;
+        const logMessage = `<<< [${this.requestId}: ${this.pathwayName}] response - complete in ${timeElapsed}s - data:`;
         const header = '<'.repeat(logMessage.length);
         console.log(`\n${header}\n${logMessage}\n`);
     };
@@ -229,11 +239,11 @@ class ModelPlugin {
         prompt && prompt.debugInfo && (prompt.debugInfo += `${separator}${JSON.stringify(data)}`);
     }
 
-    async executeRequest(url, data, params, headers, prompt, requestId) {
+    async executeRequest(url, data, params, headers, prompt, requestId, pathway) {
         this.aiRequestStartTime = new Date();
         this.requestId = requestId;
         this.logRequestStart(url, data);
-        const responseData = await request({ url, data, params, headers, cache: this.shouldCache }, this.modelName);
+        const responseData = await request({ url, data, params, headers, cache: this.shouldCache }, this.modelName, this.requestId, pathway);
 
         if (responseData.error) {
             throw new Error(`An error was returned from the server: ${JSON.stringify(responseData.error)}`);
@@ -79,12 +79,12 @@ class OpenAIChatPlugin extends ModelPlugin {
     async execute(text, parameters, prompt, pathwayResolver) {
         const url = this.requestUrl(text);
         const requestParameters = this.getRequestParameters(text, parameters, prompt);
-        const requestId = pathwayResolver?.requestId;
+        const { requestId, pathway} = pathwayResolver;
 
         const data = { ...(this.model.params || {}), ...requestParameters };
-        const params = {};
+        const params = {}; // query params
         const headers = this.model.headers || {};
-        return this.executeRequest(url, data, params, headers, prompt, requestId);
+        return this.executeRequest(url, data, params, headers, prompt, requestId, pathway);
     }
 
     // Parse the response from the OpenAI Chat API
@@ -122,7 +122,7 @@ class OpenAIChatPlugin extends ModelPlugin {
     }
 
     if (stream) {
-        console.log(`\x1b[34m> Response is streaming...\x1b[0m`);
+        console.log(`\x1b[34m> [response is an SSE stream]\x1b[0m`);
     } else {
         console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
     }
@@ -79,13 +79,13 @@ class OpenAICompletionPlugin extends ModelPlugin {
     async execute(text, parameters, prompt, pathwayResolver) {
         const url = this.requestUrl(text);
         const requestParameters = this.getRequestParameters(text, parameters, prompt, pathwayResolver);
-        const requestId = pathwayResolver?.requestId;
+        const { requestId, pathway} = pathwayResolver;
 
         const data = { ...(this.model.params || {}), ...requestParameters };
         const params = {};
         const headers = this.model.headers || {};
 
-        return this.executeRequest(url, data, params, headers, prompt, requestId);
+        return this.executeRequest(url, data, params, headers, prompt, requestId, pathway);
     }
 
     // Parse the response from the OpenAI Completion API
@@ -115,7 +115,7 @@ class OpenAICompletionPlugin extends ModelPlugin {
     console.log(`\x1b[36m${modelInput}\x1b[0m`);
 
     if (stream) {
-        console.log(`\x1b[34m> Response is streaming...\x1b[0m`);
+        console.log(`\x1b[34m> [response is an SSE stream]\x1b[0m`);
     } else {
         console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
     }
@@ -129,7 +129,7 @@ class OpenAIWhisperPlugin extends ModelPlugin {
 
     try {
         // const res = await axios.post(WHISPER_TS_API_URL, { params: { fileurl: uri } });
-        const res = await this.executeRequest(WHISPER_TS_API_URL, {fileurl:uri},{},{});
+        const res = await this.executeRequest(WHISPER_TS_API_URL, {fileurl:uri}, {}, {}, {}, requestId, pathway);
         return res;
     } catch (err) {
         console.log(`Error getting word timestamped data from api:`, err);
@@ -150,7 +150,7 @@ class OpenAIWhisperPlugin extends ModelPlugin {
         language && formData.append('language', language);
         modelPromptText && formData.append('prompt', modelPromptText);
 
-        return this.executeRequest(url, formData, params, { ...this.model.headers, ...formData.getHeaders() });
+        return this.executeRequest(url, formData, params, { ...this.model.headers, ...formData.getHeaders() }, {}, requestId, pathway);
     } catch (err) {
         console.log(err);
         throw err;
@@ -161,7 +161,7 @@ class OpenAIWhisperPlugin extends ModelPlugin {
     let { file } = parameters;
     let totalCount = 0;
    let completedCount = 0;
-    const { requestId } = pathwayResolver;
+    const { requestId, pathway } = pathwayResolver;
 
     const sendProgress = () => {
         completedCount++;
@@ -140,7 +140,7 @@ class PalmChatPlugin extends ModelPlugin {
     async execute(text, parameters, prompt, pathwayResolver) {
         const url = this.requestUrl(text);
         const requestParameters = this.getRequestParameters(text, parameters, prompt);
-        const requestId = pathwayResolver?.requestId;
+        const { requestId, pathway} = pathwayResolver;
 
         const data = { ...(this.model.params || {}), ...requestParameters };
         const params = {};
@@ -148,7 +148,7 @@ class PalmChatPlugin extends ModelPlugin {
         const gcpAuthTokenHelper = this.config.get('gcpAuthTokenHelper');
         const authToken = await gcpAuthTokenHelper.getAccessToken();
         headers.Authorization = `Bearer ${authToken}`;
-        return this.executeRequest(url, data, params, headers, prompt, requestId);
+        return this.executeRequest(url, data, params, headers, prompt, requestId, pathway);
     }
 
     // Parse the response from the PaLM Chat API
@@ -55,7 +55,7 @@ class PalmCompletionPlugin extends ModelPlugin {
     async execute(text, parameters, prompt, pathwayResolver) {
         const url = this.requestUrl(text);
         const requestParameters = this.getRequestParameters(text, parameters, prompt, pathwayResolver);
-        const requestId = pathwayResolver?.requestId;
+        const { requestId, pathway} = pathwayResolver;
 
         const data = { ...requestParameters };
         const params = {};
@@ -63,7 +63,7 @@ class PalmCompletionPlugin extends ModelPlugin {
         const gcpAuthTokenHelper = this.config.get('gcpAuthTokenHelper');
         const authToken = await gcpAuthTokenHelper.getAccessToken();
         headers.Authorization = `Bearer ${authToken}`;
-        return this.executeRequest(url, data, params, headers, prompt, requestId);
+        return this.executeRequest(url, data, params, headers, prompt, requestId, pathway);
     }
 
     // Parse the response from the PaLM API Text Completion API
package/server/rest.js CHANGED
@@ -61,7 +61,7 @@ const processIncomingStream = (requestId, res, jsonResponse) => {
     }
 
     const finishStream = (res, jsonResponse) => {
-
+
         // If we haven't sent the stop message yet, do it now
         if (jsonResponse.choices?.[0]?.finish_reason !== "stop") {
 
@@ -77,17 +77,20 @@ const processIncomingStream = (requestId, res, jsonResponse) => {
             jsonEndStream.choices[0].delta = {};
         }
 
-        //console.log(`REST SEND: data: ${JSON.stringify(jsonEndStream)}`);
-        res.write(`data: ${JSON.stringify(jsonEndStream)}\n\n`);
+        sendStreamData(jsonEndStream);
     }
 
-    //console.log(`REST SEND: data: [DONE]\n\n`);
-    res.write(`data: [DONE]\n\n`);
+    sendStreamData('[DONE]');
+    res.end();
 }
 
 const sendStreamData = (data) => {
     //console.log(`REST SEND: data: ${JSON.stringify(data)}`);
-    res.write(`data: ${JSON.stringify(data)}\n\n`);
+    const dataString = (data==='[DONE]') ? data : JSON.stringify(data);
+
+    if (!res.writableEnded) {
+        res.write(`data: ${dataString}\n\n`);
+    }
 }
 
 const fillJsonResponse = (jsonResponse, inputText, finishReason) => {
@@ -106,13 +109,18 @@ const processIncomingStream = (requestId, res, jsonResponse) => {
 
     let subscription;
 
-    const unsubscribe = async () => {
-        if (subscription) {
-            pubsub.unsubscribe(await subscription);
+    subscription = pubsub.subscribe('REQUEST_PROGRESS', (data) => {
+
+        const safeUnsubscribe = async () => {
+            if (subscription) {
+                try {
+                    pubsub.unsubscribe(await subscription);
+                } catch (error) {
+                    console.error(`Error unsubscribing from pubsub: ${error}`);
+                }
+            }
         }
-    }
 
-    subscription = pubsub.subscribe('REQUEST_PROGRESS', (data) => {
         if (data.requestProgress.requestId === requestId) {
             //console.log(`REQUEST_PROGRESS received progress: ${data.requestProgress.progress}, data: ${data.requestProgress.data}`);
 
@@ -121,7 +129,12 @@ const processIncomingStream = (requestId, res, jsonResponse) => {
 
         try {
             const messageJson = JSON.parse(progressData);
-            if (messageJson.choices) {
+            if (messageJson.error) {
+                console.error(`Stream error REST:`, messageJson?.error?.message);
+                safeUnsubscribe();
+                finishStream(res, jsonResponse);
+                return;
+            } else if (messageJson.choices) {
                 const { text, delta, finish_reason } = messageJson.choices[0];
 
                 if (messageJson.object === 'text_completion') {
@@ -133,20 +146,20 @@ const processIncomingStream = (requestId, res, jsonResponse) => {
                 fillJsonResponse(jsonResponse, messageJson, null);
             }
         } catch (error) {
-            console.log(`progressData not JSON: ${progressData}`);
+            //console.log(`progressData not JSON: ${progressData}`);
             fillJsonResponse(jsonResponse, progressData, "stop");
         }
-
         if (progress === 1 && progressData.trim() === "[DONE]") {
+            safeUnsubscribe();
             finishStream(res, jsonResponse);
-            unsubscribe();
             return;
        }
+
        sendStreamData(jsonResponse);
 
        if (progress === 1) {
+            safeUnsubscribe();
            finishStream(res, jsonResponse);
-            unsubscribe();
        }
    }
 });
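Because the new error path can call `finishStream` (and therefore `res.end()`) while progress events are still arriving, every write is now routed through `sendStreamData`, which checks `res.writableEnded`. A minimal Express sketch of the same guard-then-end pattern; the route and payloads are illustrative and `app` is an assumed Express instance:

```js
// Minimal SSE endpoint using the same writableEnded guard as above.
app.get('/stream', (req, res) => {
    res.setHeader('Content-Type', 'text/event-stream');

    const send = (data) => {
        const dataString = data === '[DONE]' ? data : JSON.stringify(data);
        if (!res.writableEnded) {                 // never write after end()
            res.write(`data: ${dataString}\n\n`);
        }
    };

    send({ choices: [{ delta: { content: 'hello' } }] });
    send('[DONE]');
    res.end();
    send({ ignored: true }); // safely dropped by the guard
});
```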
@@ -14,9 +14,9 @@ const subscriptions = {
     const { requestIds } = args;
     for (const requestId of requestIds) {
         if (!requestState[requestId]) {
-            console.log(`requestProgress, requestId: ${requestId} not found`);
+            console.error(`Subscription requestId: ${requestId} not found`);
         } else {
-            console.log(`starting async requestProgress, requestId: ${requestId}`);
+            console.log(`Subscription starting async requestProgress, requestId: ${requestId}`);
             const { resolver, args } = requestState[requestId];
             resolver(args);
         }
package/server/typeDef.js CHANGED
@@ -7,7 +7,7 @@ const getGraphQlType = (value) => {
             return {type: 'String', defaultValue: `""`};
             break;
         case 'number':
-            return {type: 'Int', defaultValue: '0'};
+            return {type: 'Int', defaultValue: 'null'};
             break;
         case 'object':
             if (Array.isArray(value)) {
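The effect of the `Int` change, using the function above: a numeric pathway default now generates a nullable argument rather than a spurious `0` (argument name illustrative):

```js
getGraphQlType(42);
// before: { type: 'Int', defaultValue: '0' }    -> someArg: Int = 0
// now:    { type: 'Int', defaultValue: 'null' } -> someArg: Int = null
```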
@@ -77,7 +77,7 @@ test('execute', async (t) => {
         };
     };
 
-    const result = await plugin.execute(text, parameters, prompt);
+    const result = await plugin.execute(text, parameters, prompt, { requestId: 'foo', pathway: {} });
     t.deepEqual(result, {
         choices: [
             {