@aj-archipelago/cortex 0.0.6 → 0.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,15 +12,16 @@ const { requestState } = require('./requestState');
 
 const MAX_PREVIOUS_RESULT_TOKEN_LENGTH = 1000;
 
-const callPathway = async (config, pathwayName, requestState, { text, ...parameters }) => {
-    const pathwayResolver = new PathwayResolver({ config, pathway: config.get(`pathways.${pathwayName}`), requestState });
+const callPathway = async (config, pathwayName, args, requestState, { text, ...parameters }) => {
+    const pathwayResolver = new PathwayResolver({ config, pathway: config.get(`pathways.${pathwayName}`), args, requestState });
     return await pathwayResolver.resolve({ text, ...parameters });
 }
 
 class PathwayResolver {
-    constructor({ config, pathway }) {
+    constructor({ config, pathway, args }) {
         this.config = config;
         this.pathway = pathway;
+        this.args = args;
         this.useInputChunking = pathway.useInputChunking;
         this.chunkMaxTokenLength = 0;
         this.warnings = [];
@@ -29,22 +30,21 @@ class PathwayResolver {
         this.pathwayPrompter = new PathwayPrompter({ config, pathway });
         this.previousResult = '';
         this.prompts = [];
-        this._pathwayPrompt = '';
 
         Object.defineProperty(this, 'pathwayPrompt', {
             get() {
-                return this._pathwayPrompt;
+                return this.prompts
             },
             set(value) {
-                this._pathwayPrompt = value;
-                if (!Array.isArray(this._pathwayPrompt)) {
-                    this._pathwayPrompt = [this._pathwayPrompt];
+                if (!Array.isArray(value)) {
+                    value = [value];
                 }
-                this.prompts = this._pathwayPrompt.map(p => (p instanceof Prompt) ? p : new Prompt({ prompt:p }));
+                this.prompts = value.map(p => (p instanceof Prompt) ? p : new Prompt({ prompt:p }));
                 this.chunkMaxTokenLength = this.getChunkMaxTokenLength();
             }
         });
 
+        // set up initial prompt
        this.pathwayPrompt = pathway.prompt;
     }
 
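The rewritten accessor drops the `_pathwayPrompt` backing field: reads return the `prompts` array directly, and writes normalize whatever they receive into an array of `Prompt` objects. A minimal sketch of that normalization, using a stub `Prompt` class in place of the package's graphql/prompt.js (the real setter also recomputes `chunkMaxTokenLength`):

    // Stub Prompt standing in for graphql/prompt.js (illustration only)
    class Prompt {
        constructor({ prompt }) { this.prompt = prompt; }
    }

    const resolver = { prompts: [] };
    Object.defineProperty(resolver, 'pathwayPrompt', {
        get() { return this.prompts; },
        set(value) {
            if (!Array.isArray(value)) value = [value];
            // strings become Prompt objects; existing Prompt instances pass through
            this.prompts = value.map(p => (p instanceof Prompt) ? p : new Prompt({ prompt: p }));
        }
    });

    resolver.pathwayPrompt = 'Summarize: {{text}}';
    console.log(resolver.pathwayPrompt.length); // 1 -- a single Prompt instance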
@@ -145,25 +145,25 @@ class PathwayResolver {
 
     // Here we choose how to handle long input - either summarize or chunk
     processInputText(text) {
-        let chunkMaxChunkTokenLength = 0;
+        let chunkTokenLength = 0;
         if (this.pathway.inputChunkSize) {
-            chunkMaxChunkTokenLength = Math.min(this.pathway.inputChunkSize, this.chunkMaxTokenLength);
+            chunkTokenLength = Math.min(this.pathway.inputChunkSize, this.chunkMaxTokenLength);
         } else {
-            chunkMaxChunkTokenLength = this.chunkMaxTokenLength;
+            chunkTokenLength = this.chunkMaxTokenLength;
         }
         const encoded = encode(text);
-        if (!this.useInputChunking || encoded.length <= chunkMaxChunkTokenLength) { // no chunking, return as is
-            if (encoded.length >= chunkMaxChunkTokenLength) {
-                const warnText = `Your input is possibly too long, truncating! Text length: ${text.length}`;
+        if (!this.useInputChunking || encoded.length <= chunkTokenLength) { // no chunking, return as is
+            if (encoded.length >= chunkTokenLength) {
+                const warnText = `Truncating long input text. Text length: ${text.length}`;
                 this.warnings.push(warnText);
                 console.warn(warnText);
-                text = this.truncate(text, chunkMaxChunkTokenLength);
+                text = this.truncate(text, chunkTokenLength);
             }
             return [text];
         }
 
         // chunk the text and return the chunks with newline separators
-        return getSemanticChunks({ text, maxChunkToken: chunkMaxChunkTokenLength });
+        return getSemanticChunks({ text, maxChunkToken: chunkTokenLength });
     }
 
     truncate(str, n) {
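The rename from `chunkMaxChunkTokenLength` to `chunkTokenLength` is cosmetic; the selection logic is unchanged, and `pathway.inputChunkSize` can only lower the computed ceiling, never raise it. A sketch with hypothetical numbers:

    const effectiveChunkTokenLength = (inputChunkSize, chunkMaxTokenLength) =>
        inputChunkSize ? Math.min(inputChunkSize, chunkMaxTokenLength) : chunkMaxTokenLength;

    console.log(effectiveChunkTokenLength(500, 3000));       // 500 -- the pathway cap wins
    console.log(effectiveChunkTokenLength(undefined, 3000)); // 3000 -- the computed ceiling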
@@ -175,7 +175,7 @@ class PathwayResolver {
 
     async summarizeIfEnabled({ text, ...parameters }) {
         if (this.pathway.useInputSummarization) {
-            return await callPathway(this.config, 'summary', requestState, { text, targetLength: 1000, ...parameters });
+            return await callPathway(this.config, 'summary', this.args, requestState, { text, targetLength: 1000, ...parameters });
         }
         return text;
     }
@@ -183,30 +183,21 @@ class PathwayResolver {
     // Calculate the maximum token length for a chunk
     getChunkMaxTokenLength() {
         // find the longest prompt
-        const maxPromptTokenLength = Math.max(...this.prompts.map(({ prompt }) => prompt ? encode(String(prompt)).length : 0));
-        const maxMessagesTokenLength = Math.max(...this.prompts.map(({ messages }) => messages ? messages.reduce((acc, {role, content}) => {
-            return (role && content) ? acc + encode(role).length + encode(content).length : acc;
-        }, 0) : 0));
-
-        const maxTokenLength = Math.max(maxPromptTokenLength, maxMessagesTokenLength);
-
+        const maxPromptTokenLength = Math.max(...this.prompts.map((promptData) => this.pathwayPrompter.plugin.getCompiledPrompt('', this.args, promptData).tokenLength));
+
         // find out if any prompts use both text input and previous result
-        const hasBothProperties = this.prompts.some(prompt => prompt.usesInputText && prompt.usesPreviousResult);
+        const hasBothProperties = this.prompts.some(prompt => prompt.usesTextInput && prompt.usesPreviousResult);
 
         // the token ratio is the ratio of the total prompt to the result text - both have to be included
         // in computing the max token length
         const promptRatio = this.pathwayPrompter.plugin.getPromptTokenRatio();
-        let maxChunkToken = promptRatio * this.pathwayPrompter.plugin.getModelMaxTokenLength() - maxTokenLength;
-
+        let chunkMaxTokenLength = promptRatio * this.pathwayPrompter.plugin.getModelMaxTokenLength() - maxPromptTokenLength;
+
         // if we have to deal with prompts that have both text input
         // and previous result, we need to split the maxChunkToken in half
-        maxChunkToken = hasBothProperties ? maxChunkToken / 2 : maxChunkToken;
-
-        // detect if the longest prompt might be too long to allow any chunk size
-        if (maxChunkToken && maxChunkToken <= 0) {
-            throw new Error(`Your prompt is too long! Split to multiple prompts or reduce length of your prompt, prompt length: ${maxPromptLength}`);
-        }
-        return maxChunkToken;
+        chunkMaxTokenLength = hasBothProperties ? chunkMaxTokenLength / 2 : chunkMaxTokenLength;
+
+        return chunkMaxTokenLength;
     }
 
     // Process the request and return the result
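The longest prompt is now measured by compiling it through the plugin rather than encoding raw prompt and message strings, and the old overflow guard is gone entirely (it referenced an undefined `maxPromptLength`, so it would have thrown a ReferenceError instead of its intended message). The budget arithmetic is otherwise unchanged; a worked example with hypothetical numbers:

    // Hypothetical values for illustration only
    const promptRatio = 0.8;            // plugin.getPromptTokenRatio()
    const modelMaxTokenLength = 4096;   // plugin.getModelMaxTokenLength()
    const maxPromptTokenLength = 200;   // longest compiled prompt, in tokens

    let chunkMaxTokenLength = promptRatio * modelMaxTokenLength - maxPromptTokenLength; // 3076.8
    // halved when a prompt uses both {{text}} and {{previousResult}}
    const hasBothProperties = true;
    chunkMaxTokenLength = hasBothProperties ? chunkMaxTokenLength / 2 : chunkMaxTokenLength;
    console.log(chunkMaxTokenLength);   // 1538.4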
@@ -300,7 +291,7 @@ class PathwayResolver {
             if (requestState[this.requestId].canceled) {
                 return;
             }
-            const result = await this.pathwayPrompter.execute(text, { ...parameters, ...this.savedContext }, prompt);
+            const result = await this.pathwayPrompter.execute(text, { ...parameters, ...this.savedContext }, prompt, this);
             requestState[this.requestId].completedCount++;
 
             const { completedCount, totalCount } = requestState[this.requestId];
@@ -1,19 +1,16 @@
 // AzureTranslatePlugin.js
 const ModelPlugin = require('./modelPlugin');
-const handlebars = require("handlebars");
 
 class AzureTranslatePlugin extends ModelPlugin {
-    constructor(config, modelName, pathway) {
-        super(config, modelName, pathway);
+    constructor(config, pathway) {
+        super(config, pathway);
     }
-
+
     // Set up parameters specific to the Azure Translate API
-    requestParameters(text, parameters, prompt) {
+    getRequestParameters(text, parameters, prompt) {
         const combinedParameters = { ...this.promptParameters, ...parameters };
-        const modelPrompt = this.getModelPrompt(prompt, parameters);
-        const modelPromptText = modelPrompt.prompt ? handlebars.compile(modelPrompt.prompt)({ ...combinedParameters, text }) : '';
-
-        return {
+        const { modelPromptText } = this.getCompiledPrompt(text, parameters, prompt);
+        const requestParameters = {
             data: [
                 {
                     Text: modelPromptText,
@@ -23,11 +20,12 @@ class AzureTranslatePlugin extends ModelPlugin {
                 to: combinedParameters.to
             }
         };
+        return requestParameters;
     }
 
     // Execute the request to the Azure Translate API
     async execute(text, parameters, prompt) {
-        const requestParameters = this.requestParameters(text, parameters, prompt);
+        const requestParameters = this.getRequestParameters(text, parameters, prompt);
 
         const url = this.requestUrl(text);
 
@@ -35,7 +33,7 @@ class AzureTranslatePlugin extends ModelPlugin {
         const params = requestParameters.params;
         const headers = this.model.headers || {};
 
-        return this.executeRequest(url, data, params, headers);
+        return this.executeRequest(url, data, params, headers, prompt);
     }
 }
 
@@ -38,6 +38,55 @@ class ModelPlugin {
         this.shouldCache = config.get('enableCache') && (pathway.enableCache || pathway.temperature == 0);
     }
 
+    // Function to remove non-system messages until token length is less than target
+    removeMessagesUntilTarget = (messages, targetTokenLength) => {
+        let chatML = this.messagesToChatML(messages);
+        let tokenLength = encode(chatML).length;
+
+        while (tokenLength > targetTokenLength) {
+            for (let i = 0; i < messages.length; i++) {
+                if (messages[i].role !== 'system') {
+                    messages.splice(i, 1);
+                    chatML = this.messagesToChatML(messages);
+                    tokenLength = encode(chatML).length;
+                    break;
+                }
+            }
+            if (messages.every(message => message.role === 'system')) {
+                break; // All remaining messages are 'system', stop removing messages
+            }
+        }
+        return messages;
+    }
+
+    //convert a messages array to a simple chatML format
+    messagesToChatML = (messages) => {
+        let output = "";
+        if (messages && messages.length) {
+            for (let message of messages) {
+                output += (message.role && message.content) ? `<|im_start|>${message.role}\n${message.content}\n<|im_end|>\n` : `${message}\n`;
+            }
+            // you always want the assistant to respond next so add a
+            // directive for that
+            output += "<|im_start|>assistant\n";
+        }
+        return output;
+    }
+
+    getCompiledPrompt(text, parameters, prompt) {
+        const combinedParameters = { ...this.promptParameters, ...parameters };
+        const modelPrompt = this.getModelPrompt(prompt, parameters);
+        const modelPromptText = modelPrompt.prompt ? handlebars.compile(modelPrompt.prompt)({ ...combinedParameters, text }) : '';
+        const modelPromptMessages = this.getModelPromptMessages(modelPrompt, combinedParameters, text);
+        const modelPromptMessagesML = this.messagesToChatML(modelPromptMessages);
+
+        if (modelPromptMessagesML) {
+            return { modelPromptMessages, tokenLength: encode(modelPromptMessagesML).length };
+        } else {
+            return { modelPromptText, tokenLength: encode(modelPromptText).length };
+        }
+    }
+
     getModelMaxTokenLength() {
         return (this.promptParameters.maxTokenLength ?? this.model.maxTokenLength ?? DEFAULT_MAX_TOKENS);
     }
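`messagesToChatML` moves into the shared base class (it previously lived as a local helper in OpenAICompletionPlugin.js, removed further below), alongside the new pruning helper. Running the function from the diff on a short history shows the ChatML framing it produces:

    // messagesToChatML as defined in the diff, exercised standalone
    const messagesToChatML = (messages) => {
        let output = "";
        if (messages && messages.length) {
            for (let message of messages) {
                output += (message.role && message.content) ? `<|im_start|>${message.role}\n${message.content}\n<|im_end|>\n` : `${message}\n`;
            }
            // you always want the assistant to respond next so add a directive for that
            output += "<|im_start|>assistant\n";
        }
        return output;
    }

    console.log(messagesToChatML([
        { role: "system", content: "You are helpful." },
        { role: "user", content: "Hi" },
    ]));
    // <|im_start|>system
    // You are helpful.
    // <|im_end|>
    // <|im_start|>user
    // Hi
    // <|im_end|>
    // <|im_start|>assistant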
@@ -120,40 +169,37 @@ class ModelPlugin {
         return messageResult ?? textResult ?? null;
     }
 
-    logMessagePreview(messages) {
-        messages.forEach((message, index) => {
-            const words = message.content.split(" ");
-            const tokenCount = encode(message.content).length;
-            let preview;
+    logRequestData(data, responseData, prompt) {
+        const separator = `\n=== ${this.pathwayName}.${this.requestCount++} ===\n`;
+        console.log(separator);
 
-            if (index === 0) {
-                preview = message.content;
-            } else {
-                preview = words.slice(0, 20).join(" ") + " ... " + words.slice(-20).join(" ");
-            }
+        const modelInput = data.prompt || (data.messages && data.messages[0].content) || (data.length > 0 && data[0].Text) || null;
 
-            console.log(`Message ${index + 1}: Role: ${message.role}, Tokens: ${tokenCount}, Content: "${preview}"`);
-        });
-    }
+        if (data && data.messages && data.messages.length > 1) {
+            data.messages.forEach((message, index) => {
+                const words = message.content.split(" ");
+                const tokenCount = encode(message.content).length;
+                const preview = words.length < 41 ? message.content : words.slice(0, 20).join(" ") + " ... " + words.slice(-20).join(" ");
 
-    async executeRequest(url, data, params, headers) {
-        const responseData = await request({ url, data, params, headers, cache: this.shouldCache }, this.modelName);
-        const modelInput = data.prompt || (data.messages && data.messages[0].content) || data[0].Text || null;
-
-        console.log(`=== ${this.pathwayName}.${this.requestCount++} ===`);
-
-        if (data.messages && data.messages.length > 1) {
-            this.logMessagePreview(data.messages);
+                console.log(`\x1b[36mMessage ${index + 1}: Role: ${message.role}, Tokens: ${tokenCount}, Content: "${preview}"\x1b[0m`);
+            });
         } else {
             console.log(`\x1b[36m${modelInput}\x1b[0m`);
         }
-
+
         console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
 
+        prompt && prompt.debugInfo && (prompt.debugInfo += `${separator}${JSON.stringify(data)}`);
+    }
+
+    async executeRequest(url, data, params, headers, prompt) {
+        const responseData = await request({ url, data, params, headers, cache: this.shouldCache }, this.modelName);
+
         if (responseData.error) {
             throw new Exception(`An error was returned from the server: ${JSON.stringify(responseData.error)}`);
         }
 
+        this.logRequestData(data, responseData, prompt);
         return this.parseResponse(responseData);
     }
 
@@ -1,6 +1,5 @@
 // OpenAIChatPlugin.js
 const ModelPlugin = require('./modelPlugin');
-const handlebars = require("handlebars");
 
 class OpenAIChatPlugin extends ModelPlugin {
     constructor(config, pathway) {
@@ -8,30 +7,39 @@ class OpenAIChatPlugin extends ModelPlugin {
     }
 
     // Set up parameters specific to the OpenAI Chat API
-    requestParameters(text, parameters, prompt) {
-        const combinedParameters = { ...this.promptParameters, ...parameters };
-        const modelPrompt = this.getModelPrompt(prompt, parameters);
-        const modelPromptText = modelPrompt.prompt ? handlebars.compile(modelPrompt.prompt)({ ...combinedParameters, text }) : '';
-        const modelPromptMessages = this.getModelPromptMessages(modelPrompt, combinedParameters, text);
-
+    getRequestParameters(text, parameters, prompt) {
+        const { modelPromptText, modelPromptMessages, tokenLength } = this.getCompiledPrompt(text, parameters, prompt);
         const { stream } = parameters;
-
-        return {
-            messages: modelPromptMessages || [{ "role": "user", "content": modelPromptText }],
-            temperature: this.temperature ?? 0.7,
-            stream
+
+        // Define the model's max token length
+        const modelMaxTokenLength = this.getModelMaxTokenLength() * this.getPromptTokenRatio();
+
+        let requestMessages = modelPromptMessages || [{ "role": "user", "content": modelPromptText }];
+
+        // Check if the token length exceeds the model's max token length
+        if (tokenLength > modelMaxTokenLength) {
+            // Remove older messages until the token length is within the model's limit
+            requestMessages = this.removeMessagesUntilTarget(requestMessages, modelMaxTokenLength);
+        }
+
+        const requestParameters = {
+            messages: requestMessages,
+            temperature: this.temperature ?? 0.7,
+            stream
         };
+
+        return requestParameters;
     }
 
     // Execute the request to the OpenAI Chat API
     async execute(text, parameters, prompt) {
         const url = this.requestUrl(text);
-        const requestParameters = this.requestParameters(text, parameters, prompt);
+        const requestParameters = this.getRequestParameters(text, parameters, prompt);
 
         const data = { ...(this.model.params || {}), ...requestParameters };
         const params = {};
         const headers = this.model.headers || {};
-        return this.executeRequest(url, data, params, headers);
+        return this.executeRequest(url, data, params, headers, prompt);
     }
 }
 
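The chat plugin now budgets the request: when the compiled prompt's token length exceeds `getModelMaxTokenLength() * getPromptTokenRatio()`, `removeMessagesUntilTarget` drops the oldest non-system messages until it fits. A sketch of that pruning behavior with token counting stubbed out (the real helper recounts tokens over the ChatML rendering with gpt-3-encoder):

    // Whitespace word count stands in for encode(messagesToChatML(...)).length
    const countTokens = (messages) => messages.reduce((n, m) => n + m.content.split(/\s+/).length, 0);

    const removeMessagesUntilTarget = (messages, targetTokenLength) => {
        while (countTokens(messages) > targetTokenLength) {
            const i = messages.findIndex(m => m.role !== 'system');
            if (i === -1) break; // only system messages left
            messages.splice(i, 1); // drop the oldest non-system message first
        }
        return messages;
    };

    const history = [
        { role: 'system', content: 'Be terse.' },
        { role: 'user', content: 'a very long first question with many words' },
        { role: 'assistant', content: 'first answer' },
        { role: 'user', content: 'second question' },
    ];
    console.log(removeMessagesUntilTarget(history, 8).map(m => m.role));
    // [ 'system', 'assistant', 'user' ] -- the oldest user turn was dropped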
@@ -1,67 +1,68 @@
 // OpenAICompletionPlugin.js
 const ModelPlugin = require('./modelPlugin');
-const handlebars = require("handlebars");
 const { encode } = require("gpt-3-encoder");
 
-//convert a messages array to a simple chatML format
-const messagesToChatML = (messages) => {
-    let output = "";
-    if (messages && messages.length) {
-        for (let message of messages) {
-            output += (message.role && message.content) ? `<|im_start|>${message.role}\n${message.content}\n<|im_end|>\n` : `${message}\n`;
-        }
-        // you always want the assistant to respond next so add a
-        // directive for that
-        output += "<|im_start|>assistant\n";
-    }
-    return output;
-}
-
 class OpenAICompletionPlugin extends ModelPlugin {
     constructor(config, pathway) {
         super(config, pathway);
     }
 
     // Set up parameters specific to the OpenAI Completion API
-    requestParameters(text, parameters, prompt) {
-        const combinedParameters = { ...this.promptParameters, ...parameters };
-        const modelPrompt = this.getModelPrompt(prompt, parameters);
-        const modelPromptText = modelPrompt.prompt ? handlebars.compile(modelPrompt.prompt)({ ...combinedParameters, text }) : '';
-        const modelPromptMessages = this.getModelPromptMessages(modelPrompt, combinedParameters, text);
-        const modelPromptMessagesML = messagesToChatML(modelPromptMessages);
-
+    getRequestParameters(text, parameters, prompt) {
+        let { modelPromptMessages, modelPromptText, tokenLength } = this.getCompiledPrompt(text, parameters, prompt);
         const { stream } = parameters;
-
-        if (modelPromptMessagesML) {
-            return {
-                prompt: modelPromptMessagesML,
-                max_tokens: this.getModelMaxTokenLength() - encode(modelPromptMessagesML).length - 1,
-                temperature: this.temperature ?? 0.7,
-                top_p: 0.95,
-                frequency_penalty: 0,
-                presence_penalty: 0,
-                stop: ["<|im_end|>"],
-                stream
-            };
+        let modelPromptMessagesML = '';
+        const modelMaxTokenLength = this.getModelMaxTokenLength();
+        let requestParameters = {};
+
+        if (modelPromptMessages) {
+            const requestMessages = this.removeMessagesUntilTarget(modelPromptMessages, modelMaxTokenLength - 1);
+            modelPromptMessagesML = this.messagesToChatML(requestMessages);
+            tokenLength = encode(modelPromptMessagesML).length;
+
+            if (tokenLength >= modelMaxTokenLength) {
+                throw new Error(`The maximum number of tokens for this model is ${modelMaxTokenLength}. Please reduce the number of messages in the prompt.`);
+            }
+
+            const max_tokens = modelMaxTokenLength - tokenLength - 1;
+
+            requestParameters = {
+                prompt: modelPromptMessagesML,
+                max_tokens: max_tokens,
+                temperature: this.temperature ?? 0.7,
+                top_p: 0.95,
+                frequency_penalty: 0,
+                presence_penalty: 0,
+                stop: ["<|im_end|>"],
+                stream
+            };
         } else {
-            return {
-                prompt: modelPromptText,
-                max_tokens: this.getModelMaxTokenLength() - encode(modelPromptText).length - 1,
-                temperature: this.temperature ?? 0.7,
-                stream
-            };
+            if (tokenLength >= modelMaxTokenLength) {
+                throw new Error(`The maximum number of tokens for this model is ${modelMaxTokenLength}. Please reduce the length of the prompt.`);
+            }
+
+            const max_tokens = modelMaxTokenLength - tokenLength - 1;
+
+            requestParameters = {
+                prompt: modelPromptText,
+                max_tokens: max_tokens,
+                temperature: this.temperature ?? 0.7,
+                stream
+            };
         }
+
+        return requestParameters;
     }
 
     // Execute the request to the OpenAI Completion API
     async execute(text, parameters, prompt) {
         const url = this.requestUrl(text);
-        const requestParameters = this.requestParameters(text, parameters, prompt);
+        const requestParameters = this.getRequestParameters(text, parameters, prompt);
 
         const data = { ...(this.model.params || {}), ...requestParameters };
         const params = {};
         const headers = this.model.headers || {};
-        return this.executeRequest(url, data, params, headers);
+        return this.executeRequest(url, data, params, headers, prompt);
     }
 }
 
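For completions, `max_tokens` is whatever room remains after the prompt: `modelMaxTokenLength - tokenLength - 1`, with a hard error when the prompt alone reaches the limit. A worked example with hypothetical numbers:

    // Hypothetical values for illustration only
    const modelMaxTokenLength = 4097;
    const tokenLength = 1000; // tokens in the compiled prompt

    if (tokenLength >= modelMaxTokenLength) {
        throw new Error(`The maximum number of tokens for this model is ${modelMaxTokenLength}.`);
    }
    const max_tokens = modelMaxTokenLength - tokenLength - 1;
    console.log(max_tokens); // 3096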
@@ -0,0 +1,91 @@
+// OpenAICompletionPlugin.js
+const ModelPlugin = require('./modelPlugin');
+const FormData = require('form-data');
+const fs = require('fs');
+const { splitMediaFile, isValidYoutubeUrl, processYoutubeUrl, deleteTempPath } = require('../../lib/fileChunker');
+const pubsub = require('../pubsub');
+
+class OpenAIWhisperPlugin extends ModelPlugin {
+    constructor(config, pathway) {
+        super(config, pathway);
+    }
+
+    // Execute the request to the OpenAI Whisper API
+    async execute(text, parameters, prompt, pathwayResolver) {
+        const url = this.requestUrl(text);
+        const params = {};
+        const { modelPromptText } = this.getCompiledPrompt(text, parameters, prompt);
+
+        const processChunk = async (chunk) => {
+            try {
+                const formData = new FormData();
+                formData.append('file', fs.createReadStream(chunk));
+                formData.append('model', this.model.params.model);
+                formData.append('response_format', 'text');
+                // formData.append('language', 'tr');
+                modelPromptText && formData.append('prompt', modelPromptText);
+
+                return this.executeRequest(url, formData, params, { ...this.model.headers, ...formData.getHeaders() });
+            } catch (err) {
+                console.log(err);
+            }
+        }
+
+        let result = ``;
+        let { file } = parameters;
+        let folder;
+        const isYoutubeUrl = isValidYoutubeUrl(file);
+        let totalCount = 0;
+        let completedCount = 0;
+        const { requestId } = pathwayResolver;
+
+        const sendProgress = () => {
+            completedCount++;
+            pubsub.publish('REQUEST_PROGRESS', {
+                requestProgress: {
+                    requestId,
+                    progress: completedCount / totalCount,
+                    data: null,
+                }
+            });
+        }
+
+        try {
+            if (isYoutubeUrl) {
+                // totalCount += 1; // extra 1 step for youtube download
+                file = await processYoutubeUrl(file);
+            }
+
+            const { chunkPromises, uniqueOutputPath } = await splitMediaFile(file);
+            folder = uniqueOutputPath;
+            totalCount += chunkPromises.length * 2; // 2 steps for each chunk (download and upload)
+            // isYoutubeUrl && sendProgress(); // send progress for youtube download after total count is calculated
+
+            // sequential download of chunks
+            const chunks = [];
+            for (const chunkPromise of chunkPromises) {
+                sendProgress();
+                chunks.push(await chunkPromise);
+            }
+
+            // sequential processing of chunks
+            for (const chunk of chunks) {
+                result += await processChunk(chunk);
+                sendProgress();
+            }
+
+            // parallel processing, dropped
+            // result = await Promise.all(mediaSplit.chunks.map(processChunk));
+
+        } catch (error) {
+            console.error("An error occurred:", error);
+        } finally {
+            isYoutubeUrl && (await deleteTempPath(file));
+            folder && (await deleteTempPath(folder));
+        }
+        return result;
+    }
+}
+
+module.exports = OpenAIWhisperPlugin;
+
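The new Whisper plugin (note its header comment still reads "OpenAICompletionPlugin.js" in the published code) splits media into chunks and reports progress over pubsub; each chunk accounts for two steps, one when its download completes and one when its transcription returns, so progress only reaches 1.0 after the last transcription. The progress math, isolated with an assumed three-chunk file:

    // Progress arithmetic from the plugin, isolated (3 chunks assumed for illustration)
    const chunkCount = 3;
    const totalCount = chunkCount * 2; // 2 steps per chunk
    let completedCount = 0;

    const sendProgress = () => {
        completedCount++;
        console.log(`progress: ${(completedCount / totalCount).toFixed(2)}`);
    };

    for (let i = 0; i < chunkCount; i++) sendProgress(); // downloads: 0.17 0.33 0.50
    for (let i = 0; i < chunkCount; i++) sendProgress(); // transcriptions: 0.67 0.83 1.00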
package/graphql/prompt.js CHANGED
@@ -12,6 +12,7 @@ class Prompt {
 
         this.usesTextInput = promptContains('text', this.prompt ? this.prompt : this.messages);
         this.usesPreviousResult = promptContains('previousResult', this.prompt ? this.prompt : this.messages);
+        this.debugInfo = '';
     }
 }
 
@@ -12,16 +12,16 @@ const rootResolver = async (parent, args, contextValue, info) => {
         info.cacheControl.setCacheHint({ maxAge: 60 * 60 * 24, scope: 'PUBLIC' });
     }
 
-    const pathwayResolver = new PathwayResolver({ config, pathway, requestState });
+    const pathwayResolver = new PathwayResolver({ config, pathway, args, requestState });
     contextValue.pathwayResolver = pathwayResolver;
 
-    // Add request parameters back as debug
-    const requestParameters = pathwayResolver.prompts.map((prompt) => pathwayResolver.pathwayPrompter.plugin.requestParameters(args.text, args, prompt));
-    const debug = JSON.stringify(requestParameters);
-
     // Execute the request with timeout
     const result = await fulfillWithTimeout(pathway.resolver(parent, args, contextValue, info), pathway.timeout);
     const { warnings, previousResult, savedContextId } = pathwayResolver;
+
+    // Add request parameters back as debug
+    const debug = pathwayResolver.prompts.map(prompt => prompt.debugInfo || '').join('\n').trim();
+
     return { debug, result, warnings, previousResult, contextId: savedContextId }
 }
 