@aj-archipelago/cortex 0.0.3 → 0.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,9 @@
+ {
+   "git": {
+     "commitMessage": "chore: release v${version}"
+   },
+   "github": {
+     "release": true,
+     "autoGenerate": true
+   }
+ }
package/config.js CHANGED
@@ -50,6 +50,7 @@ var config = convict({
      format: Object,
      default: {
        "oai-td3": {
+         "type": "OPENAI-COMPLETION",
          "url": "{{openaiApiUrl}}",
          "headers": {
            "Authorization": "Bearer {{openaiApiKey}}",
package/graphql/parser.js CHANGED
@@ -1,22 +1,3 @@
- //simples form string single or list return
- const getResponseResult = (data) => {
-     const { choices } = data;
-     if (!choices || !choices.length) {
-         return; //TODO no choices case
-     }
-
-     // if we got a choices array back with more than one choice, return the whole array
-     if (choices.length > 1) {
-         return choices;
-     }
-
-     // otherwise, return the first choice
-     const textResult = choices[0].text && choices[0].text.trim();
-     const messageResult = choices[0].message && choices[0].message.content && choices[0].message.content.trim();
-
-     return messageResult || textResult || null;
- }
-
  //simply trim and parse with given regex
  const regexParser = (text, regex) => {
      return text.trim().split(regex).map(s => s.trim()).filter(s => s.length);
@@ -51,8 +32,7 @@ const parseNumberedObjectList = (text, format) => {
  }
 
  module.exports = {
-     getResponseResult,
      regexParser,
      parseNumberedList,
-     parseNumberedObjectList
+     parseNumberedObjectList,
  };
@@ -1,145 +1,58 @@
- const { request } = require("../lib/request");
+ // PathwayPrompter.js
+ const OpenAIChatPlugin = require('./plugins/openAIChatPlugin');
+ const OpenAICompletionPlugin = require('./plugins/openAICompletionPlugin');
+ const AzureTranslatePlugin = require('./plugins/azureTranslatePlugin');
  const handlebars = require("handlebars");
- const { getResponseResult } = require("./parser");
  const { Exception } = require("handlebars");
- const { encode } = require("gpt-3-encoder");
-
- const DEFAULT_MAX_TOKENS = 4096;
- const DEFAULT_PROMPT_TOKEN_RATIO = 0.5;
 
  // register functions that can be called directly in the prompt markdown
- handlebars.registerHelper('stripHTML', function(value) {
+ handlebars.registerHelper('stripHTML', function (value) {
      return value.replace(/<[^>]*>/g, '');
- });
+ });
 
- handlebars.registerHelper('now', function() {
+ handlebars.registerHelper('now', function () {
      return new Date().toISOString();
- });
-
- class PathwayPrompter {
-     constructor({ config, pathway }) {
-         // If the pathway specifies a model, use that, otherwise use the default
-         this.modelName = pathway.model || config.get('defaultModelName');
-         // Get the model from the config
-         this.model = config.get('models')[this.modelName];
-         // If the model doesn't exist, throw an exception
-         if (!this.model) {
-             throw new Exception(`Model ${this.modelName} not found in config`);
-         }
-         this.environmentVariables = config.getEnv();
-         this.temperature = pathway.temperature;
-         this.pathwayPrompt = pathway.prompt;
-         this.pathwayName = pathway.name;
-         this.promptParameters = {}
-         // Make all of the parameters defined on the pathway itself available to the prompt
-         for (const [k, v] of Object.entries(pathway)) {
-             this.promptParameters[k] = v.default ?? v;
-         }
-         if (pathway.inputParameters) {
-             for (const [k, v] of Object.entries(pathway.inputParameters)) {
-                 this.promptParameters[k] = v.default ?? v;
-             }
-         }
-         this.requestCount = 1
-     }
-
-     getModelMaxTokenLength() {
-         return (this.promptParameters.maxTokenLength ?? this.model.maxTokenLength ?? DEFAULT_MAX_TOKENS);
-     }
-
-     getPromptTokenRatio() {
-         return this.promptParameters.inputParameters.tokenRatio ?? this.promptParameters.tokenRatio ?? DEFAULT_PROMPT_TOKEN_RATIO;
-     }
-
-     requestUrl() {
-         const generateUrl = handlebars.compile(this.model.url);
-         return generateUrl({ ...this.model, ...this.environmentVariables, ...this.config });
-     }
-
-     requestParameters(text, parameters, prompt) {
-         // the prompt object will either have a messages property or a prompt propery
-         // or it could be a function that returns prompt text
-
-         const combinedParameters = { ...this.promptParameters, ...parameters };
+ });
 
-         // if it's a messages prompt, compile the messages and send them directly
-         // to the API - a messages prompt automatically means its a chat-style
-         // conversation
-         if (prompt.messages)
-         {
-             const compiledMessages = prompt.messages.map((message) => {
-                 const compileText = handlebars.compile(message.content);
-                 return { role: message.role,
-                     content: compileText({...combinedParameters, text})
-                 }
-             })
+ handlebars.registerHelper('toJSON', function(object) {
+     return JSON.stringify(object);
+ });
+
 
-             return {
-                 messages: compiledMessages,
-                 temperature: this.temperature ?? 0.7,
-             }
-         }
+ class PathwayPrompter {
+     constructor({ config, pathway }) {
 
-         // otherwise, we need to get the prompt text
-         let promptText;
+         const modelName = pathway.model || config.get('defaultModelName');
+         const model = config.get('models')[modelName];
 
-         if (typeof (prompt) === 'function') {
-             promptText = prompt(parameters);
-         }
-         else {
-             promptText = prompt.prompt;
+         if (!model) {
+             throw new Exception(`Model ${modelName} not found in config`);
          }
 
-         const interpolatePrompt = handlebars.compile(promptText);
-         const constructedPrompt = interpolatePrompt({ ...combinedParameters, text });
-
-         // this prompt could be for either a chat-style conversation or a completion-style
-         // conversation. They require different parameters.
-
-         let params = {};
+         let plugin;
 
-         if (this.model.type === 'OPENAI_CHAT') {
-             params = {
-                 messages: [ {"role": "user", "content": constructedPrompt} ],
-                 temperature: this.temperature ?? 0.7,
-             }
-         } else {
-             params = {
-                 prompt: constructedPrompt,
-                 max_tokens: this.getModelMaxTokenLength() - encode(constructedPrompt).length - 1,
-                 // model: "text-davinci-002",
-                 temperature: this.temperature ?? 0.7,
-                 // "top_p": 1,
-                 // "n": 1,
-                 // "presence_penalty": 0,
-                 // "frequency_penalty": 0,
-                 // "best_of": 1,
-             }
+         switch (model.type) {
+             case 'OPENAI-CHAT':
+                 plugin = new OpenAIChatPlugin(config, pathway);
+                 break;
+             case 'AZURE-TRANSLATE':
+                 plugin = new AzureTranslatePlugin(config, pathway);
+                 break;
+             case 'OPENAI-COMPLETION':
+                 plugin = new OpenAICompletionPlugin(config, pathway);
+                 break;
+             default:
+                 throw new Exception(`Unsupported model type: ${model.type}`);
          }
 
-         return params;
+         this.plugin = plugin;
      }
 
      async execute(text, parameters, prompt) {
-         const requestParameters = this.requestParameters(text, parameters, prompt);
-
-         const url = this.requestUrl(text);
-         const params = { ...(this.model.params || {}), ...requestParameters }
-         const headers = this.model.headers || {};
-         const data = await request({ url, params, headers }, this.modelName);
-         const modelInput = params.prompt || params.messages[0].content;
-         console.log(`=== ${this.pathwayName}.${this.requestCount++} ===`)
-         console.log(`\x1b[36m${modelInput}\x1b[0m`)
-         console.log(`\x1b[34m> ${getResponseResult(data)}\x1b[0m`)
-
-         if (data.error) {
-             throw new Exception(`An error was returned from the server: ${JSON.stringify(data.error)}`);
-         }
-
-         return getResponseResult(data);
+         return await this.plugin.execute(text, parameters, prompt);
      }
  }
 
  module.exports = {
-     PathwayPrompter
- }
+     PathwayPrompter
+ };
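After this change PathwayPrompter is just a dispatcher: it looks up the pathway's model, switches on model.type, and keeps the matching plugin, so execute() forwards straight to plugin.execute(). A minimal usage sketch, assuming a convict-style config with get('models') and get('defaultModelName'), a made-up pathway, and an assumed require path (the file's path is not shown in this diff):

    // Hypothetical wiring; pathway shape, config object, and require path are assumptions.
    const { PathwayPrompter } = require('./pathwayPrompter'); // path assumed

    const pathway = { name: 'summary', model: 'oai-td3', prompt: 'Summarize:\n\n{{{text}}}' };
    const prompter = new PathwayPrompter({ config, pathway });

    // model.type of 'OPENAI-CHAT', 'OPENAI-COMPLETION', or 'AZURE-TRANSLATE' selects the
    // plugin; anything else throws at construction time.
    prompter.execute(longText, {}, { prompt: pathway.prompt }).then(result => console.log(result));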
@@ -106,7 +106,7 @@ class PathwayResolver {
              const warnText = `Your input is possibly too long, truncating! Text length: ${text.length}`;
              this.warnings.push(warnText);
              console.warn(warnText);
-             text = truncate(text, chunkMaxChunkTokenLength);
+             text = this.truncate(text, chunkMaxChunkTokenLength);
          }
          return [text];
      }
@@ -116,7 +116,7 @@
      }
 
      truncate(str, n) {
-         if (this.pathwayPrompter.promptParameters.truncateFromFront) {
+         if (this.pathwayPrompter.plugin.promptParameters.truncateFromFront) {
              return getFirstNToken(str, n);
          }
          return getLastNToken(str, n);
@@ -134,7 +134,7 @@
          // find the longest prompt
          const maxPromptTokenLength = Math.max(...this.prompts.map(({ prompt }) => prompt ? encode(String(prompt)).length : 0));
          const maxMessagesTokenLength = Math.max(...this.prompts.map(({ messages }) => messages ? messages.reduce((acc, {role, content}) => {
-             return acc + encode(role).length + encode(content).length;
+             return (role && content) ? acc + encode(role).length + encode(content).length : acc;
          }, 0) : 0));
 
          const maxTokenLength = Math.max(maxPromptTokenLength, maxMessagesTokenLength);
@@ -144,8 +144,8 @@
 
          // the token ratio is the ratio of the total prompt to the result text - both have to be included
          // in computing the max token length
-         const promptRatio = this.pathwayPrompter.getPromptTokenRatio();
-         let maxChunkToken = promptRatio * this.pathwayPrompter.getModelMaxTokenLength() - maxTokenLength;
+         const promptRatio = this.pathwayPrompter.plugin.getPromptTokenRatio();
+         let maxChunkToken = promptRatio * this.pathwayPrompter.plugin.getModelMaxTokenLength() - maxTokenLength;
 
          // if we have to deal with prompts that have both text input
         // and previous result, we need to split the maxChunkToken in half
@@ -0,0 +1,42 @@
+ // AzureTranslatePlugin.js
+ const ModelPlugin = require('./modelPlugin');
+ const handlebars = require("handlebars");
+
+ class AzureTranslatePlugin extends ModelPlugin {
+     constructor(config, modelName, pathway) {
+         super(config, modelName, pathway);
+     }
+
+     // Set up parameters specific to the Azure Translate API
+     requestParameters(text, parameters, prompt) {
+         const combinedParameters = { ...this.promptParameters, ...parameters };
+         const modelPrompt = this.getModelPrompt(prompt, parameters);
+         const modelPromptText = modelPrompt.prompt ? handlebars.compile(modelPrompt.prompt)({ ...combinedParameters, text }) : '';
+
+         return {
+             data: [
+                 {
+                     Text: modelPromptText,
+                 },
+             ],
+             params: {
+                 to: combinedParameters.to
+             }
+         };
+     }
+
+     // Execute the request to the Azure Translate API
+     async execute(text, parameters, prompt) {
+         const requestParameters = this.requestParameters(text, parameters, prompt);
+
+         const url = this.requestUrl(text);
+
+         const data = requestParameters.data;
+         const params = requestParameters.params;
+         const headers = this.model.headers || {};
+
+         return this.executeRequest(url, data, params, headers);
+     }
+ }
+
+ module.exports = AzureTranslatePlugin;
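AzureTranslatePlugin puts the text to translate in the POST body and the target language in the query string, which is why executeRequest() now takes data and params separately. Roughly what requestParameters() returns for a translate pathway whose caller passed { to: 'es' }; the values are illustrative:

    {
        data: [ { Text: 'Hello world' } ],  // request body for the Translator API
        params: { to: 'es' }                // appended to the URL as ?to=es
    }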
@@ -0,0 +1,138 @@
+ // ModelPlugin.js
+ const handlebars = require('handlebars');
+ const { request } = require("../../lib/request");
+ const { getResponseResult } = require("../parser");
+
+ const DEFAULT_MAX_TOKENS = 4096;
+ const DEFAULT_PROMPT_TOKEN_RATIO = 0.5;
+
+ class ModelPlugin {
+     constructor(config, pathway) {
+         // If the pathway specifies a model, use that, otherwise use the default
+         this.modelName = pathway.model || config.get('defaultModelName');
+         // Get the model from the config
+         this.model = config.get('models')[this.modelName];
+         // If the model doesn't exist, throw an exception
+         if (!this.model) {
+             throw new Error(`Model ${this.modelName} not found in config`);
+         }
+
+         this.config = config;
+         this.environmentVariables = config.getEnv();
+         this.temperature = pathway.temperature;
+         this.pathwayPrompt = pathway.prompt;
+         this.pathwayName = pathway.name;
+         this.promptParameters = {};
+
+         // Make all of the parameters defined on the pathway itself available to the prompt
+         for (const [k, v] of Object.entries(pathway)) {
+             this.promptParameters[k] = v.default ?? v;
+         }
+         if (pathway.inputParameters) {
+             for (const [k, v] of Object.entries(pathway.inputParameters)) {
+                 this.promptParameters[k] = v.default ?? v;
+             }
+         }
+
+         this.requestCount = 1;
+     }
+
+     getModelMaxTokenLength() {
+         return (this.promptParameters.maxTokenLength ?? this.model.maxTokenLength ?? DEFAULT_MAX_TOKENS);
+     }
+
+     getPromptTokenRatio() {
+         // TODO: Is this the right order of precedence? inputParameters should maybe be second?
+         return this.promptParameters.inputParameters.tokenRatio ?? this.promptParameters.tokenRatio ?? DEFAULT_PROMPT_TOKEN_RATIO;
+     }
+
+
+     getModelPrompt(prompt, parameters) {
+         if (typeof(prompt) === 'function') {
+             return prompt(parameters);
+         } else {
+             return prompt;
+         }
+     }
+
+     getModelPromptMessages(modelPrompt, combinedParameters, text) {
+         if (!modelPrompt.messages) {
+             return null;
+         }
+
+         // First run handlebars compile on the pathway messages
+         const compiledMessages = modelPrompt.messages.map((message) => {
+             if (message.content) {
+                 const compileText = handlebars.compile(message.content);
+                 return {
+                     role: message.role,
+                     content: compileText({ ...combinedParameters, text }),
+                 };
+             } else {
+                 return message;
+             }
+         });
+
+         // Next add in any parameters that are referenced by name in the array
+         const expandedMessages = compiledMessages.flatMap((message) => {
+             if (typeof message === 'string') {
+                 const match = message.match(/{{(.+?)}}/);
+                 const placeholder = match ? match[1] : null;
+                 if (placeholder === null) {
+                     return message;
+                 } else {
+                     return combinedParameters[placeholder] || [];
+                 }
+             } else {
+                 return [message];
+             }
+         });
+
+         return expandedMessages;
+     }
+
+     requestUrl() {
+         const generateUrl = handlebars.compile(this.model.url);
+         return generateUrl({ ...this.model, ...this.environmentVariables, ...this.config });
+     }
+
+     //simples form string single or list return
+     parseResponse(data) {
+         const { choices } = data;
+         if (!choices || !choices.length) {
+             if (Array.isArray(data) && data.length > 0 && data[0].translations) {
+                 return data[0].translations[0].text.trim();
+             }
+         }
+
+         // if we got a choices array back with more than one choice, return the whole array
+         if (choices.length > 1) {
+             return choices;
+         }
+
+         // otherwise, return the first choice
+         const textResult = choices[0].text && choices[0].text.trim();
+         const messageResult = choices[0].message && choices[0].message.content && choices[0].message.content.trim();
+
+         return messageResult || textResult || null;
+     }
+
+     async executeRequest(url, data, params, headers) {
+         const responseData = await request({ url, data, params, headers }, this.modelName);
+         const modelInput = data.prompt || (data.messages && data.messages[0].content) || data[0].Text || null;
+         console.log(`=== ${this.pathwayName}.${this.requestCount++} ===`)
+         console.log(`\x1b[36m${modelInput}\x1b[0m`)
+         console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
+
+         if (responseData.error) {
+             throw new Exception(`An error was returned from the server: ${JSON.stringify(responseData.error)}`);
+         }
+
+         return this.parseResponse(responseData);
+     }
+
+ }
+
+ module.exports = ModelPlugin;
+
+
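getModelPromptMessages() makes two passes: it compiles Handlebars inside each message's content, then expands any bare "{{name}}" string entry in the messages array into the matching parameter value, which lets a pathway splice an entire conversation in by name. A small sketch; the 'chatHistory' parameter and the message contents are made up for illustration:

    // Illustrative only; pathway message contents and parameter names are assumptions.
    const modelPrompt = {
        messages: [
            { role: 'system', content: 'Translate the user input to {{to}}.' },
            '{{chatHistory}}',                    // expanded by name into an array of messages
            { role: 'user', content: '{{text}}' },
        ],
    };
    const parameters = { to: 'French', chatHistory: [{ role: 'user', content: 'Bonjour!' }] };
    // plugin.getModelPromptMessages(modelPrompt, parameters, 'Good morning') would return:
    // [ { role: 'system', content: 'Translate the user input to French.' },
    //   { role: 'user', content: 'Bonjour!' },
    //   { role: 'user', content: 'Good morning' } ]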
@@ -0,0 +1,35 @@
+ // OpenAIChatPlugin.js
+ const ModelPlugin = require('./modelPlugin');
+ const handlebars = require("handlebars");
+
+ class OpenAIChatPlugin extends ModelPlugin {
+     constructor(config, pathway) {
+         super(config, pathway);
+     }
+
+     // Set up parameters specific to the OpenAI Chat API
+     requestParameters(text, parameters, prompt) {
+         const combinedParameters = { ...this.promptParameters, ...parameters };
+         const modelPrompt = this.getModelPrompt(prompt, parameters);
+         const modelPromptText = modelPrompt.prompt ? handlebars.compile(modelPrompt.prompt)({ ...combinedParameters, text }) : '';
+         const modelPromptMessages = this.getModelPromptMessages(modelPrompt, combinedParameters, text);
+
+         return {
+             messages: modelPromptMessages || [{ "role": "user", "content": modelPromptText }],
+             temperature: this.temperature ?? 0.7,
+         };
+     }
+
+     // Execute the request to the OpenAI Chat API
+     async execute(text, parameters, prompt) {
+         const url = this.requestUrl(text);
+         const requestParameters = this.requestParameters(text, parameters, prompt);
+
+         const data = { ...(this.model.params || {}), ...requestParameters };
+         const params = {};
+         const headers = this.model.headers || {};
+         return this.executeRequest(url, data, params, headers);
+     }
+ }
+
+ module.exports = OpenAIChatPlugin;
@@ -0,0 +1,65 @@
+ // OpenAICompletionPlugin.js
+ const ModelPlugin = require('./modelPlugin');
+ const handlebars = require("handlebars");
+ const { encode } = require("gpt-3-encoder");
+
+ //convert a messages array to a simple chatML format
+ const messagesToChatML = (messages) => {
+     let output = "";
+     if (messages && messages.length) {
+         for (let message of messages) {
+             output += (message.role && message.content) ? `<|im_start|>${message.role}\n${message.content}\n<|im_end|>\n` : `${message}\n`;
+         }
+         // you always want the assistant to respond next so add a
+         // directive for that
+         output += "<|im_start|>assistant\n";
+     }
+     return output;
+ }
+
+ class OpenAICompletionPlugin extends ModelPlugin {
+     constructor(config, pathway) {
+         super(config, pathway);
+     }
+
+     // Set up parameters specific to the OpenAI Completion API
+     requestParameters(text, parameters, prompt) {
+         const combinedParameters = { ...this.promptParameters, ...parameters };
+         const modelPrompt = this.getModelPrompt(prompt, parameters);
+         const modelPromptText = modelPrompt.prompt ? handlebars.compile(modelPrompt.prompt)({ ...combinedParameters, text }) : '';
+         const modelPromptMessages = this.getModelPromptMessages(modelPrompt, combinedParameters, text);
+         const modelPromptMessagesML = messagesToChatML(modelPromptMessages);
+
+         if (modelPromptMessagesML) {
+             return {
+                 prompt: modelPromptMessagesML,
+                 max_tokens: this.getModelMaxTokenLength() - encode(modelPromptMessagesML).length - 1,
+                 temperature: this.temperature ?? 0.7,
+                 top_p: 0.95,
+                 frequency_penalty: 0,
+                 presence_penalty: 0,
+                 stop: ["<|im_end|>"]
+             };
+         } else {
+             return {
+                 prompt: modelPromptText,
+                 max_tokens: this.getModelMaxTokenLength() - encode(modelPromptText).length - 1,
+                 temperature: this.temperature ?? 0.7,
+             };
+         }
+     }
+
+     // Execute the request to the OpenAI Completion API
+     async execute(text, parameters, prompt) {
+         const url = this.requestUrl(text);
+         const requestParameters = this.requestParameters(text, parameters, prompt);
+
+         const data = { ...(this.model.params || {}), ...requestParameters };
+         const params = {};
+         const headers = this.model.headers || {};
+         return this.executeRequest(url, data, params, headers);
+     }
+ }
+
+ module.exports = OpenAICompletionPlugin;
+
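messagesToChatML() flattens the chat messages into the ChatML markers expected by completion-style endpoints and always appends an assistant start tag so the model responds next; the "stop" parameter then cuts generation at the first <|im_end|>. For illustration (the function is module-private, so this traces its logic rather than calling a public API):

    // input:  [ { role: 'system', content: 'Be terse.' }, { role: 'user', content: 'Hi' } ]
    // output: "<|im_start|>system\nBe terse.\n<|im_end|>\n<|im_start|>user\nHi\n<|im_end|>\n<|im_start|>assistant\n"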
package/graphql/prompt.js CHANGED
@@ -25,7 +25,7 @@ function promptContains(variable, prompt) {
      // if it's an array, it's the messages format
      if (Array.isArray(prompt)) {
          prompt.forEach(p => {
-             while ((match = p.content && regexp.exec(p.content)) !== null) {
+             while (match = p.content && regexp.exec(p.content)) {
                  matches.push(match[1]);
              }
          });
@@ -16,7 +16,7 @@ const rootResolver = async (parent, args, contextValue, info) => {
      contextValue.pathwayResolver = pathwayResolver;
 
      // Add request parameters back as debug
-     const requestParameters = pathwayResolver.prompts.map((prompt) => pathwayResolver.pathwayPrompter.requestParameters(args.text, args, prompt));
+     const requestParameters = pathwayResolver.prompts.map((prompt) => pathwayResolver.pathwayPrompter.plugin.requestParameters(args.text, args, prompt));
      const debug = JSON.stringify(requestParameters);
 
      // Execute the request with timeout
@@ -12,10 +12,11 @@ const typeDef = (pathway) => {
      const fieldsStr = !fields ? `` : fields.map(f => `${f}: String`).join('\n    ');
 
      const typeName = fields ? `${objName}Result` : `String`;
+     const messageType = `input Message { role: String, content: String }`;
+
      const type = fields ? `type ${typeName} {
          ${fieldsStr}
-     }` : ``;
-
+     }` : ``;
 
      const resultStr = pathway.list ? `[${typeName}]` : typeName;
 
@@ -29,18 +30,21 @@ const typeDef = (pathway) => {
 
 
      const params = { ...defaultInputParameters, ...inputParameters };
-     const paramsStr = Object.entries(params).map(
-         ([key, value]) => `${key}: ${GRAPHQL_TYPE_MAP[typeof (value)]} = ${typeof (value) == `string` ? `"${value}"` : value}`).join('\n');
-
 
-     return `${type}
-
-     ${responseType}
-
-     extend type Query {
-         ${name}(${paramsStr}): ${objName}
-     }
-     `;
+     const paramsStr = Object.entries(params).map(
+         ([key, value]) => {
+             if (typeof value === 'object' && Array.isArray(value)) {
+                 return `${key}: [Message] = []`;
+             } else {
+                 return `${key}: ${GRAPHQL_TYPE_MAP[typeof (value)]} = ${typeof (value) === 'string' ? `"${value}"` : value}`;
+             }
+         }
+     ).join('\n');
+
+
+     const definition = `${messageType}\n\n${type}\n\n${responseType}\n\nextend type Query {${name}(${paramsStr}): ${objName}}`;
+     //console.log(definition);
+     return definition;
  }
 
  module.exports = {
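Every pathway schema now also declares an input Message type, and any array-valued input parameter is exposed as [Message] with an empty-list default. Roughly the SDL string typeDef() would return for a hypothetical pathway named "chat" with a "messages" input parameter (shown as comments; the pathway name, parameters, and result type are illustrative, not taken from the package):

    // input Message { role: String, content: String }
    //
    // extend type Query {chat(text: String = "", messages: [Message] = []): Chat}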
package/lib/request.js CHANGED
@@ -14,21 +14,21 @@ const buildLimiters = (config) => {
  }
 
  const MAX_RETRY = 10;
- const postRequest = async ({ url, params, headers }, model) => {
+ const postRequest = async ({ url, data, params, headers }, model) => {
      let retry = 0;
      const errors = []
      for (let i = 0; i < MAX_RETRY; i++) {
          try {
              if (i > 0) {
-                 console.log(`Retrying request #retry ${i}: ${JSON.stringify(params)}...`);
+                 console.log(`Retrying request #retry ${i}: ${JSON.stringify(data)}...`);
                  await new Promise(r => setTimeout(r, 200 * Math.pow(2, i))); // exponential backoff
              }
              if (!limiters[model]) {
                  throw new Error(`No limiter for model ${model}!`);
              }
-             return await limiters[model].schedule(() => axios.post(url, params, { headers }));
+             return await limiters[model].schedule(() => axios.post(url, data, { params, headers }));
          } catch (e) {
-             console.error(`Failed request with params ${JSON.stringify(params)}: ${e}`);
+             console.error(`Failed request with data ${JSON.stringify(data)}: ${e}`);
              errors.push(e);
          }
      }
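postRequest() now distinguishes the POST body (data) from URL query parameters (params): axios serializes params into the query string while data becomes the JSON body, which is what the Azure Translate plugin relies on. Illustratively, with assumed values:

    // axios.post(url, [{ Text: 'Hello' }], { params: { to: 'es' }, headers })
    // issues POST {url}?to=es with [{"Text":"Hello"}] as the request body.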
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@aj-archipelago/cortex",
-     "version": "0.0.3",
+     "version": "0.0.5",
      "description": "Project Cortex",
      "repository": {
          "type": "git",
@@ -19,5 +19,5 @@ module.exports = {
      useParallelChunkProcessing: false,
      useInputSummarization: false,
      truncateFromFront: false,
-     timeout: 60, // in seconds
+     timeout: 120, // in seconds
  }
@@ -1,3 +1,3 @@
  module.exports = {
-     prompt: `Rewrite the following:\n\n{{text}}`
+     prompt: `Rewrite the following:\n\n{{{text}}}`
  }
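Switching {{text}} to the triple-stache {{{text}}} turns off Handlebars HTML escaping, so user text containing characters like & or < reaches the model verbatim instead of as HTML entities. For example:

    // handlebars.compile('{{text}}')({ text: 'AT&T <b>' })    -> 'AT&amp;T &lt;b&gt;'
    // handlebars.compile('{{{text}}}')({ text: 'AT&T <b>' })  -> 'AT&T <b>'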