@aj-archipelago/cortex 1.3.29 → 1.3.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex",
-  "version": "1.3.29",
+  "version": "1.3.30",
   "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
   "private": false,
   "repository": {
@@ -59,12 +59,17 @@ export default {
 
     logger.debug(`Using generator pathway: ${generatorPathway}`);
 
-    const result = await callPathway(generatorPathway, newArgs, resolver);
+    let result = await callPathway(generatorPathway, newArgs, resolver);
 
     if (!result && !args.stream) {
         result = await callPathway('sys_generator_error', { ...args, text: `Tried to use a tool (${generatorPathway}), but no result was returned`, stream: false }, resolver);
     }
 
+    if (resolver.errors.length > 0) {
+        result = await callPathway('sys_generator_error', { ...args, text: resolver.errors.join('\n'), stream: false }, resolver);
+        resolver.errors = [];
+    }
+
     return result;
 
 } catch (e) {
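In effect, this hunk (which appears to come from one of Cortex's pathway resolvers) changes the result binding to let so it can be reassigned, and adds a block that routes any errors accumulated on the resolver through the sys_generator_error pathway before clearing them, so failures produce a user-facing explanation instead of being dropped. A minimal sketch of the pattern, assuming resolver.errors is a plain array of strings (the recordError helper and sample error text below are hypothetical):

    // Sketch only: callPathway, args, and resolver.errors mirror the names in
    // the hunk above; recordError and the error message are illustrative.
    const resolver = { errors: [] };

    function recordError(message) {
      // A model plugin could record a failure here instead of throwing.
      resolver.errors.push(message);
    }

    recordError('429: rate limit exceeded');

    // After callPathway returns, the new block joins the collected errors,
    // hands them to sys_generator_error, and resets the list:
    if (resolver.errors.length > 0) {
      const text = resolver.errors.join('\n');
      // result = await callPathway('sys_generator_error', { ...args, text, stream: false }, resolver);
      resolver.errors = [];
    }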
@@ -4,8 +4,11 @@ export default {
     prompt:
         [
            new Prompt({ messages: [
-                {"role": "system", "content": `{{renderTemplate AI_DIRECTIVES}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n\n{{renderTemplate AI_EXPERTISE}}\n\nThe user has requested information that you have already determined can be found in the indexes that you can search, and you were trying to search for it, but encountered the following error: {{{text}}}. Your response should be concise, fit the rest of the conversation, include detail appropriate for the technical level of the user if you can determine it, and be appropriate for the context. You cannot resolve this error.\n{{renderTemplate AI_DATETIME}}`},
-                "{{chatHistory}}",
+                {"role": "system", "content": `{{renderTemplate AI_MEMORY}}\n\n{{renderTemplate AI_DIRECTIVES}}\n\n{{renderTemplate AI_COMMON_INSTRUCTIONS}}\n\n{{renderTemplate AI_EXPERTISE}}\n\n{{renderTemplate AI_CONVERSATION_HISTORY}}\n\nYou were trying to fulfill the user's last request in the above conversation, but ran into an error. You cannot resolve this error.\n{{renderTemplate AI_DATETIME}}`},
+                {
+                    "role": "user",
+                    "content": `The model that you were trying to use to fulfill the user's request returned the following error(s): {{{text}}}. Please let them know what happened. Your response should be concise, fit the rest of the conversation, include detail appropriate for the technical level of the user if you can determine it, and be appropriate for the context. You cannot resolve this error.`
+                },
            ]}),
         ],
     inputParameters: {
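This hunk appears to be the sys_generator_error pathway's prompt. The error details move out of the system message into a dedicated user-role message, and the literal "{{chatHistory}}" entry is replaced by the AI_MEMORY and AI_CONVERSATION_HISTORY templates in the system message. Rendered, the prompt would look roughly like this (the expanded template text and error string are illustrative, not taken from the package):

    // Illustrative rendering only; the real templates expand to much longer text.
    const messages = [
      {
        role: 'system',
        content: 'Memory, directives, common instructions, expertise, and the ' +
                 'prior conversation as rendered by the templates, followed by: ' +
                 'You were trying to fulfill the user\'s last request in the ' +
                 'above conversation, but ran into an error.',
      },
      {
        role: 'user',
        content: 'The model that you were trying to use to fulfill the user\'s ' +
                 'request returned the following error(s): 429: rate limit ' +
                 'exceeded. Please let them know what happened.',
      },
    ];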
@@ -140,7 +140,7 @@ class Gemini15ChatPlugin extends ModelPlugin {
             dataToMerge = data.contents;
         } else if (data && data.candidates && Array.isArray(data.candidates)) {
             const { content, finishReason, safetyRatings } = data.candidates[0];
-            if (finishReason === 'STOP') {
+            if (finishReason === 'STOP' || finishReason === 'MAX_TOKENS') {
                 return content?.parts?.[0]?.text ?? '';
             } else {
                 const returnString = `Response was not completed. Finish reason: ${finishReason}, Safety ratings: ${JSON.stringify(safetyRatings, null, 2)}`;
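This one-line change in Gemini15ChatPlugin treats MAX_TOKENS the same as STOP, so output that was cut off at the model's token limit is returned as (truncated) text instead of falling into the "Response was not completed" branch. A small sketch of the effect, assuming a response body shaped like the fields destructured above:

    // Assumed response shape, based on the destructuring in the hunk above.
    const data = {
      candidates: [{
        content: { parts: [{ text: 'A partial answer that hit the output limit' }] },
        finishReason: 'MAX_TOKENS',
        safetyRatings: [],
      }],
    };

    const { content, finishReason } = data.candidates[0];

    // Before 1.3.30 only 'STOP' returned text; 'MAX_TOKENS' was reported as an
    // incomplete response. Now the truncated text is passed back to the caller.
    if (finishReason === 'STOP' || finishReason === 'MAX_TOKENS') {
      console.log(content?.parts?.[0]?.text ?? '');
    }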
@@ -143,8 +143,8 @@ class Gemini15VisionPlugin extends Gemini15ChatPlugin {
             if (data.error.code === 400 && data.error.message === 'Precondition check failed.') {
                 throw new Error('One or more of the included files is too large to process. Please try again with a smaller file.');
             }
-            throw e;
         }
+        throw e;
     }
     return result;
 }
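Relocating the throw e; statement one level out in Gemini15VisionPlugin appears to change how errors propagate: previously the caught exception was only re-thrown inside the branch that inspects data.error, so exceptions without a structured error body could be swallowed and result returned anyway; now every caught exception other than the oversized-file case is re-thrown. A structural sketch under that reading (the surrounding guard and try/catch are inferred from the hunk, and the parsing line is a placeholder):

    // Structural sketch only; `data` and the parse step stand in for the
    // plugin's real response handling.
    function parseVisionResponse(data) {
      let result = '';
      try {
        result = data.candidates[0].content.parts[0].text;
      } catch (e) {
        if (data && data.error) {
          if (data.error.code === 400 && data.error.message === 'Precondition check failed.') {
            throw new Error('One or more of the included files is too large to process. Please try again with a smaller file.');
          }
        }
        // Re-thrown unconditionally as of 1.3.30, so parsing failures are no
        // longer silently turned into a normal return.
        throw e;
      }
      return result;
    }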