@aj-archipelago/cortex 1.3.38 → 1.3.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex",
-  "version": "1.3.38",
+  "version": "1.3.40",
   "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
   "private": false,
   "repository": {
@@ -16,6 +16,7 @@ export default {
         }),
     ],
     inputParameters: {
+        chatHistory: [{role: '', content: []}],
         title: '',
         text: '',
     },
@@ -16,11 +16,7 @@ export default {
         privateData: false,
         chatHistory: [{role: '', content: []}],
         contextId: ``,
-        indexName: ``,
-        semanticConfiguration: ``,
-        roleInformation: ``,
-        calculateEmbeddings: false,
-        dataSources: ["mydata", "aja", "aje", "wires", "bing"],
+        chatId: ``,
         language: "English",
         aiName: "Jarvis",
         aiMemorySelfModify: true,
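For readability, here is the same block reconstructed as it reads after the change, taken directly from the hunk above: the search-related inputs (indexName, semanticConfiguration, roleInformation, calculateEmbeddings, dataSources) are dropped and a chatId input is added. Wrapping the fields in inputParameters inside export default is an assumption based on the destructuring of pathwayResolver.pathway.inputParameters in a later hunk; fields outside the hunk are unchanged and omitted.

    // Reconstructed from the hunk above, not an exact copy of the file.
    // The inputParameters/export default wrapper is assumed; omitted fields
    // are unchanged.
    export default {
        inputParameters: {
            privateData: false,
            chatHistory: [{role: '', content: []}],
            contextId: ``,
            chatId: ``,
            language: "English",
            aiName: "Jarvis",
            aiMemorySelfModify: true,
            // ...remaining parameters not shown in this hunk
        },
    };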
@@ -186,7 +182,7 @@ export default {
         let pathwayResolver = resolver;
 
         // Load input parameters and information into args
-        const { entityId, voiceResponse, aiMemorySelfModify, stream } = { ...pathwayResolver.pathway.inputParameters, ...args };
+        const { entityId, voiceResponse, aiMemorySelfModify, chatId } = { ...pathwayResolver.pathway.inputParameters, ...args };
 
         const entityConfig = loadEntityConfig(entityId);
         const { entityTools, entityToolsOpenAiFormat } = getToolsForEntity(entityConfig);
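The destructure above merges the pathway's declared inputParameters (the defaults) with the caller-supplied args; because args is spread last, caller values win, so chatId falls back to its empty-string default unless the caller passes one. A minimal standalone illustration of that merge pattern (the values here are invented):

    // Illustration only; the values are invented. Because args is spread
    // last, caller-supplied values override the pathway defaults.
    const inputParameters = { entityId: ``, chatId: ``, aiMemorySelfModify: true };
    const args = { chatId: 'abc123' };

    const { entityId, chatId, aiMemorySelfModify } = { ...inputParameters, ...args };
    // entityId === '', chatId === 'abc123', aiMemorySelfModify === true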
@@ -205,7 +201,8 @@ export default {
             entityUseMemory,
             entityInstructions,
             voiceResponse,
-            aiMemorySelfModify
+            aiMemorySelfModify,
+            chatId
         };
 
         pathwayResolver.args = {...args};
@@ -252,8 +249,6 @@ export default {
         // truncate the chat history in case there is really long content
         const truncatedChatHistory = resolver.modelExecutor.plugin.truncateMessagesToTargetLength(args.chatHistory, null, 1000);
 
-        const fetchTitleResponsePromise = callPathway('chat_title', {...args, chatHistory: truncatedChatHistory, stream: false});
-
         // Add the memory context to the chat history if applicable
         if (truncatedChatHistory.length > 1 && entityUseMemory) {
             const memoryContext = await callPathway('sys_read_memory', { ...args, chatHistory: truncatedChatHistory, section: 'memoryContext', priority: 0, recentHours: 0, stream: false }, resolver);
@@ -271,9 +266,6 @@ export default {
         try {
             let currentMessages = JSON.parse(JSON.stringify(args.chatHistory));
 
-            const title = await fetchTitleResponsePromise;
-            pathwayResolver.tool = JSON.stringify({ title });
-
             let response = await runAllPrompts({
                 ...args,
                 chatHistory: currentMessages,
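With the inline chat_title call and the pathwayResolver.tool = JSON.stringify({ title }) assignment removed in the two hunks above, this pathway no longer generates a conversation title itself. If a title is still wanted, a caller could request one separately; the following is a hedged sketch only, using the callPathway helper that appears elsewhere in this diff. The helper name fetchChatTitle is invented, the argument shape mirrors the removed call, and passing chatId is an assumption.

    // Hypothetical helper, not part of this diff: fetch a conversation title
    // via a separate call to the chat_title pathway. Dependencies are passed
    // in rather than imported because their module paths are not shown here.
    async function fetchChatTitle(callPathway, args, truncatedChatHistory, chatId) {
        return await callPathway('chat_title', {
            ...args,
            chatHistory: truncatedChatHistory, // same truncation as the removed call
            chatId,                            // assumption: tie the title to the chat
            stream: false,
        });
    }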
@@ -158,7 +158,7 @@ export default {
     ],
 
     executePathway: async ({args, runAllPrompts, resolver}) => {
-        const { text, filter, top, titleOnly, stream, dataSources, indexName, semanticConfiguration } = args;
+        const { text, filter, top, titleOnly, stream, chatId, indexName, semanticConfiguration } = args;
 
         // Map tool names to index names
         const toolToIndex = {
@@ -192,7 +192,8 @@ export default {
            titleOnly: titleOnly || false,
            indexName: toolIndexName,
            semanticConfiguration,
-           stream: stream || false
+           stream: stream || false,
+           chatId
        });
 
        const parsedResponse = JSON.parse(response);
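Taken together, the two hunks above thread chatId from the pathway's args down into the inner search call (dataSources is no longer read from args). A compressed, hypothetical illustration of that flow follows; runSearch is an invented stand-in for the call whose opening line falls outside this excerpt, and the tool-to-index mapping is omitted.

    // Hypothetical, compressed illustration of the flow in the two hunks
    // above; runSearch is a stub stand-in, not a real name from the diff.
    const runSearch = async (options) => JSON.stringify({ options });

    const executePathway = async ({ args }) => {
        const { text, filter, top, titleOnly, stream, chatId, indexName, semanticConfiguration } = args;

        const response = await runSearch({
            text,
            filter,
            top,
            titleOnly: titleOnly || false,
            indexName,
            semanticConfiguration,
            stream: stream || false,
            chatId, // new in 1.3.40: forwarded along with the search options
        });

        return JSON.parse(response);
    };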