@aj-archipelago/cortex 1.0.4 → 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/README.md +3 -3
  2. package/config/default.example.json +18 -0
  3. package/config.js +28 -8
  4. package/helper_apps/MediaFileChunker/Dockerfile +20 -0
  5. package/helper_apps/MediaFileChunker/package-lock.json +18 -18
  6. package/helper_apps/MediaFileChunker/package.json +1 -1
  7. package/helper_apps/WhisperX/.dockerignore +27 -0
  8. package/helper_apps/WhisperX/Dockerfile +31 -0
  9. package/helper_apps/WhisperX/app-ts.py +76 -0
  10. package/helper_apps/WhisperX/app.py +115 -0
  11. package/helper_apps/WhisperX/docker-compose.debug.yml +12 -0
  12. package/helper_apps/WhisperX/docker-compose.yml +10 -0
  13. package/helper_apps/WhisperX/requirements.txt +6 -0
  14. package/index.js +1 -1
  15. package/lib/gcpAuthTokenHelper.js +37 -0
  16. package/lib/redisSubscription.js +1 -1
  17. package/package.json +9 -7
  18. package/pathways/basePathway.js +2 -2
  19. package/pathways/index.js +8 -2
  20. package/pathways/summary.js +2 -2
  21. package/pathways/sys_openai_chat.js +19 -0
  22. package/pathways/sys_openai_completion.js +11 -0
  23. package/pathways/{lc_test.mjs → test_langchain.mjs} +1 -1
  24. package/pathways/test_palm_chat.js +31 -0
  25. package/pathways/transcribe.js +3 -1
  26. package/pathways/translate.js +2 -1
  27. package/{graphql → server}/graphql.js +64 -62
  28. package/{graphql → server}/pathwayPrompter.js +9 -1
  29. package/{graphql → server}/pathwayResolver.js +46 -47
  30. package/{graphql → server}/plugins/azureTranslatePlugin.js +22 -0
  31. package/{graphql → server}/plugins/modelPlugin.js +15 -42
  32. package/server/plugins/openAiChatPlugin.js +134 -0
  33. package/{graphql → server}/plugins/openAiCompletionPlugin.js +38 -2
  34. package/{graphql → server}/plugins/openAiWhisperPlugin.js +59 -7
  35. package/server/plugins/palmChatPlugin.js +229 -0
  36. package/server/plugins/palmCompletionPlugin.js +134 -0
  37. package/{graphql → server}/prompt.js +11 -4
  38. package/server/rest.js +321 -0
  39. package/{graphql → server}/typeDef.js +30 -13
  40. package/tests/chunkfunction.test.js +1 -1
  41. package/tests/config.test.js +1 -1
  42. package/tests/main.test.js +282 -43
  43. package/tests/mocks.js +1 -1
  44. package/tests/modelPlugin.test.js +3 -15
  45. package/tests/openAiChatPlugin.test.js +125 -0
  46. package/tests/openai_api.test.js +147 -0
  47. package/tests/palmChatPlugin.test.js +256 -0
  48. package/tests/palmCompletionPlugin.test.js +87 -0
  49. package/tests/pathwayResolver.test.js +1 -1
  50. package/tests/server.js +23 -0
  51. package/tests/truncateMessages.test.js +1 -1
  52. package/graphql/plugins/openAiChatPlugin.js +0 -46
  53. package/tests/chunking.test.js +0 -155
  54. package/tests/translate.test.js +0 -126
  55. /package/{graphql → server}/chunker.js +0 -0
  56. /package/{graphql → server}/parser.js +0 -0
  57. /package/{graphql → server}/pathwayResponseParser.js +0 -0
  58. /package/{graphql → server}/plugins/localModelPlugin.js +0 -0
  59. /package/{graphql → server}/pubsub.js +0 -0
  60. /package/{graphql → server}/requestState.js +0 -0
  61. /package/{graphql → server}/resolver.js +0 -0
  62. /package/{graphql → server}/subscriptions.js +0 -0
package/pathways/index.js CHANGED
@@ -3,10 +3,13 @@ import chat from './chat.js';
3
3
  import bias from './bias.js';
4
4
  import complete from './complete.js';
5
5
  import entities from './entities.js';
6
- import lc_test from './lc_test.mjs';
7
6
  import paraphrase from './paraphrase.js';
8
7
  import sentiment from './sentiment.js';
9
8
  import summary from './summary.js';
9
+ import sys_openai_chat from './sys_openai_chat.js';
10
+ import sys_openai_completion from './sys_openai_completion.js';
11
+ import test_langchain from './test_langchain.mjs';
12
+ import test_palm_chat from './test_palm_chat.js';
10
13
  import transcribe from './transcribe.js';
11
14
  import translate from './translate.js';
12
15
 
@@ -16,10 +19,13 @@ export {
16
19
  bias,
17
20
  complete,
18
21
  entities,
19
- lc_test,
20
22
  paraphrase,
21
23
  sentiment,
22
24
  summary,
25
+ sys_openai_chat,
26
+ sys_openai_completion,
27
+ test_langchain,
28
+ test_palm_chat,
23
29
  transcribe,
24
30
  translate
25
31
  };
@@ -3,8 +3,8 @@
3
3
  // This module exports a prompt that takes an input text and generates a summary using a custom resolver.
4
4
 
5
5
  // Import required modules
6
- import { semanticTruncate } from '../graphql/chunker.js';
7
- import { PathwayResolver } from '../graphql/pathwayResolver.js';
6
+ import { semanticTruncate } from '../server/chunker.js';
7
+ import { PathwayResolver } from '../server/pathwayResolver.js';
8
8
 
9
9
  export default {
10
10
  // The main prompt function that takes the input text and asks to generate a summary.
@@ -0,0 +1,19 @@
1
+ // sys_openai_chat.js
2
+ // default handler for openAI chat endpoints when REST endpoints are enabled
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt:
8
+ [
9
+ new Prompt({ messages: [
10
+ "{{messages}}",
11
+ ]}),
12
+ ],
13
+ inputParameters: {
14
+ messages: [],
15
+ },
16
+ model: 'oai-gpturbo',
17
+ useInputChunking: false,
18
+ emulateOpenAIChatModel: '*',
19
+ }
@@ -0,0 +1,11 @@
1
+ // sys_openai_completion.js
2
+ // default handler for openAI completion endpoints when REST endpoints are enabled
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ export default {
7
+ prompt: `{{text}}`,
8
+ model: 'oai-gpturbo',
9
+ useInputChunking: false,
10
+ emulateOpenAICompletionModel: '*',
11
+ }
@@ -1,4 +1,4 @@
1
- // lc_test.js
1
+ // test_langchain.mjs
2
2
  // LangChain Cortex integration test
3
3
 
4
4
  // Import required modules
@@ -0,0 +1,31 @@
1
+ //test_palm_chat.mjs
2
+ // Test for handling of prompts in the PaLM chat format for Cortex
3
+
4
+ import { Prompt } from '../server/prompt.js';
5
+
6
+ // Description: Have a chat with a bot that uses context to understand the conversation
7
+ export default {
8
+ prompt:
9
+ [
10
+ new Prompt({
11
+ context: "Instructions:\nYou an AI entity working a global media network. You are truthful, kind, and helpful. Your expertise includes journalism, journalistic ethics, researching and composing documents, and technology. You know the current date and time - it is {{now}}.",
12
+ examples: [
13
+ {
14
+ input: {"content": "What is your expertise?"},
15
+ output: {"content": "I am an expert in journalism and journalistic ethics."}
16
+ }],
17
+ messages: [
18
+ {"author": "user", "content": "Hi how are you today?"},
19
+ {"author": "assistant", "content": "I am doing well. How are you?"},
20
+ {"author": "user", "content": "I am doing well. What is your name?"},
21
+ {"author": "assistant", "content": "My name is Hula. What is your name?"},
22
+ {"author": "user", "content": "My name is Bob. What is your expertise?"},
23
+ ]}),
24
+ ],
25
+ inputParameters: {
26
+ chatHistory: [],
27
+ contextId: ``,
28
+ },
29
+ model: 'palm-chat',
30
+ useInputChunking: false,
31
+ }
@@ -4,8 +4,10 @@ export default {
4
4
  inputParameters: {
5
5
  file: ``,
6
6
  language: ``,
7
+ responseFormat: `text`,
8
+ wordTimestamped: false,
7
9
  },
8
- timeout: 1800, // in seconds
10
+ timeout: 3600, // in seconds
9
11
  };
10
12
 
11
13
 
@@ -14,6 +14,7 @@ export default {
14
14
  },
15
15
 
16
16
  // Set the timeout for the translation process, in seconds.
17
- timeout: 300,
17
+ timeout: 400,
18
+ inputChunkSize: 500,
18
19
  };
19
20
 
@@ -1,24 +1,29 @@
1
- import { createServer } from 'http';
2
- import {
3
- ApolloServerPluginDrainHttpServer,
4
- ApolloServerPluginLandingPageLocalDefault,
5
- } from 'apollo-server-core';
1
+ // graphql.js
2
+ // Setup the Apollo server and Express middleware
3
+
4
+ import { ApolloServerPluginDrainHttpServer } from '@apollo/server/plugin/drainHttpServer';
5
+ import { ApolloServerPluginLandingPageLocalDefault } from '@apollo/server/plugin/landingPage/default';
6
+ import { ApolloServer } from '@apollo/server';
7
+ import { expressMiddleware } from '@apollo/server/express4';
6
8
  import { makeExecutableSchema } from '@graphql-tools/schema';
7
9
  import { WebSocketServer } from 'ws';
8
10
  import { useServer } from 'graphql-ws/lib/use/ws';
9
11
  import express from 'express';
10
- import { ApolloServer } from 'apollo-server-express';
12
+ import http from 'http';
11
13
  import Keyv from 'keyv';
14
+ import cors from 'cors';
12
15
  import { KeyvAdapter } from '@apollo/utils.keyvadapter';
13
- import responseCachePlugin from 'apollo-server-plugin-response-cache';
16
+ import responseCachePlugin from '@apollo/server-plugin-response-cache';
14
17
  import subscriptions from './subscriptions.js';
15
18
  import { buildLimiters } from '../lib/request.js';
16
19
  import { cancelRequestResolver } from './resolver.js';
17
20
  import { buildPathways, buildModels } from '../config.js';
18
21
  import { requestState } from './requestState.js';
22
+ import { buildRestEndpoints } from './rest.js';
19
23
 
24
+ // Utility functions
25
+ // Server plugins
20
26
  const getPlugins = (config) => {
21
- // server plugins
22
27
  const plugins = [
23
28
  ApolloServerPluginLandingPageLocalDefault({ embed: true }), // For local development.
24
29
  ];
@@ -39,41 +44,8 @@ const getPlugins = (config) => {
39
44
  return { plugins, cache };
40
45
  }
41
46
 
42
- const buildRestEndpoints = (pathways, app, server, config) => {
43
- for (const [name, pathway] of Object.entries(pathways)) {
44
- // Only expose endpoints for enabled pathways that explicitly want to expose a REST endpoint
45
- if (pathway.disabled || !config.get('enableRestEndpoints')) continue;
46
-
47
- const fieldVariableDefs = pathway.typeDef(pathway).restDefinition || [];
48
-
49
- app.post(`/rest/${name}`, async (req, res) => {
50
- const variables = fieldVariableDefs.reduce((acc, variableDef) => {
51
- if (Object.prototype.hasOwnProperty.call(req.body, variableDef.name)) {
52
- acc[variableDef.name] = req.body[variableDef.name];
53
- }
54
- return acc;
55
- }, {});
56
-
57
- const variableParams = fieldVariableDefs.map(({ name, type }) => `$${name}: ${type}`).join(', ');
58
- const queryArgs = fieldVariableDefs.map(({ name }) => `${name}: $${name}`).join(', ');
59
-
60
- const query = `
61
- query ${name}(${variableParams}) {
62
- ${name}(${queryArgs}) {
63
- contextId
64
- previousResult
65
- result
66
- }
67
- }
68
- `;
69
-
70
- const result = await server.executeOperation({ query, variables });
71
- res.json(result.data[name]);
72
- });
73
- }
74
- };
75
47
 
76
- //typeDefs
48
+ // Type Definitions for GraphQL
77
49
  const getTypedefs = (pathways) => {
78
50
 
79
51
  const defaultTypeDefs = `#graphql
@@ -111,6 +83,7 @@ const getTypedefs = (pathways) => {
111
83
  return typeDefs.join('\n');
112
84
  }
113
85
 
86
+ // Resolvers for GraphQL
114
87
  const getResolvers = (config, pathways) => {
115
88
  const resolverFunctions = {};
116
89
  for (const [name, pathway] of Object.entries(pathways)) {
@@ -118,6 +91,7 @@ const getResolvers = (config, pathways) => {
118
91
  resolverFunctions[name] = (parent, args, contextValue, info) => {
119
92
  // add shared state to contextValue
120
93
  contextValue.pathway = pathway;
94
+ contextValue.config = config;
121
95
  return pathway.rootResolver(parent, args, contextValue, info);
122
96
  }
123
97
  }
@@ -131,7 +105,7 @@ const getResolvers = (config, pathways) => {
131
105
  return resolvers;
132
106
  }
133
107
 
134
- //graphql api build factory method
108
+ // Build the server including the GraphQL schema and REST endpoints
135
109
  const build = async (config) => {
136
110
  // First perform config build
137
111
  await buildPathways(config);
@@ -150,9 +124,9 @@ const build = async (config) => {
150
124
 
151
125
  const { plugins, cache } = getPlugins(config);
152
126
 
153
- const app = express()
127
+ const app = express();
154
128
 
155
- const httpServer = createServer(app);
129
+ const httpServer = http.createServer(app);
156
130
 
157
131
  // Creating the WebSocket server
158
132
  const wsServer = new WebSocketServer({
@@ -182,35 +156,63 @@ const build = async (config) => {
182
156
  },
183
157
  };
184
158
  },
185
- }]),
186
- context: ({ req, res }) => ({ req, res, config, requestState }),
159
+ }
160
+ ]),
187
161
  });
188
162
 
189
163
  // If CORTEX_API_KEY is set, we roll our own auth middleware - usually not used if you're being fronted by a proxy
190
164
  const cortexApiKey = config.get('cortexApiKey');
165
+ if (cortexApiKey) {
166
+ app.use((req, res, next) => {
167
+ if (cortexApiKey && req.headers['Cortex-Api-Key'] !== cortexApiKey && req.query['Cortex-Api-Key'] !== cortexApiKey) {
168
+ if (req.baseUrl === '/graphql' || req.headers['content-type'] === 'application/graphql') {
169
+ res.status(401)
170
+ .set('WWW-Authenticate', 'Cortex-Api-Key')
171
+ .set('X-Cortex-Api-Key-Info', 'Server requires Cortex API Key')
172
+ .json({
173
+ errors: [
174
+ {
175
+ message: 'Unauthorized',
176
+ extensions: {
177
+ code: 'UNAUTHENTICATED',
178
+ },
179
+ },
180
+ ],
181
+ });
182
+ } else {
183
+ res.status(401)
184
+ .set('WWW-Authenticate', 'Cortex-Api-Key')
185
+ .set('X-Cortex-Api-Key-Info', 'Server requires Cortex API Key')
186
+ .send('Unauthorized');
187
+ }
188
+ } else {
189
+ next();
190
+ }
191
+ });
192
+ };
191
193
 
192
- app.use((req, res, next) => {
193
- if (cortexApiKey && req.headers.cortexApiKey !== cortexApiKey && req.query.cortexApiKey !== cortexApiKey) {
194
- res.status(401).send('Unauthorized');
195
- } else {
196
- next();
197
- }
198
- });
199
-
200
- // Use the JSON body parser middleware for REST endpoints
194
+ // Parse the body for REST endpoints
201
195
  app.use(express.json());
202
-
203
- // add the REST endpoints
204
- buildRestEndpoints(pathways, app, server, config);
205
196
 
206
- // if local start server
197
+ // Server Startup Function
207
198
  const startServer = async () => {
208
199
  await server.start();
209
- server.applyMiddleware({ app });
200
+ app.use(
201
+ '/graphql',
202
+
203
+ cors(),
204
+
205
+ expressMiddleware(server, {
206
+ context: async ({ req, res }) => ({ req, res, config, requestState }),
207
+ }),
208
+ );
209
+
210
+ // add the REST endpoints
211
+ buildRestEndpoints(pathways, app, server, config);
210
212
 
211
213
  // Now that our HTTP server is fully set up, we can listen to it.
212
214
  httpServer.listen(config.get('PORT'), () => {
213
- console.log(`🚀 Server is now running at http://localhost:${config.get('PORT')}${server.graphqlPath}`);
215
+ console.log(`🚀 Server is now running at http://localhost:${config.get('PORT')}/graphql`);
214
216
  });
215
217
  };
216
218
 
@@ -4,6 +4,8 @@ import OpenAICompletionPlugin from './plugins/openAiCompletionPlugin.js';
4
4
  import AzureTranslatePlugin from './plugins/azureTranslatePlugin.js';
5
5
  import OpenAIWhisperPlugin from './plugins/openAiWhisperPlugin.js';
6
6
  import LocalModelPlugin from './plugins/localModelPlugin.js';
7
+ import PalmChatPlugin from './plugins/palmChatPlugin.js';
8
+ import PalmCompletionPlugin from './plugins/palmCompletionPlugin.js';
7
9
 
8
10
  class PathwayPrompter {
9
11
  constructor({ config, pathway }) {
@@ -27,12 +29,18 @@ class PathwayPrompter {
27
29
  case 'OPENAI-COMPLETION':
28
30
  plugin = new OpenAICompletionPlugin(config, pathway);
29
31
  break;
30
- case 'OPENAI_WHISPER':
32
+ case 'OPENAI-WHISPER':
31
33
  plugin = new OpenAIWhisperPlugin(config, pathway);
32
34
  break;
33
35
  case 'LOCAL-CPP-MODEL':
34
36
  plugin = new LocalModelPlugin(config, pathway);
35
37
  break;
38
+ case 'PALM-CHAT':
39
+ plugin = new PalmChatPlugin(config, pathway);
40
+ break;
41
+ case 'PALM-COMPLETION':
42
+ plugin = new PalmCompletionPlugin(config, pathway);
43
+ break;
36
44
  default:
37
45
  throw new Error(`Unsupported model type: ${model.type}`);
38
46
  }
@@ -42,66 +42,57 @@ class PathwayResolver {
42
42
  }
43
43
 
44
44
  async asyncResolve(args) {
45
- // Wait with a sleep promise for the race condition to resolve
46
- // const results = await Promise.all([this.promptAndParse(args), await new Promise(resolve => setTimeout(resolve, 250))]);
47
- const data = await this.promptAndParse(args);
48
- // Process the results for async
49
- if(args.async || typeof data === 'string') { // if async flag set or processed async and got string response
45
+ const responseData = await this.promptAndParse(args);
46
+
47
+ // Either we're dealing with an async request or a stream
48
+ if(args.async || typeof responseData === 'string') {
50
49
  const { completedCount, totalCount } = requestState[this.requestId];
51
- requestState[this.requestId].data = data;
50
+ requestState[this.requestId].data = responseData;
52
51
  pubsub.publish('REQUEST_PROGRESS', {
53
52
  requestProgress: {
54
53
  requestId: this.requestId,
55
54
  progress: completedCount / totalCount,
56
- data: JSON.stringify(data),
55
+ data: JSON.stringify(responseData),
57
56
  }
58
57
  });
59
- } else { //stream
60
- for (const handle of data) {
61
- handle.on('data', data => {
62
- console.log(data.toString());
63
- const lines = data.toString().split('\n').filter(line => line.trim() !== '');
64
- for (const line of lines) {
65
- const message = line.replace(/^data: /, '');
66
- if (message === '[DONE]') {
67
- // Send stream finished message
68
- pubsub.publish('REQUEST_PROGRESS', {
69
- requestProgress: {
70
- requestId: this.requestId,
71
- data: null,
72
- progress: 1,
73
- }
74
- });
75
- return; // Stream finished
76
- }
77
- try {
78
- const parsed = JSON.parse(message);
79
- const result = this.pathwayPrompter.plugin.parseResponse(parsed)
80
-
81
- pubsub.publish('REQUEST_PROGRESS', {
82
- requestProgress: {
83
- requestId: this.requestId,
84
- data: JSON.stringify(result)
85
- }
86
- });
87
- } catch (error) {
88
- console.error('Could not JSON parse stream message', message, error);
89
- }
58
+ } else { // stream
59
+ const incomingMessage = Array.isArray(responseData) && responseData.length > 0 ? responseData[0] : responseData;
60
+ incomingMessage.on('data', data => {
61
+ const events = data.toString().split('\n');
62
+
63
+ events.forEach(event => {
64
+ if (event.trim() === '') return; // Skip empty lines
65
+
66
+ const message = event.replace(/^data: /, '');
67
+
68
+ //console.log(`====================================`);
69
+ //console.log(`STREAM EVENT: ${event}`);
70
+ //console.log(`MESSAGE: ${message}`);
71
+
72
+ const requestProgress = {
73
+ requestId: this.requestId,
74
+ data: message,
75
+ }
76
+
77
+ if (message.trim() === '[DONE]') {
78
+ requestProgress.progress = 1;
79
+ }
80
+
81
+ try {
82
+ pubsub.publish('REQUEST_PROGRESS', {
83
+ requestProgress: requestProgress
84
+ });
85
+ } catch (error) {
86
+ console.error('Could not JSON parse stream message', message, error);
90
87
  }
91
88
  });
92
-
93
- // data.on('end', () => {
94
- // console.log("stream done");
95
- // });
96
- }
97
-
89
+ });
98
90
  }
99
91
  }
100
92
 
101
93
  async resolve(args) {
94
+ // Either we're dealing with an async request, stream, or regular request
102
95
  if (args.async || args.stream) {
103
- // Asyncronously process the request
104
- // this.asyncResolve(args);
105
96
  if (!requestState[this.requestId]) {
106
97
  requestState[this.requestId] = {}
107
98
  }
@@ -289,7 +280,15 @@ class PathwayResolver {
289
280
  if (requestState[this.requestId].canceled) {
290
281
  return;
291
282
  }
292
- const result = await this.pathwayPrompter.execute(text, { ...parameters, ...this.savedContext }, prompt, this);
283
+ let result = '';
284
+
285
+ // If this text is empty, skip applying the prompt as it will likely be a nonsensical result
286
+ if (!/^\s*$/.test(text) || parameters?.file) {
287
+ result = await this.pathwayPrompter.execute(text, { ...parameters, ...this.savedContext }, prompt, this);
288
+ } else {
289
+ result = text;
290
+ }
291
+
293
292
  requestState[this.requestId].completedCount++;
294
293
 
295
294
  const { completedCount, totalCount } = requestState[this.requestId];
@@ -35,6 +35,28 @@ class AzureTranslatePlugin extends ModelPlugin {
35
35
 
36
36
  return this.executeRequest(url, data, params, headers, prompt);
37
37
  }
38
+
39
+ // Parse the response from the Azure Translate API
40
+ parseResponse(data) {
41
+ if (Array.isArray(data) && data.length > 0 && data[0].translations) {
42
+ return data[0].translations[0].text.trim();
43
+ } else {
44
+ return data;
45
+ }
46
+ }
47
+
48
+ // Override the logging function to display the request and response
49
+ logRequestData(data, responseData, prompt) {
50
+ const separator = `\n=== ${this.pathwayName}.${this.requestCount++} ===\n`;
51
+ console.log(separator);
52
+
53
+ const modelInput = data[0].Text;
54
+
55
+ console.log(`\x1b[36m${modelInput}\x1b[0m`);
56
+ console.log(`\x1b[34m> ${this.parseResponse(responseData)}\x1b[0m`);
57
+
58
+ prompt && prompt.debugInfo && (prompt.debugInfo += `${separator}${JSON.stringify(data)}`);
59
+ }
38
60
  }
39
61
 
40
62
  export default AzureTranslatePlugin;
@@ -62,7 +62,7 @@ class ModelPlugin {
62
62
  const message = tokenLengths[index].message;
63
63
 
64
64
  // Skip system messages
65
- if (message.role === 'system') {
65
+ if (message?.role === 'system') {
66
66
  index++;
67
67
  continue;
68
68
  }
@@ -113,7 +113,7 @@ class ModelPlugin {
113
113
  let output = "";
114
114
  if (messages && messages.length) {
115
115
  for (let message of messages) {
116
- output += (message.role && (message.content || message.content === '')) ? `<|im_start|>${message.role}\n${message.content}\n<|im_end|>\n` : `${message}\n`;
116
+ output += ((message.author || message.role) && (message.content || message.content === '')) ? `<|im_start|>${(message.author || message.role)}\n${message.content}\n<|im_end|>\n` : `${message}\n`;
117
117
  }
118
118
  // you always want the assistant to respond next so add a
119
119
  // directive for that
@@ -124,6 +124,7 @@ class ModelPlugin {
124
124
  return output;
125
125
  }
126
126
 
127
+ // compile the Prompt
127
128
  getCompiledPrompt(text, parameters, prompt) {
128
129
  const combinedParameters = { ...this.promptParameters, ...parameters };
129
130
  const modelPrompt = this.getModelPrompt(prompt, parameters);
@@ -132,9 +133,9 @@ class ModelPlugin {
132
133
  const modelPromptMessagesML = this.messagesToChatML(modelPromptMessages);
133
134
 
134
135
  if (modelPromptMessagesML) {
135
- return { modelPromptMessages, tokenLength: encode(modelPromptMessagesML).length };
136
+ return { modelPromptMessages, tokenLength: encode(modelPromptMessagesML).length, modelPrompt };
136
137
  } else {
137
- return { modelPromptText, tokenLength: encode(modelPromptText).length };
138
+ return { modelPromptText, tokenLength: encode(modelPromptText).length, modelPrompt };
138
139
  }
139
140
  }
140
141
 
@@ -147,12 +148,11 @@ class ModelPlugin {
147
148
  return this.promptParameters.inputParameters?.tokenRatio ?? this.promptParameters.tokenRatio ?? DEFAULT_PROMPT_TOKEN_RATIO;
148
149
  }
149
150
 
150
-
151
151
  getModelPrompt(prompt, parameters) {
152
152
  if (typeof(prompt) === 'function') {
153
- return prompt(parameters);
153
+ return prompt(parameters);
154
154
  } else {
155
- return prompt;
155
+ return prompt;
156
156
  }
157
157
  }
158
158
 
@@ -160,20 +160,20 @@ class ModelPlugin {
160
160
  if (!modelPrompt.messages) {
161
161
  return null;
162
162
  }
163
-
163
+
164
164
  // First run handlebars compile on the pathway messages
165
165
  const compiledMessages = modelPrompt.messages.map((message) => {
166
166
  if (message.content) {
167
167
  const compileText = HandleBars.compile(message.content);
168
168
  return {
169
- role: message.role,
169
+ ...message,
170
170
  content: compileText({ ...combinedParameters, text }),
171
171
  };
172
172
  } else {
173
173
  return message;
174
174
  }
175
175
  });
176
-
176
+
177
177
  // Next add in any parameters that are referenced by name in the array
178
178
  const expandedMessages = compiledMessages.flatMap((message) => {
179
179
  if (typeof message === 'string') {
@@ -188,7 +188,7 @@ class ModelPlugin {
188
188
  return [message];
189
189
  }
190
190
  });
191
-
191
+
192
192
  return expandedMessages;
193
193
  }
194
194
 
@@ -197,44 +197,17 @@ class ModelPlugin {
197
197
  return generateUrl({ ...this.model, ...this.environmentVariables, ...this.config });
198
198
  }
199
199
 
200
- //simples form string single or list return
201
- parseResponse(data) {
202
- const { choices } = data;
203
- if (!choices || !choices.length) {
204
- if (Array.isArray(data) && data.length > 0 && data[0].translations) {
205
- return data[0].translations[0].text.trim();
206
- } else {
207
- return data;
208
- }
209
- }
210
-
211
- // if we got a choices array back with more than one choice, return the whole array
212
- if (choices.length > 1) {
213
- return choices;
214
- }
215
-
216
- // otherwise, return the first choice
217
- const textResult = choices[0].text && choices[0].text.trim();
218
- const messageResult = choices[0].message && choices[0].message.content && choices[0].message.content.trim();
219
-
220
- return messageResult ?? textResult ?? null;
221
- }
200
+ // Default response parsing
201
+ parseResponse(data) { return data; };
222
202
 
203
+ // Default simple logging
223
204
  logRequestData(data, responseData, prompt) {
224
205
  const separator = `\n=== ${this.pathwayName}.${this.requestCount++} ===\n`;
225
206
  console.log(separator);
226
207
 
227
208
  const modelInput = data.prompt || (data.messages && data.messages[0].content) || (data.length > 0 && data[0].Text) || null;
228
209
 
229
- if (data && data.messages && data.messages.length > 1) {
230
- data.messages.forEach((message, index) => {
231
- const words = message.content.split(" ");
232
- const tokenCount = encode(message.content).length;
233
- const preview = words.length < 41 ? message.content : words.slice(0, 20).join(" ") + " ... " + words.slice(-20).join(" ");
234
-
235
- console.log(`\x1b[36mMessage ${index + 1}: Role: ${message.role}, Tokens: ${tokenCount}, Content: "${preview}"\x1b[0m`);
236
- });
237
- } else {
210
+ if (modelInput) {
238
211
  console.log(`\x1b[36m${modelInput}\x1b[0m`);
239
212
  }
240
213