@aj-archipelago/cortex 1.0.4 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -3
- package/config/default.example.json +18 -0
- package/config.js +28 -8
- package/helper_apps/MediaFileChunker/Dockerfile +20 -0
- package/helper_apps/MediaFileChunker/package-lock.json +18 -18
- package/helper_apps/MediaFileChunker/package.json +1 -1
- package/helper_apps/WhisperX/.dockerignore +27 -0
- package/helper_apps/WhisperX/Dockerfile +31 -0
- package/helper_apps/WhisperX/app-ts.py +76 -0
- package/helper_apps/WhisperX/app.py +115 -0
- package/helper_apps/WhisperX/docker-compose.debug.yml +12 -0
- package/helper_apps/WhisperX/docker-compose.yml +10 -0
- package/helper_apps/WhisperX/requirements.txt +6 -0
- package/index.js +1 -1
- package/lib/gcpAuthTokenHelper.js +37 -0
- package/lib/redisSubscription.js +1 -1
- package/package.json +9 -7
- package/pathways/basePathway.js +2 -2
- package/pathways/index.js +8 -2
- package/pathways/summary.js +2 -2
- package/pathways/sys_openai_chat.js +19 -0
- package/pathways/sys_openai_completion.js +11 -0
- package/pathways/{lc_test.mjs → test_langchain.mjs} +1 -1
- package/pathways/test_palm_chat.js +31 -0
- package/pathways/transcribe.js +3 -1
- package/pathways/translate.js +2 -1
- package/{graphql → server}/graphql.js +64 -62
- package/{graphql → server}/pathwayPrompter.js +9 -1
- package/{graphql → server}/pathwayResolver.js +46 -47
- package/{graphql → server}/plugins/azureTranslatePlugin.js +22 -0
- package/{graphql → server}/plugins/modelPlugin.js +15 -42
- package/server/plugins/openAiChatPlugin.js +134 -0
- package/{graphql → server}/plugins/openAiCompletionPlugin.js +38 -2
- package/{graphql → server}/plugins/openAiWhisperPlugin.js +59 -7
- package/server/plugins/palmChatPlugin.js +229 -0
- package/server/plugins/palmCompletionPlugin.js +134 -0
- package/{graphql → server}/prompt.js +11 -4
- package/server/rest.js +321 -0
- package/{graphql → server}/typeDef.js +30 -13
- package/tests/chunkfunction.test.js +1 -1
- package/tests/config.test.js +1 -1
- package/tests/main.test.js +282 -43
- package/tests/mocks.js +1 -1
- package/tests/modelPlugin.test.js +3 -15
- package/tests/openAiChatPlugin.test.js +125 -0
- package/tests/openai_api.test.js +147 -0
- package/tests/palmChatPlugin.test.js +256 -0
- package/tests/palmCompletionPlugin.test.js +87 -0
- package/tests/pathwayResolver.test.js +1 -1
- package/tests/server.js +23 -0
- package/tests/truncateMessages.test.js +1 -1
- package/graphql/plugins/openAiChatPlugin.js +0 -46
- package/tests/chunking.test.js +0 -155
- package/tests/translate.test.js +0 -126
- /package/{graphql → server}/chunker.js +0 -0
- /package/{graphql → server}/parser.js +0 -0
- /package/{graphql → server}/pathwayResponseParser.js +0 -0
- /package/{graphql → server}/plugins/localModelPlugin.js +0 -0
- /package/{graphql → server}/pubsub.js +0 -0
- /package/{graphql → server}/requestState.js +0 -0
- /package/{graphql → server}/resolver.js +0 -0
- /package/{graphql → server}/subscriptions.js +0 -0
|
@@ -3,15 +3,21 @@ class Prompt {
|
|
|
3
3
|
if (typeof params === 'string' || params instanceof String) {
|
|
4
4
|
this.prompt = params;
|
|
5
5
|
} else {
|
|
6
|
-
const { prompt, saveResultTo, messages } = params;
|
|
6
|
+
const { prompt, saveResultTo, messages, context, examples } = params;
|
|
7
7
|
this.prompt = prompt;
|
|
8
8
|
this.saveResultTo = saveResultTo;
|
|
9
9
|
this.messages = messages;
|
|
10
|
+
this.context = context;
|
|
11
|
+
this.examples = examples;
|
|
10
12
|
this.params = params;
|
|
11
13
|
}
|
|
12
14
|
|
|
13
|
-
this.usesTextInput = promptContains('text', this.prompt ? this.prompt : this.messages)
|
|
14
|
-
|
|
15
|
+
this.usesTextInput = promptContains('text', this.prompt ? this.prompt : this.messages) ||
|
|
16
|
+
(this.context && promptContains('text', this.context)) ||
|
|
17
|
+
(this.examples && promptContains('text', this.examples));
|
|
18
|
+
this.usesPreviousResult = promptContains('previousResult', this.prompt ? this.prompt : this.messages) ||
|
|
19
|
+
(this.context && promptContains('previousResult', this.context)) ||
|
|
20
|
+
(this.examples && promptContains('previousResult', this.examples));
|
|
15
21
|
this.debugInfo = '';
|
|
16
22
|
}
|
|
17
23
|
}
|
|
@@ -23,7 +29,8 @@ function promptContains(variable, prompt) {
|
|
|
23
29
|
let matches = [];
|
|
24
30
|
let match;
|
|
25
31
|
|
|
26
|
-
// if it's an array, it's
|
|
32
|
+
// if it's an array, it's either an OpenAI messages array or a PaLM messages
|
|
33
|
+
// array or a PaLM examples array, all of which have a content property
|
|
27
34
|
if (Array.isArray(prompt)) {
|
|
28
35
|
prompt.forEach(p => {
|
|
29
36
|
// eslint-disable-next-line no-cond-assign
|
package/server/rest.js
ADDED
|
@@ -0,0 +1,321 @@
|
|
|
1
|
+
// rest.js
|
|
2
|
+
// Implement the REST endpoints for the pathways
|
|
3
|
+
|
|
4
|
+
import { json } from 'express';
|
|
5
|
+
import pubsub from './pubsub.js';
|
|
6
|
+
import { requestState } from './requestState.js';
|
|
7
|
+
import { v4 as uuidv4 } from 'uuid';
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
// Execute a pathway via the GraphQL server on behalf of a REST request.
// Builds a GraphQL query from the pathway's REST variable definitions,
// pulls matching values out of req.body (optionally renamed through
// parameterMap, e.g. { text: 'prompt' }), and returns the pathway's
// result text — or the first GraphQL error message, or "".
const processRestRequest = async (server, req, pathway, name, parameterMap = {}) => {
    const fieldVariableDefs = pathway.typeDef(pathway).restDefinition || [];

    // Coerce an incoming JSON value to the declared GraphQL scalar type.
    const convertType = (value, type) => {
        if (type === 'Boolean') {
            // Fix: Boolean("false") === true. REST bodies sometimes carry
            // booleans as strings, so recognize the common "false" spellings
            // instead of coercing any non-empty string to true.
            if (typeof value === 'string') {
                return !['', '0', 'false'].includes(value.trim().toLowerCase());
            }
            return Boolean(value);
        } else if (type === 'Int') {
            return parseInt(value, 10);
        } else {
            return value;
        }
    };

    // Collect GraphQL variables from the request body.
    const variables = fieldVariableDefs.reduce((acc, variableDef) => {
        // parameterMap lets a REST body field (e.g. 'prompt') feed a
        // differently-named pathway variable (e.g. 'text').
        const requestBodyParamName = Object.keys(parameterMap).includes(variableDef.name)
            ? parameterMap[variableDef.name]
            : variableDef.name;

        if (Object.prototype.hasOwnProperty.call(req.body, requestBodyParamName)) {
            acc[variableDef.name] = convertType(req.body[requestBodyParamName], variableDef.type);
        }
        return acc;
    }, {});

    const variableParams = fieldVariableDefs.map(({ name, type }) => `$${name}: ${type}`).join(', ');
    const queryArgs = fieldVariableDefs.map(({ name }) => `${name}: $${name}`).join(', ');

    const query = `
        query ${name}(${variableParams}) {
            ${name}(${queryArgs}) {
                contextId
                previousResult
                result
            }
        }
    `;

    const result = await server.executeOperation({ query, variables });
    const resultText = result?.body?.singleResult?.data?.[name]?.result || result?.body?.singleResult?.errors?.[0]?.message || "";

    return resultText;
};
|
|
52
|
+
|
|
53
|
+
// Bridge pubsub REQUEST_PROGRESS events for `requestId` onto `res` as an
// OpenAI-style Server-Sent-Events stream. `jsonResponse` is the response
// template (text_completion or chat.completion.chunk shape) that gets
// mutated with each progress payload and written out as a `data:` event.
// Returns the pubsub subscription (a promise of the subscription id).
const processIncomingStream = (requestId, res, jsonResponse) => {

    // Set SSE headers and flush them so the client starts reading immediately.
    const startStream = (res) => {
        res.setHeader('Content-Type', 'text/event-stream');
        res.setHeader('Cache-Control', 'no-cache');
        res.setHeader('Connection', 'keep-alive');
        res.flushHeaders();
    };

    // Send the terminal chunk (finish_reason "stop") if one hasn't gone out
    // yet, then the OpenAI-style [DONE] sentinel.
    const finishStream = (res, jsonResponse) => {
        if (jsonResponse.choices?.[0]?.finish_reason !== "stop") {
            // Deep copy so the caller's template object isn't clobbered.
            const jsonEndStream = JSON.parse(JSON.stringify(jsonResponse));
            jsonEndStream.choices[0].finish_reason = "stop";
            jsonEndStream.choices[0].index = 0;
            if (jsonEndStream.object === 'text_completion') {
                jsonEndStream.choices[0].text = "";
            } else {
                jsonEndStream.choices[0].delta = {};
            }
            res.write(`data: ${JSON.stringify(jsonEndStream)}\n\n`);
        }
        res.write(`data: [DONE]\n\n`);
    };

    const sendStreamData = (data) => {
        res.write(`data: ${JSON.stringify(data)}\n\n`);
    };

    // Put the incoming text into the slot the response object type expects.
    const fillJsonResponse = (jsonResponse, inputText, finishReason) => {
        jsonResponse.choices[0].finish_reason = finishReason;
        if (jsonResponse.object === 'text_completion') {
            jsonResponse.choices[0].text = inputText;
        } else {
            jsonResponse.choices[0].delta.content = inputText;
        }
        return jsonResponse;
    };

    startStream(res);

    let subscription;

    const unsubscribe = async () => {
        if (subscription) {
            pubsub.unsubscribe(await subscription);
        }
    };

    // Fix: unsubscribe() is async and was called bare from a sync callback,
    // so any rejection became an unhandled promise rejection.
    const safeUnsubscribe = () => {
        unsubscribe().catch((err) => console.error(`Error unsubscribing from pubsub: ${err}`));
    };

    subscription = pubsub.subscribe('REQUEST_PROGRESS', (data) => {
        if (data.requestProgress.requestId === requestId) {
            const progress = data.requestProgress.progress;
            const progressData = data.requestProgress.data;

            try {
                const messageJson = JSON.parse(progressData);
                if (messageJson.choices) {
                    const { text, delta, finish_reason } = messageJson.choices[0];
                    if (messageJson.object === 'text_completion') {
                        fillJsonResponse(jsonResponse, text, finish_reason);
                    } else {
                        fillJsonResponse(jsonResponse, delta.content, finish_reason);
                    }
                } else {
                    // Non-choices JSON payloads are forwarded as-is — the parsed
                    // object lands in text/content. TODO(review): confirm callers
                    // expect an object rather than a string here.
                    fillJsonResponse(jsonResponse, messageJson, null);
                }
            } catch (error) {
                // Payload wasn't JSON — treat it as the final plain-text result.
                console.log(`progressData not JSON: ${progressData}`);
                fillJsonResponse(jsonResponse, progressData, "stop");
            }

            // "[DONE]" at progress 1 means the model already sent its final
            // chunk — close out without echoing the sentinel as data.
            if (progress === 1 && progressData.trim() === "[DONE]") {
                finishStream(res, jsonResponse);
                safeUnsubscribe();
                return;
            }
            sendStreamData(jsonResponse);

            if (progress === 1) {
                finishStream(res, jsonResponse);
                safeUnsubscribe();
            }
        }
    });

    // Fire the resolver for the async requestProgress
    console.log(`Rest Endpoint starting async requestProgress, requestId: ${requestId}`);
    const requestEntry = requestState[requestId];
    if (!requestEntry) {
        // Fix: destructuring requestState[requestId] threw after the SSE
        // headers were already flushed, leaving the client connection hanging.
        // End the stream cleanly instead.
        console.error(`No requestState entry found for requestId: ${requestId}`);
        finishStream(res, jsonResponse);
        safeUnsubscribe();
        return subscription;
    }
    const { resolver, args } = requestEntry;
    resolver(args);

    return subscription;
};
|
|
162
|
+
|
|
163
|
+
// Register REST endpoints for every enabled pathway on the Express `app`.
// Pathways either get a plain POST /rest/<name> endpoint, or — when they
// declare emulateOpenAIChatModel / emulateOpenAICompletionModel — are served
// through OpenAI-compatible /v1/completions, /v1/chat/completions and
// /v1/models endpoints. No-op unless config 'enableRestEndpoints' is set.
function buildRestEndpoints(pathways, app, server, config) {

    if (config.get('enableRestEndpoints')) {
        // Emulated OpenAI model name -> pathway name ('*' acts as a wildcard).
        const openAIChatModels = {};
        const openAICompletionModels = {};

        // Create normal REST endpoints or collect OpenAI emulation mappings per pathway
        for (const [name, pathway] of Object.entries(pathways)) {
            // Only expose endpoints for enabled pathways
            if (pathway.disabled) continue;

            // The pathway can either emulate an OpenAI endpoint or be a normal REST endpoint
            if (pathway.emulateOpenAIChatModel || pathway.emulateOpenAICompletionModel) {
                if (pathway.emulateOpenAIChatModel) {
                    openAIChatModels[pathway.emulateOpenAIChatModel] = name;
                }
                if (pathway.emulateOpenAICompletionModel) {
                    openAICompletionModels[pathway.emulateOpenAICompletionModel] = name;
                }
            } else {
                app.post(`/rest/${name}`, async (req, res) => {
                    const resultText = await processRestRequest(server, req, pathway, name);
                    res.send(resultText);
                });
            }
        }

        // Resolve the pathway for the requested model, falling back to the
        // '*' wildcard; sends a 404 and returns null when nothing matches.
        // (Fix: this lookup/404 logic was duplicated in both OpenAI endpoints.)
        const resolvePathwayName = (models, req, res) => {
            const modelName = req.body.model || 'gpt-3.5-turbo';
            const pathwayName = models[modelName] || models['*'];
            if (!pathwayName) {
                res.status(404).json({
                    error: `Model ${modelName} not found.`,
                });
                return null;
            }
            return pathwayName;
        };

        // OpenAI-compatible completions endpoint
        app.post('/v1/completions', async (req, res) => {
            const pathwayName = resolvePathwayName(openAICompletionModels, req, res);
            if (!pathwayName) return;

            const pathway = pathways[pathwayName];

            // OpenAI clients send the input as 'prompt'; pathways call it 'text'.
            const parameterMap = {
                text: 'prompt'
            };

            const resultText = await processRestRequest(server, req, pathway, pathwayName, parameterMap);

            const jsonResponse = {
                id: `cmpl`,
                object: "text_completion",
                created: Date.now(),
                model: req.body.model,
                choices: [
                    {
                        text: resultText,
                        index: 0,
                        logprobs: null,
                        finish_reason: "stop"
                    }
                ],
            };

            if (Boolean(req.body.stream)) {
                // When streaming, the pathway's result is the requestId used to
                // subscribe to progress events (see processIncomingStream).
                jsonResponse.id = `cmpl-${resultText}`;
                jsonResponse.choices[0].finish_reason = null;
                processIncomingStream(resultText, res, jsonResponse);
            } else {
                jsonResponse.id = `cmpl-${uuidv4()}`;
                res.json(jsonResponse);
            }
        });

        // OpenAI-compatible chat completions endpoint
        app.post('/v1/chat/completions', async (req, res) => {
            const pathwayName = resolvePathwayName(openAIChatModels, req, res);
            if (!pathwayName) return;

            const pathway = pathways[pathwayName];

            const resultText = await processRestRequest(server, req, pathway, pathwayName);

            const jsonResponse = {
                id: `chatcmpl`,
                object: "chat.completion",
                created: Date.now(),
                model: req.body.model,
                choices: [
                    {
                        message: {
                            role: "assistant",
                            content: resultText
                        },
                        index: 0,
                        finish_reason: "stop"
                    }
                ],
            };

            if (Boolean(req.body.stream)) {
                // Streaming uses the delta/chunk response shape.
                jsonResponse.id = `chatcmpl-${resultText}`;
                jsonResponse.choices[0] = {
                    delta: {
                        role: "assistant",
                        content: resultText
                    },
                    finish_reason: null
                };
                jsonResponse.object = "chat.completion.chunk";
                processIncomingStream(resultText, res, jsonResponse);
            } else {
                jsonResponse.id = `chatcmpl-${uuidv4()}`;
                res.json(jsonResponse);
            }
        });

        // OpenAI-compatible model listing
        app.get('/v1/models', async (req, res) => {
            const openAIModels = { ...openAIChatModels, ...openAICompletionModels };
            const defaultModelId = 'gpt-3.5-turbo';

            const models = Object.entries(openAIModels)
                .map(([modelId]) => {
                    // Wildcard mappings are advertised as the default model id.
                    if (modelId.includes('*')) {
                        modelId = defaultModelId;
                    }
                    return {
                        id: modelId,
                        object: 'model',
                        owned_by: 'openai',
                        permission: '',
                    };
                })
                // De-duplicate ids (a wildcard mapping can collide with an explicit one).
                .filter((model, index, self) => {
                    return index === self.findIndex((m) => m.id === model.id);
                })
                .sort((a, b) => a.id.localeCompare(b.id));

            res.json({
                data: models,
                object: 'list',
            });
        });
    }
}
|
|
320
|
+
|
|
321
|
+
export { buildRestEndpoints };
|
|
@@ -1,8 +1,30 @@
|
|
|
1
|
-
const
|
|
2
|
-
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
1
|
+
// Map a pathway parameter's default value to its GraphQL type and the
// literal used as the schema default:
// - boolean -> Boolean, string -> String, number -> Int
// - array of strings -> [String]; any other array (incl. empty) -> [Message]
// - other objects -> a list of the object's declared `objName` type
// Anything unrecognized (including null/undefined) falls back to String.
const getGraphQlType = (value) => {
    switch (typeof value) {
        case 'boolean':
            return {type: 'Boolean', defaultValue: 'false'};
        case 'string':
            return {type: 'String', defaultValue: `""`};
        case 'number':
            return {type: 'Int', defaultValue: '0'};
        case 'object':
            // Fix: typeof null === 'object', so a null default value used to
            // crash here reading `.objName` off null.
            if (value === null) {
                return {type: 'String', defaultValue: `""`};
            }
            if (Array.isArray(value)) {
                if (value.length > 0 && typeof(value[0]) === 'string') {
                    return {type: '[String]', defaultValue: '[]'};
                }
                return {type: '[Message]', defaultValue: '[]'};
            }
            return {type: `[${value.objName}]`, defaultValue: 'null'};
        default:
            return {type: 'String', defaultValue: `""`};
    }
};
|
|
6
28
|
|
|
7
29
|
const typeDef = (pathway) => {
|
|
8
30
|
const { name, objName, defaultInputParameters, inputParameters, format } = pathway;
|
|
@@ -31,20 +53,15 @@ const typeDef = (pathway) => {
|
|
|
31
53
|
|
|
32
54
|
const paramsStr = Object.entries(params)
|
|
33
55
|
.map(([key, value]) => {
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
} else {
|
|
37
|
-
return `${key}: ${GRAPHQL_TYPE_MAP[typeof value]} = ${
|
|
38
|
-
typeof value === 'string' ? `"${value}"` : value
|
|
39
|
-
}`;
|
|
40
|
-
}
|
|
56
|
+
const { type, defaultValue } = getGraphQlType(value);
|
|
57
|
+
return `${key}: ${type} = ${defaultValue}`;
|
|
41
58
|
})
|
|
42
59
|
.join('\n');
|
|
43
60
|
|
|
44
61
|
const restDefinition = Object.entries(params).map(([key, value]) => {
|
|
45
62
|
return {
|
|
46
63
|
name: key,
|
|
47
|
-
type: `${
|
|
64
|
+
type: `${getGraphQlType(value).type}`,
|
|
48
65
|
};
|
|
49
66
|
});
|
|
50
67
|
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import test from 'ava';
|
|
2
|
-
import { getSemanticChunks } from '../
|
|
2
|
+
import { getSemanticChunks } from '../server/chunker.js';
|
|
3
3
|
import { encode } from 'gpt-3-encoder';
|
|
4
4
|
|
|
5
5
|
const testText = `Lorem ipsum dolor sit amet, consectetur adipiscing elit. In id erat sem. Phasellus ac dapibus purus, in fermentum nunc. Mauris quis rutrum magna. Quisque rutrum, augue vel blandit posuere, augue magna convallis turpis, nec elementum augue mauris sit amet nunc. Aenean sit amet leo est. Nunc ante ex, blandit et felis ut, iaculis lacinia est. Phasellus dictum orci id libero ullamcorper tempor.
|
package/tests/config.test.js
CHANGED
|
@@ -45,7 +45,7 @@ test('config enableRestEndpoints', (t) => {
|
|
|
45
45
|
});
|
|
46
46
|
|
|
47
47
|
test('config openaiDefaultModel', (t) => {
|
|
48
|
-
const expectedDefault = '
|
|
48
|
+
const expectedDefault = 'gpt-3.5-turbo';
|
|
49
49
|
t.is(config.get('openaiDefaultModel'), expectedDefault);
|
|
50
50
|
});
|
|
51
51
|
|