backend-manager 4.0.3 → 4.0.5

This diff shows the changes between package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "backend-manager",
3
- "version": "4.0.3",
3
+ "version": "4.0.5",
4
4
  "description": "Quick tools for developing Firebase functions",
5
5
  "main": "src/manager/index.js",
6
6
  "bin": {
@@ -51,8 +51,8 @@
51
51
  "chalk": "^4.1.2",
52
52
  "cors": "^2.8.5",
53
53
  "dotenv": "^16.4.5",
54
- "express": "^4.21.0",
55
- "firebase-admin": "^12.5.0",
54
+ "express": "^4.21.1",
55
+ "firebase-admin": "^12.6.0",
56
56
  "firebase-functions": "^6.0.1",
57
57
  "fs-jetpack": "^5.1.0",
58
58
  "glob": "^11.0.0",
@@ -213,10 +213,10 @@ BackendAssistant.prototype.init = function (ref, options) {
213
213
  self.constant.pastTime.timestampUNIX = 915148800;
214
214
 
215
215
  // Schema
216
- self.schema = {
217
- dir: '',
218
- name: '',
219
- }
216
+ // self.schema = {
217
+ // dir: '',
218
+ // name: '',
219
+ // }
220
220
 
221
221
  // Log options
222
222
  if (
@@ -100,8 +100,8 @@ Middleware.prototype.run = function (libPath, options) {
100
100
  // Resolve settings
101
101
  try {
102
102
  // Attach schema to assistant
103
- assistant.schema.dir = schemasDir;
104
- assistant.schema.name = options.schema;
103
+ // assistant.schema.dir = schemasDir;
104
+ // assistant.schema.name = options.schema;
105
105
  assistant.settings = Manager.Settings().resolve(assistant, undefined, data, {dir: schemasDir, schema: options.schema});
106
106
  } catch (e) {
107
107
  return assistant.respond(new Error(`Unable to resolve schema ${options.schema}: ${e.message}`), {code: 500, sentry: true});
@@ -7,56 +7,86 @@ const JSON5 = require('json5');
7
7
  // Constants
8
8
  const DEFAULT_MODEL = 'gpt-4o';
9
9
  const TOKEN_COST_TABLE = {
10
- // Sept 21st, 2024
10
+ // Oct 7th, 2024
11
11
  'gpt-4o': {
12
- input: 0.00500,
13
- output: 0.01500,
12
+ input: 0.002500,
13
+ output: 0.010000,
14
14
  },
15
15
  'gpt-4o-mini': {
16
16
  input: 0.000150,
17
17
  output: 0.000600,
18
18
  },
19
19
  'o1-preview': {
20
- input: 0.01500,
21
- output: 0.06000,
20
+ input: 0.015000,
21
+ output: 0.060000,
22
22
  },
23
23
  'o1-mini': {
24
- input: 0.00300,
25
- output: 0.01200,
24
+ input: 0.003000,
25
+ output: 0.012000,
26
26
  },
27
27
  'gpt-4-turbo': {
28
- input: 0.01000,
29
- output: 0.03000,
28
+ input: 0.010000,
29
+ output: 0.030000,
30
30
  },
31
31
  'gpt-4': {
32
- input: 0.03000,
33
- output: 0.06000,
32
+ input: 0.030000,
33
+ output: 0.060000,
34
34
  },
35
35
  'gpt-3.5-turbo': {
36
- input: 0.00050,
37
- output: 0.00150,
36
+ input: 0.000500,
37
+ output: 0.001500,
38
38
  },
39
39
 
40
+ // // Sept 21st, 2024
41
+ // 'gpt-4o': {
42
+ // input: 0.005000,
43
+ // output: 0.015000,
44
+ // },
45
+ // 'gpt-4o-mini': {
46
+ // input: 0.000150,
47
+ // output: 0.000600,
48
+ // },
49
+ // 'o1-preview': {
50
+ // input: 0.015000,
51
+ // output: 0.060000,
52
+ // },
53
+ // 'o1-mini': {
54
+ // input: 0.003000,
55
+ // output: 0.012000,
56
+ // },
57
+ // 'gpt-4-turbo': {
58
+ // input: 0.010000,
59
+ // output: 0.030000,
60
+ // },
61
+ // 'gpt-4': {
62
+ // input: 0.030000,
63
+ // output: 0.060000,
64
+ // },
65
+ // 'gpt-3.5-turbo': {
66
+ // input: 0.000500,
67
+ // output: 0.001500,
68
+ // },
69
+
40
70
  // // Jul 18th, 2024
41
71
  // 'gpt-4o': {
42
- // input: 0.00500,
43
- // output: 0.01500,
72
+ // input: 0.005000,
73
+ // output: 0.015000,
44
74
  // },
45
75
  // 'gpt-4o-mini': {
46
76
  // input: 0.000150,
47
77
  // output: 0.000600,
48
78
  // },
49
79
  // 'gpt-4-turbo': {
50
- // input: 0.01000,
51
- // output: 0.03000,
80
+ // input: 0.010000,
81
+ // output: 0.030000,
52
82
  // },
53
83
  // 'gpt-4': {
54
- // input: 0.03000,
55
- // output: 0.06000,
84
+ // input: 0.030000,
85
+ // output: 0.060000,
56
86
  // },
57
87
  // 'gpt-3.5-turbo': {
58
- // input: 0.00050,
59
- // output: 0.00150,
88
+ // input: 0.000500,
89
+ // output: 0.001500,
60
90
  // },
61
91
 
62
92
  // // May 13th, 2024
@@ -194,36 +224,46 @@ OpenAI.prototype.request = function (options) {
194
224
  const assistant = self.assistant;
195
225
 
196
226
  return new Promise(async function(resolve, reject) {
227
+ // Deep merge options
197
228
  options = _.merge({}, options);
198
229
 
230
+ // Set defaults
199
231
  options.model = typeof options.model === 'undefined' ? DEFAULT_MODEL : options.model;
232
+ options.response = typeof options.response === 'undefined' ? undefined : options.response;
200
233
  options.timeout = typeof options.timeout === 'undefined' ? 120000 : options.timeout;
201
234
  options.moderate = typeof options.moderate === 'undefined' ? true : options.moderate;
202
235
  options.log = typeof options.log === 'undefined' ? false : options.log;
203
236
  options.user = options.user || assistant.getUser();
204
237
 
238
+ // Format retries
205
239
  options.retries = typeof options.retries === 'undefined' ? 0 : options.retries;
206
240
  options.retryTriggers = typeof options.retryTriggers === 'undefined' ? ['network', 'parse'] : options.retryTriggers;
207
241
 
242
+ // Format other options
243
+ options.temperature = typeof options.temperature === 'undefined' ? 0.7 : options.temperature;
244
+ options.maxTokens = typeof options.maxTokens === 'undefined' ? 512 : options.maxTokens;
245
+
246
+ // Custom options
247
+ options.dedupeConsecutiveRoles = typeof options.dedupeConsecutiveRoles === 'undefined' ? true : options.dedupeConsecutiveRoles;
248
+
249
+ // Format prompt
208
250
  options.prompt = options.prompt || {};
209
251
  options.prompt.path = options.prompt.path || '';
210
- options.prompt.text = options.prompt.text || options.prompt.content || '';
252
+ options.prompt.content = options.prompt.content || options.prompt.content || '';
211
253
  options.prompt.settings = options.prompt.settings || {};
212
254
 
255
+ // Format message
213
256
  options.message = options.message || {};
214
257
  options.message.path = options.message.path || '';
215
- options.message.text = options.message.text || options.message.content || '';
258
+ options.message.content = options.message.content || options.message.content || '';
216
259
  options.message.settings = options.message.settings || {};
217
260
  options.message.images = options.message.images || [];
218
261
 
262
+ // Format history
219
263
  options.history = options.history || {};
220
264
  options.history.messages = options.history.messages || [];
221
265
  options.history.limit = typeof options.history.limit === 'undefined' ? 5 : options.history.limit;
222
266
 
223
- options.response = typeof options.response === 'undefined' ? undefined : options.response;
224
- options.temperature = typeof options.temperature === 'undefined' ? 0.7 : options.temperature;
225
- options.maxTokens = typeof options.maxTokens === 'undefined' ? 512 : options.maxTokens;
226
-
227
267
  let attempt = 0;
228
268
 
229
269
  function _log() {
@@ -239,9 +279,9 @@ OpenAI.prototype.request = function (options) {
239
279
  // console.log('*** input.content', input.content.slice(0, 50));
240
280
  // console.log('*** input.path', input.path);
241
281
 
242
- let text = '';
282
+ let content = '';
243
283
 
244
- // Load text
284
+ // Load content
245
285
  if (input.path) {
246
286
  const exists = jetpack.exists(input.path);
247
287
 
@@ -254,15 +294,15 @@ OpenAI.prototype.request = function (options) {
254
294
  }
255
295
 
256
296
  try {
257
- text = jetpack.read(input.path);
297
+ content = jetpack.read(input.path);
258
298
  } catch (e) {
259
299
  return new Error(`Error reading file ${input.path}: ${e}`);
260
300
  }
261
301
  } else {
262
- text = input.text;
302
+ content = input.content;
263
303
  }
264
304
 
265
- return powertools.template(text, input.settings).trim();
305
+ return powertools.template(content, input.settings).trim();
266
306
  }
267
307
 
268
308
  // Log
@@ -315,51 +355,63 @@ OpenAI.prototype.request = function (options) {
315
355
  body: {},
316
356
  }
317
357
 
358
+ // Format depending on mode
318
359
  if (mode === 'chatgpt') {
319
- request.url = 'https://api.openai.com/v1/chat/completions';
320
-
321
- // Get history
360
+ // Get history with respect to the message limit
322
361
  const history = options.history.messages.slice(-options.history.limit);
323
362
 
324
- // Add prompt to history
363
+ // Add prompt to beginning of history
325
364
  history.unshift({
326
365
  role: 'system',
327
- text: prompt,
366
+ content: prompt,
328
367
  images: [],
329
368
  });
330
369
 
331
- // Set last history item
370
+ // Get last history item
332
371
  const lastHistory = history[history.length - 1];
333
372
 
334
- // If message is different than last message in history, add it
335
- if (lastHistory?.text !== message) {
336
- history.push({
337
- role: 'user',
338
- text: message,
339
- images: options.message.images,
340
- });
373
+ // Remove last message from history
374
+ if (
375
+ options.dedupeConsecutiveRoles
376
+ && lastHistory?.role === 'user'
377
+ ) {
378
+ history.pop();
341
379
  }
342
380
 
381
+ // Add message to history
382
+ history.push({
383
+ role: 'user',
384
+ content: message,
385
+ images: options.message.images,
386
+ });
387
+
343
388
  // Format history
344
389
  history.map((m) => {
345
- m.role = m.role || 'system';
390
+ const originalContent = m.content;
391
+ const originalImages = m.images;
346
392
 
393
+ // Set properties
394
+ m.role = m.role || 'system';
347
395
  m.content = [];
396
+ m.images = [];
348
397
 
349
- // Set content
350
- if (m.text) {
398
+ // Format content
399
+ if (originalContent) {
351
400
  m.content.push({
352
401
  type: 'text',
353
- text: m.text,
402
+ text: originalContent,
354
403
  })
355
404
  }
356
405
 
357
- // Set images
358
- m.images = m.images || [];
406
+ // Format images
407
+ if (originalImages) {
408
+ originalImages.forEach((i) => {
409
+ // Skip if no URL
410
+ if (!i.url) {
411
+ return
412
+ }
359
413
 
360
- // Loop through and add
361
- m.images.forEach((i) => {
362
- if (i.url) {
414
+ // Add image
363
415
  m.content.push({
364
416
  type: 'image_url',
365
417
  image_url: {
@@ -367,12 +419,15 @@ OpenAI.prototype.request = function (options) {
367
419
  detail: i.detail || 'low',
368
420
  }
369
421
  });
370
- }
371
- }),
422
+ });
423
+ }
372
424
 
373
- // Delete text and images
374
- delete m.text;
375
- delete m.images;
425
+ // Delete any field except for role, content, images
426
+ Object.keys(m).forEach((key) => {
427
+ if (!['role', 'content', 'images'].includes(key)) {
428
+ delete m[key];
429
+ }
430
+ });
376
431
  })
377
432
 
378
433
  // Log message
@@ -380,16 +435,20 @@ OpenAI.prototype.request = function (options) {
380
435
  _log('Message', m.role, m.content);
381
436
  });
382
437
 
438
+ // Set request
439
+ request.url = 'https://api.openai.com/v1/chat/completions';
383
440
  request.body = {
384
441
  model: options.model,
385
442
  response_format: responseFormat,
386
443
  messages: history,
387
444
  temperature: options.temperature,
388
- max_tokens: options.maxTokens,
445
+ // max_tokens: options.maxTokens,
446
+ max_completion_tokens: options.maxTokens,
389
447
  user: user,
390
448
  }
391
449
  resultPath = 'choices[0].message.content';
392
450
  } else if (mode === 'moderation') {
451
+ // Set request
393
452
  request.url = 'https://api.openai.com/v1/moderations';
394
453
  request.body = {
395
454
  input: message,
@@ -401,8 +460,40 @@ OpenAI.prototype.request = function (options) {
401
460
  // Request
402
461
  await fetch(request.url, request)
403
462
  .then(async (r) => {
463
+ // Log
464
+ // _log('Response RAW', JSON.stringify(r));
465
+ // {
466
+ // "id": "chatcmpl-AGKe03mwx644T6db3QRoXFz0aFuil",
467
+ // "object": "chat.completion",
468
+ // "created": 1728455968,
469
+ // "model": "gpt-4o-mini-2024-07-18",
470
+ // "choices": [{
471
+ // "index": 0,
472
+ // "message": {
473
+ // "role": "assistant",
474
+ // "content": "{\n \"message\": \"We offer several pricing plans:\\n\\n1. **Basic Plan**: Free\\n - Chatsy branding on chat\\n - 1 chatbot\\n - 5 knowledge base FAQs per chatbot\\n - English only\\n\\n2. **Premium Plan**: $19/month\\n - Chatsy branding removed\\n - 1 chatbot\\n - 10 knowledge base FAQs per chatbot\\n - English only\\n\\n3. **Pro Plan**: $29/month\\n - Chatsy branding removed\\n - 3 chatbots\\n - 10 knowledge base FAQs per chatbot\\n - Automatically chats in the language of your customers\\n\\n4. **Pro Plan**: $49/month\\n - Chatsy branding removed\\n - 10 chatbots\\n - 10 knowledge base FAQs per chatbot\\n - Automatically chats in the language of your customers\\n\\nLet me know if you need more details or assistance with anything else!\",\n \"user\": {\n \"name\": \"\"\n },\n \"scores\": {\n \"questionRelevancy\": 1\n }\n}",
475
+ // "refusal": null
476
+ // },
477
+ // "logprobs": null,
478
+ // "finish_reason": "stop"
479
+ // }],
480
+ // "usage": {
481
+ // "prompt_tokens": 1306,
482
+ // "completion_tokens": 231,
483
+ // "total_tokens": 1537,
484
+ // "prompt_tokens_details": {
485
+ // "cached_tokens": 1024
486
+ // },
487
+ // "completion_tokens_details": {
488
+ // "reasoning_tokens": 0
489
+ // }
490
+ // },
491
+ // "system_fingerprint": "fp_e2bde53e6e"
492
+ // }
493
+
404
494
  // Set token counts
405
- self.tokens.input.count += r?.usage?.prompt_tokens || 0;
495
+ self.tokens.input.count += (r?.usage?.prompt_tokens || 0)
496
+ - (r?.usage?.prompt_tokens_details?.cached_tokens || 0);
406
497
  self.tokens.output.count += r?.usage?.completion_tokens || 0;
407
498
  self.tokens.total.count = self.tokens.input.count + self.tokens.output.count;
408
499